diff options
author | Daniele Varrazzo <daniele.varrazzo@gmail.com> | 2020-09-05 20:09:39 +0100 |
---|---|---|
committer | Daniele Varrazzo <daniele.varrazzo@gmail.com> | 2020-09-05 20:26:19 +0100 |
commit | 58c6a07e43b959b85ca4966b9f877801d502d51d (patch) | |
tree | f20c1822af90ce7d4ad00c238347dfc73afc48f4 | |
parent | 195b2549371ce4a2e8f1f9c94f06e921c1cda387 (diff) | |
download | psycopg2-58c6a07e43b959b85ca4966b9f877801d502d51d.tar.gz |
Errors fetch scripts ported to Python 3
-rwxr-xr-x | scripts/make_errorcodes.py | 8 | ||||
-rwxr-xr-x | scripts/make_errors.py | 8 |
2 files changed, 8 insertions, 8 deletions
diff --git a/scripts/make_errorcodes.py b/scripts/make_errorcodes.py
index 1370ec8..26269c7 100755
--- a/scripts/make_errorcodes.py
+++ b/scripts/make_errorcodes.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """Generate the errorcodes module starting from PostgreSQL documentation.
 
 The script can be run at a new PostgreSQL release to refresh the module.
@@ -20,7 +20,7 @@ from __future__ import print_function
 
 import re
 import sys
-import urllib2
+from urllib.request import urlopen
 
 from collections import defaultdict
 
@@ -57,10 +57,10 @@ def parse_errors_txt(url):
     classes = {}
     errors = defaultdict(dict)
 
-    page = urllib2.urlopen(url)
+    page = urlopen(url)
     for line in page:
         # Strip comments and skip blanks
-        line = line.split('#')[0].strip()
+        line = line.decode("ascii").split('#')[0].strip()
         if not line:
             continue
diff --git a/scripts/make_errors.py b/scripts/make_errors.py
index 91ed275..d88ca20 100755
--- a/scripts/make_errors.py
+++ b/scripts/make_errors.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """Generate the errors module from PostgreSQL source code.
 
 The script can be run at a new PostgreSQL release to refresh the module.
@@ -21,7 +21,7 @@ from __future__ import print_function
 
 import os
 import re
 import sys
-import urllib2
+from urllib.request import urlopen
 
 from collections import defaultdict
 
@@ -43,10 +43,10 @@ def parse_errors_txt(url):
     classes = {}
     errors = defaultdict(dict)
 
-    page = urllib2.urlopen(url)
+    page = urlopen(url)
     for line in page:
         # Strip comments and skip blanks
-        line = line.split('#')[0].strip()
+        line = line.decode('ascii').split('#')[0].strip()
         if not line:
             continue