Remove Python2 references from the source code.

refs #5443

Initial patch: Neel Patel
Akshay Joshi
2020-04-30 17:22:48 +05:30
parent 7dd00a1494
commit ad80217593
48 changed files with 143 additions and 537 deletions

View File

@@ -15,7 +15,6 @@ object.
 import random
 import select
 import sys
-import six
 import datetime
 from collections import deque
@@ -39,13 +38,7 @@ from .typecast import register_global_typecasters, \
 from .encoding import getEncoding, configureDriverEncodings
 from pgadmin.utils import csv
 from pgadmin.utils.master_password import get_crypt_key
-if sys.version_info < (3,):
-    from StringIO import StringIO
-    IS_PY2 = True
-else:
-    from io import StringIO
-    IS_PY2 = False
+from io import StringIO
 _ = gettext
@@ -692,8 +685,7 @@ WHERE
             u"{conn_id} (Query-id: {query_id}):\n{query}".format(
                 server_id=self.manager.sid,
                 conn_id=self.conn_id,
-                query=query.decode(self.python_encoding) if
-                sys.version_info < (3,) else query,
+                query=query,
                 query_id=query_id
             )
         )
@@ -721,33 +713,6 @@ WHERE
             return False, \
                 gettext('The query executed did not return any data.')
-        def handle_json_data(json_columns, results):
-            """
-            [ This is only for Python2.x]
-            This function will be useful to handle json data types.
-            We will dump json data as proper json instead of unicode values
-            Args:
-                json_columns: Columns which contains json data
-                results: Query result
-            Returns:
-                results
-            """
-            # Only if Python2 and there are columns with JSON type
-            if IS_PY2 and len(json_columns) > 0:
-                temp_results = []
-                for row in results:
-                    res = dict()
-                    for k, v in row.items():
-                        if k in json_columns:
-                            res[k] = json.dumps(v)
-                        else:
-                            res[k] = v
-                    temp_results.append(res)
-                results = temp_results
-            return results
-        def convert_keys_to_unicode(results, conn_encoding):
-            """
-            [ This is only for Python2.x]
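Both removed helpers (handle_json_data and convert_keys_to_unicode) only papered over Python 2's bytes/unicode split. A minimal sketch, not pgAdmin code, of why they are redundant on Python 3: result-row keys and json.dumps output are already text.

import json

# Python 3: dict keys and json.dumps output are already str,
# so no extra unicode-conversion pass over the result set is needed.
row = {"name": "café", "tags": ["a", "b"]}
dumped = json.dumps(row, ensure_ascii=False)
print(dumped)        # {"name": "café", "tags": ["a", "b"]}
print(type(dumped))  # <class 'str'>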
@@ -809,15 +774,10 @@ WHERE
             for c in cur.ordered_description():
                 # This is to handle the case in which column name is non-ascii
                 column_name = c.to_dict()['name']
-                if IS_PY2:
-                    column_name = column_name.decode(conn_encoding)
                 header.append(column_name)
                 if c.to_dict()['type_code'] in ALL_JSON_TYPES:
                     json_columns.append(column_name)
-            if IS_PY2:
-                results = convert_keys_to_unicode(results, conn_encoding)
             res_io = StringIO()
             if quote == 'strings':
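The hunk above drops the Python 2 decode of column names while keeping the JSON-column bookkeeping. A rough sketch of that bookkeeping using psycopg2's plain cursor.description; ordered_description() and to_dict() are pgAdmin cursor wrappers, and the OID tuple below is an assumption rather than the project's ALL_JSON_TYPES definition.

ALL_JSON_TYPES = (114, 199, 3802, 3807)  # assumed OIDs: json, json[], jsonb, jsonb[]

def collect_columns(cursor):
    """Build the CSV header and note which columns hold JSON data."""
    header, json_columns = [], []
    for col in cursor.description:
        header.append(col.name)  # already str on Python 3, no decode needed
        if col.type_code in ALL_JSON_TYPES:
            json_columns.append(col.name)
    return header, json_columns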
@@ -848,7 +808,6 @@ WHERE
                 )
                 csv_writer.writeheader()
-                results = handle_json_data(json_columns, results)
                 # Replace the null values with given string if configured.
                 if replace_nulls_with is not None:
                     results = handle_null_values(results, replace_nulls_with)
@@ -872,10 +831,6 @@ WHERE
                     replace_nulls_with=replace_nulls_with
                 )
-                if IS_PY2:
-                    results = convert_keys_to_unicode(results, conn_encoding)
-                results = handle_json_data(json_columns, results)
                 # Replace the null values with given string if configured.
                 if replace_nulls_with is not None:
                     results = handle_null_values(results, replace_nulls_with)
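With the Python 2 branches gone, the CSV export path is a single StringIO/DictWriter pipeline. A self-contained sketch of that flow, assuming the standard-library csv module rather than pgAdmin's pgadmin.utils.csv wrapper; rows_to_csv is an illustrative name, not a function from the codebase.

import csv
from io import StringIO

def rows_to_csv(results, header, replace_nulls_with=None):
    """Write dict rows to an in-memory CSV buffer (illustration only)."""
    res_io = StringIO()
    csv_writer = csv.DictWriter(res_io, fieldnames=header,
                                quoting=csv.QUOTE_NONNUMERIC)
    csv_writer.writeheader()
    for row in results:
        if replace_nulls_with is not None:
            # Replace the null values with the given string if configured.
            row = {k: replace_nulls_with if v is None else v
                   for k, v in row.items()}
        csv_writer.writerow(row)
    return res_io.getvalue()

print(rows_to_csv([{"id": 1, "note": None}], ["id", "note"],
                  replace_nulls_with="NULL"))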

View File

@@ -12,8 +12,6 @@ Typecast various data types so that they can be compatible with Javascript
 data types.
 """
-import sys
 from psycopg2 import STRING as _STRING
 import psycopg2
 from psycopg2.extensions import encodings
@@ -124,10 +122,6 @@ PSYCOPG_SUPPORTED_RANGE_ARRAY_TYPES = (3905, 3927, 3907, 3913, 3909, 3911)
 def register_global_typecasters():
-    if sys.version_info < (3,):
-        psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
-        psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
     unicode_type_for_record = psycopg2.extensions.new_type(
         (2249,),
         "RECORD",
@@ -186,19 +180,12 @@ def register_string_typecasters(connection):
     postgres_encoding, python_encoding, typecast_encoding = \
         getEncoding(connection.encoding)
     if postgres_encoding != 'UNICODE':
-        if sys.version_info >= (3,):
-            def non_ascii_escape(value, cursor):
-                if value is None:
-                    return None
-                return bytes(
-                    value, encodings[cursor.connection.encoding]
-                ).decode(typecast_encoding, errors='replace')
-        else:
-            def non_ascii_escape(value, cursor):
-                if value is None:
-                    return None
-                return value.decode(typecast_encoding, errors='replace')
-                # return value
+        def non_ascii_escape(value, cursor):
+            if value is None:
+                return None
+            return bytes(
+                value, encodings[cursor.connection.encoding]
+            ).decode(typecast_encoding, errors='replace')
         unicode_type = psycopg2.extensions.new_type(
             # "char", name, text, character, character varying