Added support for Declarative Partitioning (Range, List) in PostgreSQL 10.

This allows the user to create a partitioned table and its partitions.

  - Editing partitions; creating/listing constraints, rules, and triggers under partitions. (Implemented by Harshal)
  - Updating the browser tree node when creating/attaching/detaching partitions from the table dialog. (Implemented by Ashesh)
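
For context, the sketch below (not part of this commit) shows the PostgreSQL 10 declarative partitioning DDL that the new dialogs ultimately drive. The table, column, and connection details are illustrative assumptions; psycopg2 is used only because it is pgAdmin's default driver.

```python
# Illustrative sketch only -- not code from this commit. Table/column names and
# connection parameters are assumptions; psycopg2 is pgAdmin's default driver.
import psycopg2

conn = psycopg2.connect(dbname="postgres", user="postgres")  # assumed credentials
conn.autocommit = True
cur = conn.cursor()

# Range-partitioned parent table (PostgreSQL 10+). For list partitioning the
# clause would be PARTITION BY LIST (...) and partitions use FOR VALUES IN (...).
cur.execute("""
    CREATE TABLE sales (
        saledate date NOT NULL,
        amount   numeric
    ) PARTITION BY RANGE (saledate)
""")

# "Create" operation in the Partitions grid: create a partition directly.
cur.execute("""
    CREATE TABLE sales_2017 PARTITION OF sales
        FOR VALUES FROM ('2017-01-01') TO ('2018-01-01')
""")

# "Attach"/"Detach Partition" operations: attach an existing table, detach it again.
cur.execute("CREATE TABLE sales_2018 (LIKE sales INCLUDING DEFAULTS)")
cur.execute("""
    ALTER TABLE sales ATTACH PARTITION sales_2018
        FOR VALUES FROM ('2018-01-01') TO ('2019-01-01')
""")
cur.execute("ALTER TABLE sales DETACH PARTITION sales_2018")

cur.close()
conn.close()
```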
This commit is contained in:
Akshay Joshi 2017-07-07 11:55:55 +05:30
parent ca7fe9a520
commit c2fb0394bf
48 changed files with 6274 additions and 2062 deletions

View File

@ -145,6 +145,16 @@ define('pgadmin.node.schema', [
group: gettext('Table'), mode: ['edit', 'create'],
type: 'switch',
disabled: function(m) {
// If the table is a partitioned table then disable it.
if (m.top && m.top.get('is_partitioned')) {
// We also need to unset the rest of the related settings
setTimeout(function() {
m.set('autovacuum_custom', false);
}, 10);
return true;
}
if(!m.top.inSchema.apply(this, [m])) {
return false;
}
@ -459,6 +469,32 @@ define('pgadmin.node.schema', [
return true;
}
});
pgBrowser.tableChildTreeNodeHierarchy = function(i) {
var idx = 0,
res = {},
t = pgBrowser.tree;
do {
d = t.itemData(i);
if (
d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId
) {
if (d._type === 'partition' || d._type === 'table') {
if (!('table' in res)) {
res['table'] = _.extend({}, d, {'priority': idx});
idx -= 1;
}
} else {
res[d._type] = _.extend({}, d, {'priority': idx});
idx -= 1;
}
}
i = t.hasParent(i) ? t.parent(i) : null;
} while (i);
return res;
};
}
// Switch Cell with Deps (specifically for table children)
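
Roughly speaking, the new helper above walks from the selected tree item up to the root and records each ancestor node that has an id, but it stores both 'table' and 'partition' ancestors under a single 'table' key, so the children of a partition generate their URLs exactly as if they lived under a plain table. An illustrative sketch of the kind of object it returns when a partition node is selected (shown here as a Python dict; only '_type' and 'priority' are taken from the code, the rest of each entry is the node's itemData):

```python
# Illustrative shape only; the real entries carry the full itemData of each node.
hierarchy = {
    # The partition itself occupies the 'table' slot, so the parent partitioned
    # table higher up the tree is skipped (the slot is already taken).
    'table':    {'_type': 'partition', 'priority': 0},
    'schema':   {'_type': 'schema',    'priority': -1},
    'database': {'_type': 'database',  'priority': -2},
    'server':   {'_type': 'server',    'priority': -3},
}
```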

View File

@ -86,6 +86,7 @@ define('pgadmin.node.column', [
if (!pgBrowser.Nodes['column']) {
pgBrowser.Nodes['column'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
parent_type: ['table', 'view', 'mview'],
collection_type: ['coll-table', 'coll-view', 'coll-mview'],
type: 'column',
@ -197,11 +198,17 @@ define('pgadmin.node.column', [
);
},
disabled: function(m){
// If primary key already exist then disable.
// Disable it when one of these is true:
// - A primary key already exists
// - The table is a partitioned table
if (
m.top && !_.isUndefined(m.top.get('oid')) &&
m.top.get('primary_key').length > 0 &&
!_.isUndefined(m.top.get('primary_key').first().get('oid'))
m.top && ((
!_.isUndefined(m.top.get('oid')) &&
m.top.get('primary_key').length > 0 &&
!_.isUndefined(m.top.get('primary_key').first().get('oid'))
) || (
m.top.has('is_partitioned') && m.top.get('is_partitioned')
))
) {
return true;
}
@ -228,6 +235,17 @@ define('pgadmin.node.column', [
return false;
}
// If the table is a partitioned table then disable it
if (m.top && !_.isUndefined(m.top.get('is_partitioned')) &&
m.top.get('is_partitioned'))
{
setTimeout(function () {
m.set('is_primary_key', false);
}, 10);
return false;
}
if(!m.inSchemaWithColumnCheck.apply(this, [m]) &&
!_.isUndefined(name) && !_.isNull(name) && name !== '') {
return true;

View File

@ -24,7 +24,7 @@ from . import utils as columns_utils
class ColumnGetTestCase(BaseTestGenerator):
"""This class will get column under table node."""
scenarios = [
('Fetch table Node URL', dict(url='/browser/column/obj/'))
('Fetch columns under table node', dict(url='/browser/column/obj/'))
]
def setUp(self):

View File

@ -10,6 +10,7 @@ define('pgadmin.node.check_constraints', [
// Check Constraint Node
if (!pgBrowser.Nodes['check_constraints']) {
pgAdmin.Browser.Nodes['check_constraints'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'check_constraints',
label: gettext('Check'),
collection_type: 'coll-constraints',
@ -18,7 +19,7 @@ define('pgadmin.node.check_constraints', [
dialogHelp: url_for('help.static', {'filename': 'check_dialog.html'}),
hasSQL: true,
hasDepends: true,
parent_type: ['table'],
parent_type: ['table','partition'],
Init: function() {
// Avoid multiple registration of menus
if (this.initialized)
@ -137,6 +138,18 @@ define('pgadmin.node.check_constraints', [
'switch', cell: 'boolean', group: gettext('Definition'), mode:
['properties', 'create', 'edit'], min_version: 90200,
disabled: function(m) {
// Disabled if the table is a partitioned table.
if ((_.has(m , 'top') && !_.isUndefined(m.top) && m.top.get('is_partitioned')) ||
(_.has(m, 'node_info') && _.has(m.node_info, 'table') &&
_.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned)
){
setTimeout(function(){
m.set('connoinherit', false);
},10);
return true;
}
return ((_.has(m, 'handler') &&
!_.isUndefined(m.handler) &&
!_.isUndefined(m.get('oid'))) || (_.isFunction(m.isNew) && !m.isNew()));

View File

@ -602,6 +602,7 @@ define('pgadmin.node.exclusion_constraint', [
// Extend the browser's node class for exclusion constraint node
if (!pgBrowser.Nodes['exclusion_constraint']) {
pgAdmin.Browser.Nodes['exclusion_constraint'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'exclusion_constraint',
label: gettext('Exclusion constraint'),
collection_type: 'coll-constraints',
@ -609,7 +610,7 @@ define('pgadmin.node.exclusion_constraint', [
sqlCreateHelp: 'ddl-constraints.html',
dialogHelp: url_for('help.static', {'filename': 'exclusion_constraint_dialog.html'}),
hasSQL: true,
parent_type: 'table',
parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
hasDepends: true,
@ -916,12 +917,22 @@ define('pgadmin.node.exclusion_constraint', [
if (data && data.check == false)
return true;
var t = pgBrowser.tree, i = item, d = itemData, parents = [];
var t = pgBrowser.tree, i = item, d = itemData, parents = [],
immediate_parent_table_found = false,
is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
// If the table is a partitioned table then return false
if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
immediate_parent_table_found = true;
if ('is_partitioned' in d && d.is_partitioned) {
is_immediate_parent_table_partitioned = true;
}
}
// If it is schema then allow user to create table
if (_.indexOf(['schema'], d._type) > -1)
return true;
return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@ -930,7 +941,7 @@ define('pgadmin.node.exclusion_constraint', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
return true;
return !is_immediate_parent_table_partitioned;
}
}
});

View File

@ -602,6 +602,7 @@ define('pgadmin.node.foreign_key', [
// Extend the browser's node class for foreign key node
if (!pgBrowser.Nodes['foreign_key']) {
pgAdmin.Browser.Nodes['foreign_key'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'foreign_key',
label: gettext('Foreign key'),
collection_type: 'coll-constraints',
@ -610,7 +611,7 @@ define('pgadmin.node.foreign_key', [
dialogHelp: url_for('help.static', {'filename': 'foreign_key_dialog.html'}),
hasSQL: true,
hasDepends: false,
parent_type: 'table',
parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
hasDepends: true,
@ -1068,12 +1069,22 @@ define('pgadmin.node.foreign_key', [
if (data && data.check == false)
return true;
var t = pgBrowser.tree, i = item, d = itemData, parents = [];
var t = pgBrowser.tree, i = item, d = itemData, parents = [],
immediate_parent_table_found = false,
is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
// If the table is a partitioned table then return false
if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
immediate_parent_table_found = true;
if ('is_partitioned' in d && d.is_partitioned) {
is_immediate_parent_table_partitioned = true;
}
}
// If it is schema then allow user to create table
if (_.indexOf(['schema'], d._type) > -1)
return true;
return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@ -1082,7 +1093,7 @@ define('pgadmin.node.foreign_key', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
return true;
return !is_immediate_parent_table_partitioned;
}
}
});

View File

@ -6,6 +6,7 @@ define('pgadmin.node.{{node_type}}', [
// Extend the browser's node class for index constraint node
if (!pgBrowser.Nodes['{{node_type}}']) {
pgAdmin.Browser.Nodes['{{node_type}}'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: '{{node_type}}',
label: '{{ node_label }}',
collection_type: 'coll-constraints',
@ -20,7 +21,7 @@ define('pgadmin.node.{{node_type}}', [
hasDepends: true,
hasStatistics: true,
statsPrettifyFields: ['Index size'],
parent_type: 'table',
parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
Init: function() {
@ -45,12 +46,28 @@ define('pgadmin.node.{{node_type}}', [
if (data && data.check == false)
return true;
var t = pgBrowser.tree, i = item, d = itemData, parents = [];
var t = pgBrowser.tree, i = item, d = itemData, parents = [],
immediate_parent_table_found = false,
is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
// If it is schema then allow user to c reate table
// If the table is a partitioned table then return false
if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
immediate_parent_table_found = true;
if ('is_partitioned' in d && d.is_partitioned) {
is_immediate_parent_table_partitioned = true;
}
}
// If it is schema then allow user to create table
if (_.indexOf(['schema'], d._type) > -1) {
{% if node_type == 'primary_key' %}
if (is_immediate_parent_table_partitioned) {
return false;
}
// There should be only one primary key per table.
var children = t.children(arguments[1], false),
primary_key_found = false;
@ -63,7 +80,7 @@ define('pgadmin.node.{{node_type}}', [
});
return !primary_key_found;
{% else %}
return true;
return !is_immediate_parent_table_partitioned;
{% endif %}
}
parents.push(d._type);
@ -74,7 +91,7 @@ define('pgadmin.node.{{node_type}}', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
return true;
return !is_immediate_parent_table_partitioned;
}
},

View File

@ -9,16 +9,18 @@ define('pgadmin.node.constraints', [
node: 'constraints',
label: gettext('Constraints'),
type: 'coll-constraints',
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
columns: ['name', 'comment']
});
};
if (!pgBrowser.Nodes['constraints']) {
pgAdmin.Browser.Nodes['constraints'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'constraints',
label: gettext('Constraints'),
collection_type: 'coll-constraints',
parent_type: ['table'],
parent_type: ['table','partition'],
Init: function() {
/* Avoid multiple registration of menus */
if (this.initialized)

View File

@ -72,12 +72,28 @@ class IndexesModule(CollectionNodeModule):
if super(IndexesModule, self).BackendSupported(manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
# In the case of a partitioned table, return False (PostgreSQL 10 does not
# support indexes on partitioned tables).
if 'tid' in kwargs and manager.version >= 100000:
partition_path = 'partition/sql/#{0}#'.format(manager.version)
SQL = render_template(
"/".join([partition_path, 'backend_support.sql']),
tid=kwargs['tid']
)
status, res = conn.execute_scalar(SQL)
# check if any errors
if not status:
return internal_server_error(errormsg=res)
return not res
if 'vid' not in kwargs:
return True
template_path = 'index/sql/#{0}#'.format(manager.version)
SQL = render_template("/".join(
[template_path, 'backend_support.sql']), vid=kwargs['vid'])
SQL = render_template(
"/".join([template_path, 'backend_support.sql']),
vid=kwargs['vid']
)
status, res = conn.execute_scalar(SQL)
# check if any errors
@ -239,9 +255,10 @@ class IndexesView(PGChildNodeView):
# We need parent's name eg table name and schema name
# when we create new index in update we can fetch it using
# property sql
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
SQL = render_template(
"/".join([self.template_path, 'get_parent.sql']),
tid=kwargs['tid']
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@ -262,8 +279,9 @@ class IndexesView(PGChildNodeView):
"""
res = [{'label': '', 'value': ''}]
try:
SQL = render_template("/".join([self.template_path,
'get_collations.sql']))
SQL = render_template(
"/".join([self.template_path, 'get_collations.sql'])
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@ -289,8 +307,7 @@ class IndexesView(PGChildNodeView):
"""
res = [{'label': '', 'value': ''}]
try:
SQL = render_template("/".join([self.template_path,
'get_am.sql']))
SQL = render_template("/".join([self.template_path, 'get_am.sql']))
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@ -317,17 +334,17 @@ class IndexesView(PGChildNodeView):
res = dict()
try:
# Fetching all the access methods
SQL = render_template("/".join([self.template_path,
'get_am.sql']))
SQL = render_template("/".join([self.template_path, 'get_am.sql']))
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
for row in rset['rows']:
# Fetching all the op_classes for each access method
SQL = render_template("/".join([self.template_path,
'get_op_class.sql']),
oid=row['oid'])
SQL = render_template(
"/".join([self.template_path, 'get_op_class.sql']),
oid=row['oid']
)
status, result = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@ -365,8 +382,9 @@ class IndexesView(PGChildNodeView):
JSON of available schema nodes
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']), tid=tid)
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), tid=tid
)
status, res = self.conn.execute_dict(SQL)
if not status:
@ -393,10 +411,10 @@ class IndexesView(PGChildNodeView):
Returns:
JSON of available schema child nodes
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid,
idx=idx)
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']),
tid=tid, idx=idx
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@ -433,8 +451,9 @@ class IndexesView(PGChildNodeView):
JSON of available schema child nodes
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']), tid=tid)
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), tid=tid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@ -465,8 +484,9 @@ class IndexesView(PGChildNodeView):
Updated properties data with column details
"""
SQL = render_template("/".join([self.template_path,
'column_details.sql']), idx=idx)
SQL = render_template(
"/".join([self.template_path, 'column_details.sql']), idx=idx
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@ -521,10 +541,10 @@ class IndexesView(PGChildNodeView):
JSON of selected schema node
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid)
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
@ -595,9 +615,10 @@ class IndexesView(PGChildNodeView):
try:
# Start transaction.
self.conn.execute_scalar("BEGIN;")
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data, conn=self.conn, mode='create')
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data, conn=self.conn, mode='create'
)
status, res = self.conn.execute_scalar(SQL)
if not status:
# End transaction.
@ -606,9 +627,10 @@ class IndexesView(PGChildNodeView):
# If user chooses concurrent index then we cannot run it along
# with other alter statements so we will separate alter index part
SQL = render_template("/".join([self.template_path,
'alter.sql']),
data=data, conn=self.conn)
SQL = render_template(
"/".join([self.template_path, 'alter.sql']),
data=data, conn=self.conn
)
SQL = SQL.strip('\n').strip(' ')
if SQL != '':
status, res = self.conn.execute_scalar(SQL)
@ -618,9 +640,10 @@ class IndexesView(PGChildNodeView):
return internal_server_error(errormsg=res)
# We need the OID to add the object to the browser tree
SQL = render_template("/".join([self.template_path,
'get_oid.sql']),
tid=tid, data=data)
SQL = render_template(
"/".join([self.template_path, 'get_oid.sql']),
tid=tid, data=data
)
status, idx = self.conn.execute_scalar(SQL)
if not status:
# End transaction.
@ -665,10 +688,10 @@ class IndexesView(PGChildNodeView):
try:
# We will first fetch the index name for current request
# so that we create template for dropping index
SQL = render_template("/".join([self.template_path,
'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid)
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
if not status:
@ -687,9 +710,10 @@ class IndexesView(PGChildNodeView):
data = dict(res['rows'][0])
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data, conn=self.conn, cascade=cascade)
SQL = render_template(
"/".join([self.template_path, 'delete.sql']),
data=data, conn=self.conn, cascade=cascade
)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
@ -787,10 +811,10 @@ class IndexesView(PGChildNodeView):
This function will generate SQL from the model data
"""
if idx is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid)
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
if not status:
@ -826,11 +850,15 @@ class IndexesView(PGChildNodeView):
return gettext('-- definition incomplete')
# If the request is for a new object which does not have a did
SQL = render_template("/".join([self.template_path, 'create.sql']),
data=data, conn=self.conn, mode=mode)
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data, conn=self.conn, mode=mode
)
SQL += "\n"
SQL += render_template("/".join([self.template_path, 'alter.sql']),
data=data, conn=self.conn)
SQL += render_template(
"/".join([self.template_path, 'alter.sql']),
data=data, conn=self.conn
)
return SQL, data['name'] if 'name' in data else old_data['name']
@ -848,10 +876,10 @@ class IndexesView(PGChildNodeView):
idx: Index ID
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid)
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
if not status:
@ -872,9 +900,10 @@ class IndexesView(PGChildNodeView):
return SQL
sql_header = u"-- Index: {0}\n\n-- ".format(data['name'])
sql_header += render_template("/".join([self.template_path,
'delete.sql']),
data=data, conn=self.conn)
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data, conn=self.conn
)
SQL = sql_header + '\n\n' + SQL
@ -959,10 +988,11 @@ class IndexesView(PGChildNodeView):
if is_pgstattuple:
# Fetch index details only if extended stats available
SQL = render_template("/".join([self.template_path,
'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid)
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)

View File

@ -10,6 +10,7 @@ define('pgadmin.node.index', [
node: 'index',
label: gettext('Indexes'),
type: 'coll-index',
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
sqlAlterHelp: 'sql-alterindex.html',
sqlCreateHelp: 'sql-createindex.html',
dialogHelp: url_for('help.static', {'filename': 'index_dialog.html'}),
@ -210,8 +211,9 @@ define('pgadmin.node.index', [
});
if (!pgBrowser.Nodes['index']) {
pgAdmin.Browser.Nodes['index'] = pgAdmin.Browser.Node.extend({
parent_type: ['table', 'view', 'mview'],
pgAdmin.Browser.Nodes['index'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
parent_type: ['table', 'view', 'mview', 'partition'],
collection_type: ['coll-table', 'coll-view'],
sqlAlterHelp: 'sql-alterindex.html',
sqlCreateHelp: 'sql-createindex.html',
@ -246,6 +248,12 @@ define('pgadmin.node.index', [
category: 'create', priority: 4, label: gettext('Index...'),
icon: 'wcTabIcon icon-index', data: {action: 'create', check: true},
enable: 'canCreate'
},{
name: 'create_index_onPartition', node: 'partition', module: this,
applies: ['object', 'context'], callback: 'show_obj_properties',
category: 'create', priority: 4, label: gettext('Index...'),
icon: 'wcTabIcon icon-index', data: {action: 'create', check: true},
enable: 'canCreate'
},{
name: 'create_index_onMatView', node: 'mview', module: this,
applies: ['object', 'context'], callback: 'show_obj_properties',
@ -472,12 +480,23 @@ define('pgadmin.node.index', [
if (data && data.check == false)
return true;
var t = pgBrowser.tree, i = item, d = itemData, parents = [];
var t = pgBrowser.tree, i = item, d = itemData, parents = [],
immediate_parent_table_found = false,
is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
// If it is schema then allow user to c reate table
// Do not allow creating index on partitioned tables.
if (!immediate_parent_table_found &&
_.indexOf(['table', 'partition'], d._type) > -1) {
immediate_parent_table_found = true;
if ('is_partitioned' in d && d.is_partitioned) {
is_immediate_parent_table_partitioned = true;
}
}
// If it is schema then allow user to create index
if (_.indexOf(['schema'], d._type) > -1)
return true;
return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@ -486,7 +505,7 @@ define('pgadmin.node.index', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
return true;
return !is_immediate_parent_table_partitioned;
}
}
});

View File

@ -0,0 +1,583 @@
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
""" Implements Partitions Node """
import re
import simplejson as json
import pgadmin.browser.server_groups.servers.databases.schemas as schema
from flask import render_template, request
from flask_babel import gettext
from pgadmin.browser.server_groups.servers.databases.schemas.utils \
import DataTypeReader, VacuumSettings
from pgadmin.utils.ajax import internal_server_error, \
make_response as ajax_response, gone
from pgadmin.browser.server_groups.servers.databases.schemas.tables.utils \
import BaseTableView
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.utils.ajax import make_json_response, precondition_required
from config import PG_DEFAULT_DRIVER
from pgadmin.browser.utils import PGChildModule
class PartitionsModule(CollectionNodeModule):
"""
class PartitionsModule(CollectionNodeModule)
A module class for Partition node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the Partition node and its base module.
* get_nodes(gid, sid, did, scid, tid)
- Method is used to generate the browser collection node.
* node_inode()
- Method is overridden from its base class to mark the node as expandable (an inode).
* script_load()
- Load the module script for the partition module when any of the schema nodes is initialized.
"""
NODE_TYPE = 'partition'
COLLECTION_LABEL = gettext("Partitions")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the PartitionsModule and its base module.
Args:
*args:
**kwargs:
"""
super(PartitionsModule, self).__init__(*args, **kwargs)
self.min_ver = 100000
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, **kwargs):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(kwargs['tid'])
@property
def script_load(self):
"""
Load the module script for the partition module when any of the schema nodes is initialized.
"""
return schema.SchemaModule.NODE_TYPE
@property
def node_inode(self):
"""
Make the node expandable (an inode) rather than a leaf node
"""
return True
def BackendSupported(self, manager, **kwargs):
"""
Load this module only if the table is a partitioned table
"""
if 'tid' in kwargs and CollectionNodeModule.BackendSupported(self, manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
template_path = 'partition/sql/#{0}#'.format(manager.version)
SQL = render_template("/".join(
[template_path, 'backend_support.sql']), tid=kwargs['tid'])
status, res = conn.execute_scalar(SQL)
# check if any errors
if not status:
return internal_server_error(errormsg=res)
return res
def register(self, app, options, first_registration=False):
"""
Override the default register function to automatically register
sub-modules of table node under partition table node.
"""
if first_registration:
self.submodules = list(app.find_submodules(self.import_name))
super(CollectionNodeModule, self).register(app, options, first_registration)
for module in self.submodules:
if first_registration:
module.parentmodules.append(self)
app.register_blueprint(module)
# Now add sub modules of table node to partition table node.
if first_registration:
# Exclude 'partition' module for now to avoid cyclic import issue.
modules_to_skip = ['partition', 'column']
for parent in self.parentmodules:
if parent.NODE_TYPE == 'table':
self.submodules += [
submodule for submodule in parent.submodules
if submodule.NODE_TYPE not in modules_to_skip
]
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
blueprint = PartitionsModule(__name__)
class PartitionsView(BaseTableView, DataTypeReader, VacuumSettings):
"""
This class is responsible for generating routes for Partition node
Methods:
-------
* list()
- This function is used to list all the Partition nodes within that
collection.
* nodes()
- This function is used to create all the child nodes within that
collection. Here it will create all the Partition nodes.
* properties(gid, sid, did, scid, tid, ptid)
- This function will show the properties of the selected Partition node
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'ptid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'nodes': [{'get': 'nodes'}, {'get': 'nodes'}],
'children': [{'get': 'children'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {}],
'detach': [{'put': 'detach'}],
'truncate': [{'put': 'truncate'}]
})
def children(self, **kwargs):
"""Build a list of treeview nodes from the child nodes."""
if 'sid' not in kwargs:
return precondition_required(
gettext('Required properties are missing.')
)
from pgadmin.utils.driver import get_driver
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
sid=kwargs['sid']
)
did = None
if 'did' in kwargs:
did = kwargs['did']
conn = manager.connection(did=did)
if not conn.connected():
return precondition_required(
gettext(
"Connection to the server has been lost."
)
)
nodes = []
for module in self.blueprint.submodules:
if isinstance(module, PGChildModule):
if manager is not None and \
module.BackendSupported(manager, **kwargs):
# treat partition table as normal table.
# replace tid with ptid and pop ptid from kwargs
if 'ptid' in kwargs:
ptid = kwargs.pop('ptid')
kwargs['tid'] = ptid
nodes.extend(module.get_nodes(**kwargs))
else:
nodes.extend(module.get_nodes(**kwargs))
# Explicitly include 'partition' module as we had excluded it during
# registration.
nodes.extend(self.blueprint.get_nodes(**kwargs))
# Return sorted nodes based on label
return make_json_response(
data=sorted(
nodes, key=lambda c: c['label']
)
)
@BaseTableView.check_precondition
def list(self, gid, sid, did, scid, tid):
"""
This function is used to list all the partition nodes within that
collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
Returns:
JSON of available partition nodes
"""
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@BaseTableView.check_precondition
def nodes(self, gid, sid, did, scid, tid, ptid=None):
"""
This function is used to list all the partition nodes within that
collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Parent Table ID
ptid: Partition Table ID
Returns:
JSON of available partition nodes
"""
SQL = render_template(
"/".join([self.partition_template_path, 'nodes.sql']),
scid=scid, tid=tid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
def browser_node(row):
return self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon="icon-partition",
tigger_count=row['triggercount'],
has_enable_triggers=row['has_enable_triggers'],
is_partitioned=row['is_partitioned'],
parent_schema_id=scid,
schema_id=row['schema_id'],
schema_name=row['schema_name']
)
if ptid is not None:
if len(rset['rows']) == 0:
return gone(gettext(
"The specified partitioned table could not be found."
))
return make_json_response(
data=browser_node(rset['rows'][0]), status=200
)
res = []
for row in rset['rows']:
res.append(browser_node(row))
return make_json_response(
data=res,
status=200
)
@BaseTableView.check_precondition
def properties(self, gid, sid, did, scid, tid, ptid):
"""
This function will show the properties of the selected partition node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
ptid: Partition Table ID
Returns:
JSON of selected table node
"""
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
ptid=ptid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext(
"The specified partitioned table could not be found."))
return super(PartitionsView, self).properties(
gid, sid, did, scid, ptid, res)
@BaseTableView.check_precondition
def sql(self, gid, sid, did, scid, tid, ptid):
"""
This function will create the reverse engineered SQL for
the table object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
ptid: Partition Table ID
"""
main_sql = []
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
ptid=ptid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext(
"The specified partitioned table could not be found."))
data = res['rows'][0]
return BaseTableView.get_reverse_engineered_sql(self, did, scid, ptid,
main_sql, data)
@BaseTableView.check_precondition
def detach(self, gid, sid, did, scid, tid, ptid):
"""
This function will detach the partition from the parent table
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
ptid: Partition Table ID
"""
# Fetch schema name
status, parent_schema = self.conn.execute_scalar(
render_template(
"/".join([self.table_template_path, 'get_schema.sql']),
conn=self.conn, scid=scid
)
)
if not status:
return internal_server_error(errormsg=parent_schema)
# Fetch Parent Table name
status, partitioned_table_name = self.conn.execute_scalar(
render_template(
"/".join([self.table_template_path, 'get_table.sql']),
conn=self.conn, scid=scid, tid=tid
)
)
if not status:
return internal_server_error(errormsg=partitioned_table_name)
# Get schema oid of partition
status, pscid = self.conn.execute_scalar(
render_template("/".join([self.table_template_path,
'get_schema_oid.sql']), tid=ptid))
if not status:
return internal_server_error(errormsg=scid)
# Fetch schema name
status, partition_schema = self.conn.execute_scalar(
render_template("/".join([self.table_template_path,
'get_schema.sql']), conn=self.conn,
scid=pscid)
)
if not status:
return internal_server_error(errormsg=partition_schema)
# Fetch Partition Table name
status, partition_name = self.conn.execute_scalar(
render_template(
"/".join([self.table_template_path, 'get_table.sql']),
conn=self.conn, scid=pscid, tid=ptid
)
)
if not status:
return internal_server_error(errormsg=partition_name)
try:
temp_data = dict()
temp_data['parent_schema'] = parent_schema
temp_data['partitioned_table_name'] = partitioned_table_name
temp_data['schema'] = partition_schema
temp_data['name'] = partition_name
SQL = render_template("/".join(
[self.partition_template_path, 'detach.sql']),
data=temp_data, conn=self.conn)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Partition detached."),
data={
'id': ptid,
'scid': scid
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@BaseTableView.check_precondition
def msql(self, gid, sid, did, scid, tid, ptid=None):
"""
This function will create the modified SQL for the table object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
"""
data = dict()
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
if ptid is not None:
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
ptid=ptid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
SQL, name = self.get_sql(did, scid, ptid, data, res)
SQL = re.sub('\n{2,}', '\n\n', SQL)
SQL = SQL.strip('\n')
if SQL == '':
SQL = "--modified SQL"
return make_json_response(
data=SQL,
status=200
)
@BaseTableView.check_precondition
def update(self, gid, sid, did, scid, tid, ptid):
"""
This function will update an existing table object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
ptid: Partition Table ID
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
try:
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
ptid=ptid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return super(PartitionsView, self).update(
gid, sid, did, scid, ptid, data, res, parent_id=tid)
except Exception as e:
return internal_server_error(errormsg=str(e))
@BaseTableView.check_precondition
def truncate(self, gid, sid, did, scid, tid, ptid):
"""
This function will truncate the table object
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
"""
try:
SQL = render_template("/".join([self.partition_template_path,
'properties.sql']),
did=did, scid=scid, tid=tid,
ptid=ptid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return super(PartitionsView, self).truncate(gid, sid, did, scid, ptid, res)
except Exception as e:
return internal_server_error(errormsg=str(e))
PartitionsView.register_node_view(blueprint)
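
The detach flow above only collects the schema-qualified names of the parent table and the partition and hands them to the detach.sql template, which is not shown in this hunk. A minimal sketch of the statement such a template would render, using psycopg2.sql (an assumption here, not pgAdmin's own templating) purely for identifier quoting:

```python
# Sketch only: the actual detach.sql Jinja template is not part of this hunk.
# psycopg2.sql is used here as an assumed, convenient way to quote identifiers.
from psycopg2 import sql

def build_detach_sql(parent_schema, partitioned_table_name, schema, name):
    """Build an ALTER TABLE ... DETACH PARTITION statement from the names
    collected by PartitionsView.detach()."""
    return sql.SQL("ALTER TABLE {}.{} DETACH PARTITION {}.{}").format(
        sql.Identifier(parent_schema),
        sql.Identifier(partitioned_table_name),
        sql.Identifier(schema),
        sql.Identifier(name),
    )

# build_detach_sql('public', 'sales', 'public', 'sales_2017') executed on a
# connection runs:
#   ALTER TABLE "public"."sales" DETACH PARTITION "public"."sales_2017"
```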

Binary file not shown (new image, 1.4 KiB).

Binary file not shown (new image, 1.2 KiB).

View File

@ -16,6 +16,7 @@ define('pgadmin.node.rule', [
node: 'rule',
label: gettext('Rules'),
type: 'coll-rule',
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
columns: ["name", "owner", "comment"]
});
}
@ -33,8 +34,9 @@ define('pgadmin.node.rule', [
rule option in the context menu
*/
if (!pgBrowser.Nodes['rule']) {
pgAdmin.Browser.Nodes['rule'] = pgAdmin.Browser.Node.extend({
parent_type: ['table','view'],
pgAdmin.Browser.Nodes['rule'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
parent_type: ['table','view', 'partition'],
type: 'rule',
sqlAlterHelp: 'sql-alterrule.html',
sqlCreateHelp: 'sql-createrule.html',
@ -98,6 +100,12 @@ define('pgadmin.node.rule', [
category: 'create', priority: 4, label: gettext('Rule...'),
icon: 'wcTabIcon icon-rule', data: {action: 'create', check: true},
enable: 'canCreate'
},{
name: 'create_rule', node: 'partition', module: this,
applies: ['object', 'context'], callback: 'show_obj_properties',
category: 'create', priority: 4, label: gettext('Rule...'),
icon: 'wcTabIcon icon-rule', data: {action: 'create', check: true},
enable: 'canCreate'
}
]);
},

View File

@ -0,0 +1,323 @@
define(
['sources/gettext', 'jquery', 'underscore', 'pgadmin.browser',
'backform','backgrid', 'pgadmin.browser.collection'],
function(gettext, $, _, pgBrowser, Backform, Backgrid) {
Backgrid.PartitionRow = Backgrid.Row.extend({
modelDuplicateColor: "lightYellow",
modelUniqueColor: "#fff",
initialize: function () {
Backgrid.Row.prototype.initialize.apply(this, arguments);
var self = this;
self.model.on("change:is_attach", function() {
setTimeout(function() {
self.columns.each(function(col) {
if (col.get('name') == 'partition_name') {
var idx = self.columns.indexOf(col),
cf = col.get("cellFunction"),
cell = new (cf.apply(col, [self.model]))({
column: col,
model: self.model
}),
oldCell = self.cells[idx];
oldCell.remove();
self.cells[idx] = cell;
self.render();
}
});
}, 10);
});
self.listenTo(self.model, 'pgadmin-session:model:duplicate', self.modelDuplicate);
self.listenTo(self.model, 'pgadmin-session:model:unique', self.modelUnique);
},
modelDuplicate: function() {
$(this.el).removeClass("new");
this.el.style.backgroundColor = this.modelDuplicateColor;
},
modelUnique: function() {
this.el.style.backgroundColor = this.modelUniqueColor;
}
});
var getPartitionCell = function(model) {
var is_attach = model.get("is_attach");
if (is_attach) {
var options = [];
model.set({'partition_name': undefined}, {silent:true});
_.each(model.top.table_options, function(t) {
options.push([t.label, t.value]);
});
return Backgrid.Extension.Select2Cell.extend({optionValues: options});
} else {
return Backgrid.StringCell;
}
};
Backform.PartitionKeyModel = pgBrowser.Node.Model.extend({
defaults: {
key_type: 'column',
pt_column: undefined,
expression: undefined
},
keys:['pt_column'],
schema: [{
id: 'key_type', label:'Key type', type:'select2', editable: true,
cell:'select2', cellHeaderClasses: 'width_percent_25',
select2: {allowClear: false},
options:[{
label: 'Column', value: 'column'
},{
label: 'Expression', value: 'expression'
}]
},{
id: 'pt_column', label: gettext('Column'), type:'text',
cell: Backgrid.Extension.Select2DepCell.extend({
keyPathAccessor: function(obj, path) {
var res = obj;
if(_.isArray(res)) {
return _.map(res, function(o) { return o['pt_column']
});
}
path = path.split('.');
for (var i = 0; i < path.length; i++) {
if (_.isNull(res)) return null;
if (_.isEmpty(path[i])) continue;
if (!_.isUndefined(res[path[i]])) res = res[path[i]];
}
return _.isObject(res) && !_.isArray(res) ? null : res;
},
initialize: function() {
// Here we will decide if we need to call URL
// Or fetch the data from parent columns collection
var self = this;
if(this.model.handler) {
Backgrid.Extension.Select2DepCell.prototype.initialize.apply(this, arguments);
// Do not listen for any event(s) for existing constraint.
if (_.isUndefined(self.model.get('oid'))) {
var tableCols = self.model.top.get('columns');
self.listenTo(tableCols, 'remove' , self.resetColOptions);
self.listenTo(tableCols, 'change:name', self.resetColOptions);
}
self.custom_options();
}
},
resetColOptions: function(m) {
var self = this;
setTimeout(function () {
self.custom_options();
self.render.apply(self);
}, 50);
},
custom_options: function() {
// We will add all the columns entered by user in table model
var columns = this.model.top.get('columns'),
added_columns_from_tables = [];
if (columns.length > 0) {
_.each(columns.models, function(m) {
var col = m.get('name');
if(!_.isUndefined(col) && !_.isNull(col)) {
added_columns_from_tables.push(
{label: col, value: col, image:'icon-column'}
);
}
});
}
// Set the values in to options so that user can select
this.column.set('options', added_columns_from_tables);
},
remove: function() {
if(this.model.handler) {
var self = this,
tableCols = self.model.top.get('columns');
self.stopListening(tableCols, 'remove' , self.resetColOptions);
self.stopListening(tableCols, 'change:name' , self.resetColOptions);
Backgrid.Extension.Select2DepCell.prototype.remove.apply(this, arguments);
}
}
}),
deps: ['key_type'],
cellHeaderClasses: 'width_percent_30',
transform : function(data){
var res = [];
if (data && _.isArray(data)) {
_.each(data, function(d) {
res.push({label: d.label, value: d.label, image:'icon-column'});
})
}
return res;
},
select2:{allowClear:false},
editable: function(m) {
if (m.get('key_type') == 'expression') {
setTimeout( function() {
m.set('pt_column', undefined);
}, 10);
return false;
}
return true;
}
},{
id: 'expression', label:'Expression', type:'text',
cell:Backgrid.Extension.StringDepCell,
cellHeaderClasses: 'width_percent_45',
deps: ['key_type'],
editable: function(m) {
if (m.get('key_type') == 'column') {
setTimeout( function() {
m.set('expression', undefined);
}, 10);
return false;
}
return true;
}
}
],
validate: function(keys) {
var col_type = this.get('key_type'),
pt_column = this.get('pt_column'),
expression = this.get('expression');
// Have to clear existing validation before initiating current state
// validation only
this.errorModel.clear();
if (_.isUndefined(col_type) || _.isNull(col_type) ||
String(col_type).replace(/^\s+|\s+$/g, '') == '') {
msg = gettext('Partition key type cannot be empty.');
this.errorModel.set('key_type', msg);
return msg;
}
else if (col_type == 'column' && (
_.isUndefined(pt_column) || _.isNull(pt_column) ||
String(pt_column).replace(/^\s+|\s+$/g, '') == '')) {
msg = gettext('Partition key column cannot be empty.');
this.errorModel.set('pt_column', msg);
return msg;
}
else if (col_type == 'expression' && (
_.isUndefined(expression) || _.isNull(expression) ||
String(expression).replace(/^\s+|\s+$/g, '') == '')) {
msg = gettext('Partition key expression cannot be empty.');
this.errorModel.set('expression', msg);
return msg;
}
return null;
}
});
Backform.PartitionsModel = pgBrowser.Node.Model.extend({
defaults: {
oid: undefined,
is_attach: false,
partition_name: undefined,
values_from: undefined,
values_to: undefined,
values_in: undefined
},
keys:['partition_name'],
schema: [{
id: 'oid', label: gettext('OID'), type: 'text'
},{
id: 'is_attach', label:gettext('Operation'), cell: 'switch',
type: 'switch', options: { 'onText': 'Attach', 'offText': 'Create'},
cellHeaderClasses: 'width_percent_5',
editable: function(m) {
if (m instanceof Backbone.Model && m.isNew() && !m.top.isNew())
return true;
return false;
}
},{
id: 'partition_name', label: gettext('Name'), type: 'text', cell:'string',
cellHeaderClasses: 'width_percent_25',
editable: function(m) {
if (m instanceof Backbone.Model && m.isNew())
return true;
return false;
}, cellFunction: getPartitionCell
},{
id: 'values_from', label:'From', type:'text',
cell:Backgrid.Extension.StringDepCell,
cellHeaderClasses: 'width_percent_20',
editable: function(m) {
if(m.handler && m.handler.top &&
m.handler.top.attributes &&
m.handler.top.attributes.partition_type == 'range' &&
m instanceof Backbone.Model && m.isNew())
return true;
return false;
}
},{
id: 'values_to', label:'To', type:'text',
cell:Backgrid.Extension.StringDepCell,
cellHeaderClasses: 'width_percent_20',
editable: function(m) {
if(m.handler && m.handler.top &&
m.handler.top.attributes &&
m.handler.top.attributes.partition_type == 'range' &&
m instanceof Backbone.Model && m.isNew())
return true;
return false;
}
},{
id: 'values_in', label:'In', type:'text',
cell:Backgrid.Extension.StringDepCell,
cellHeaderClasses: 'width_percent_25',
editable: function(m) {
if(m.handler && m.handler.top &&
m.handler.top.attributes &&
m.handler.top.attributes.partition_type == 'list' &&
m instanceof Backbone.Model && m.isNew())
return true;
return false;
}
}],
validate: function(keys) {
var partition_name = this.get('partition_name'),
values_from = this.get('values_from'),
values_to = this.get('values_to'),
values_in = this.get('values_in');
// Have to clear existing validation before initiating current state
// validation only
this.errorModel.clear();
if (_.isUndefined(partition_name) || _.isNull(partition_name) ||
String(partition_name).replace(/^\s+|\s+$/g, '') == '') {
msg = gettext('Partition name cannot be empty.');
this.errorModel.set('partition_name', msg);
return msg;
}
if (this.top.get('partition_type') == 'range') {
if (_.isUndefined(values_from) || _.isNull(values_from) ||
String(values_from).replace(/^\s+|\s+$/g, '') == '') {
msg = gettext('For range partition From field cannot be empty.');
this.errorModel.set('values_from', msg);
return msg;
} else if (_.isUndefined(values_to) || _.isNull(values_to) ||
String(values_to).replace(/^\s+|\s+$/g, '') == '') {
msg = gettext('For range partition To field cannot be empty.');
this.errorModel.set('values_to', msg);
return msg;
}
} else if (this.top.get('partition_type') == 'list') {
if (_.isUndefined(values_in) || _.isNull(values_in) ||
String(values_in).replace(/^\s+|\s+$/g, '') == '') {
msg = gettext('For list partition In field cannot be empty.');
this.errorModel.set('values_in', msg);
return msg;
}
}
return null;
}
});
});

View File

@ -3,8 +3,10 @@ define('pgadmin.node.table', [
'underscore.string', 'pgadmin', 'pgadmin.browser', 'alertify',
'sources/alerts/alertify_wrapper',
'pgadmin.browser.collection', 'pgadmin.node.column',
'pgadmin.node.constraints'
], function(gettext, url_for, $, _, S, pgAdmin, pgBrowser, alertify, AlertifyWrapper) {
'pgadmin.node.constraints', 'pgadmin.browser.table.partition.utils'
], function(
gettext, url_for, $, _, S, pgAdmin, pgBrowser, alertify, AlertifyWrapper
) {
if (!pgBrowser.Nodes['coll-table']) {
var databases = pgBrowser.Nodes['coll-table'] =
@ -12,7 +14,7 @@ define('pgadmin.node.table', [
node: 'table',
label: gettext('Tables'),
type: 'coll-table',
columns: ['name', 'relowner', 'description'],
columns: ['name', 'relowner', 'is_partitioned', 'description'],
hasStatistics: true,
statsPrettifyFields: ['Size', 'Indexes size', 'Table size',
'Toast table size', 'Tuple length',
@ -22,6 +24,7 @@ define('pgadmin.node.table', [
if (!pgBrowser.Nodes['table']) {
pgBrowser.Nodes['table'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'table',
label: gettext('Table'),
collection_type: 'coll-table',
@ -39,7 +42,7 @@ define('pgadmin.node.table', [
height: '95%',
width: '85%',
Init: function() {
/* Avoid mulitple registration of menus */
/* Avoid multiple registration of menus */
if (this.initialized)
return;
@ -91,6 +94,9 @@ define('pgadmin.node.table', [
icon: 'fa fa-bar-chart', enable : 'canCreate'
}
]);
pgBrowser.Events.on(
'pgadmin:browser:node:table:updated', this.onTableUpdated, this
);
},
canDrop: pgBrowser.Nodes['schema'].canChildDrop,
canDropCascade: pgBrowser.Nodes['schema'].canChildDrop,
@ -284,7 +290,10 @@ define('pgadmin.node.table', [
hastoasttable: true,
toast_autovacuum_enabled: false,
autovacuum_enabled: false,
primary_key: []
primary_key: [],
partitions: [],
partition_type: 'range',
is_partitioned: false
},
// Default values!
initialize: function(attrs, args) {
@ -329,13 +338,38 @@ define('pgadmin.node.table', [
// If tablespace name is not "pg_global" then we need to exclude them
return (!(d && d.label.match(/pg_global/)))
}
},{
id: 'partition', type: 'group', label: gettext('Partition'),
mode: ['edit', 'create'], min_version: 100000,
visible: function(m) {
// Always show in case of create mode
if (m.isNew() || m.get('is_partitioned'))
return true;
return false;
}
},{
id: 'is_partitioned', label:gettext('Partitioned Table?'), cell: 'switch',
type: 'switch', mode: ['properties', 'create', 'edit'],
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
},
disabled: function(m) {
if (!m.isNew())
return true;
return false;
}
},{
id: 'description', label: gettext('Comment'), type: 'multiline',
mode: ['properties', 'create', 'edit'], disabled: 'inSchema'
},{
id: 'coll_inherits', label: gettext('Inherited from table(s)'),
url: 'get_inherits', type: 'array', group: gettext('Columns'),
disabled: 'checkInheritance', deps: ['typname'],
disabled: 'checkInheritance', deps: ['typname', 'is_partitioned'],
mode: ['create', 'edit'],
select2: { multiple: true, allowClear: true,
placeholder: gettext('Select to inherit from...')},
@ -432,7 +466,26 @@ define('pgadmin.node.table', [
model: pgBrowser.Nodes['column'].model,
subnode: pgBrowser.Nodes['column'].model,
mode: ['create', 'edit'],
disabled: 'inSchema', deps: ['typname'],
disabled: function(m) {
// In the case of a partitioned table, remove inherited columns
if (m.isNew() && m.get('is_partitioned')) {
setTimeout(function() {
var coll = m.get('columns');
coll.remove(coll.filter(function(model) {
if (_.isUndefined(model.get('inheritedfrom')))
return false;
return true;
}));
}, 10);
}
if(this.node_info && 'catalog' in this.node_info)
{
return true;
}
return false;
},
deps: ['typname', 'is_partitioned'],
canAdd: 'check_grid_add_condition',
canEdit: true, canDelete: true,
// For each row edit/delete button enable/disable
@ -535,10 +588,22 @@ define('pgadmin.node.table', [
subnode: pgBrowser.Nodes['primary_key'].model,
editable: false, type: 'collection',
group: gettext('Primary Key'), mode: ['edit', 'create'],
canEdit: true, canDelete: true,
canEdit: true, canDelete: true, deps:['is_partitioned'],
control: 'unique-col-collection',
columns : ['name', 'columns'],
canAdd: true,
canAdd: function(m) {
if (m.get('is_partitioned')) {
setTimeout(function() {
var coll = m.get('primary_key');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
return false;
}
return true;
},
canAddRow: function(m) {
// User can only add one primary key
var columns = m.get('columns');
@ -553,9 +618,21 @@ define('pgadmin.node.table', [
subnode: pgBrowser.Nodes['foreign_key'].model,
editable: false, type: 'collection',
group: gettext('Foreign Key'), mode: ['edit', 'create'],
canEdit: true, canDelete: true,
canEdit: true, canDelete: true, deps:['is_partitioned'],
control: 'unique-col-collection',
canAdd: true,
canAdd: function(m) {
if (m.get('is_partitioned')) {
setTimeout(function() {
var coll = m.get('foreign_key');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
return false;
}
return true;
},
columns : ['name', 'columns'],
canAddRow: function(m) {
// User can only add if there is at least one column with name.
@ -568,7 +645,7 @@ define('pgadmin.node.table', [
subnode: pgBrowser.Nodes['check_constraints'].model,
editable: false, type: 'collection',
group: gettext('Check'), mode: ['edit', 'create'],
canEdit: true, canDelete: true,
canEdit: true, canDelete: true, deps:['is_partitioned'],
control: 'unique-col-collection',
canAdd: true,
columns : ['name', 'consrc']
@ -578,10 +655,22 @@ define('pgadmin.node.table', [
subnode: pgBrowser.Nodes['unique_constraint'].model,
editable: false, type: 'collection',
group: gettext('Unique'), mode: ['edit', 'create'],
canEdit: true, canDelete: true,
canEdit: true, canDelete: true, deps:['is_partitioned'],
control: 'unique-col-collection',
columns : ['name', 'columns'],
canAdd: true,
canAdd: function(m) {
if (m.get('is_partitioned')) {
setTimeout(function() {
var coll = m.get('unique_constraint');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
return false;
}
return true;
},
canAddRow: function(m) {
// User can only add if there is at least one column with name.
var columns = m.get('columns');
@ -593,10 +682,22 @@ define('pgadmin.node.table', [
subnode: pgBrowser.Nodes['exclusion_constraint'].model,
editable: false, type: 'collection',
group: gettext('Exclude'), mode: ['edit', 'create'],
canEdit: true, canDelete: true,
canEdit: true, canDelete: true, deps:['is_partitioned'],
control: 'unique-col-collection',
columns : ['name', 'columns', 'constraint'],
canAdd: true,
canAdd: function(m) {
if (m.get('is_partitioned')) {
setTimeout(function() {
var coll = m.get('exclude_constraint');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
return false;
}
return true;
},
canAddRow: function(m) {
// User can only add if there is at least one column with name.
var columns = m.get('columns');
@ -607,7 +708,7 @@ define('pgadmin.node.table', [
id: 'typname', label: gettext('Of type'), type: 'text',
control: 'node-ajax-options', mode: ['properties', 'create', 'edit'],
disabled: 'checkOfType', url: 'get_oftype', group: gettext('Advanced'),
deps: ['coll_inherits'], transform: function(data, cell) {
deps: ['coll_inherits', 'is_partitioned'], transform: function(data, cell) {
var control = cell || this,
m = control.model;
m.of_types_tables = data;
@ -710,11 +811,217 @@ define('pgadmin.node.table', [
type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
disabled: 'isLikeDisable', group: gettext('Like')
}]
},{
id: 'partition_type', label:gettext('Partition Type'),
editable: false, type: 'select2', select2: {allowClear: false},
group: 'partition', deps: ['is_partitioned'],
options:[{
label: 'Range', value: 'range'
},{
label: 'List', value: 'list'
}],
mode:['create'],
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
},
disabled: function(m) {
if (!m.isNew() || !m.get('is_partitioned'))
return true;
return false;
}
},{
id: 'partition_keys', label:gettext('Partition Keys'),
model: Backform.PartitionKeyModel,
subnode: Backform.PartitionKeyModel,
editable: true, type: 'collection',
group: 'partition', mode: ['create'],
deps: ['is_partitioned', 'partition_type'],
canEdit: false, canDelete: true,
control: 'sub-node-collection',
canAdd: function(m) {
if (m.isNew() && m.get('is_partitioned'))
return true;
return false;
},
canAddRow: function(m) {
var columns = m.get('columns');
var max_row_count = 1000;
if (m.get('partition_type') && m.get('partition_type') == 'list')
max_row_count = 1;
return (m.get('partition_keys') &&
m.get('partition_keys').length < max_row_count &&
_.some(columns.pluck('name'))
);
},
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
},
disabled: function(m) {
if (m.get('partition_keys') && m.get('partition_keys').models.length > 0) {
setTimeout(function () {
var coll = m.get('partition_keys');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
}
}
},{
id: 'partition_scheme', label: gettext('Partition Scheme'),
type: 'note', group: 'partition', mode: ['edit'],
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
},
disabled: function(m) {
if (!m.isNew()) {
this.text = m.get('partition_scheme');
}
}
},{
id: 'partition_key_note', label: gettext('Partition Keys'),
type: 'note', group: 'partition', mode: ['create'],
text: [
'<br>&nbsp;&nbsp;',
gettext('Partition table supports two types of keys:'),
'<br><ul><li>',
gettext('Column: User can select any column from the list of available columns.'),
'</li><li>',
gettext('Expression: User can specify an expression to create the partition key.'),
'<br><p>',
gettext('Example'),
':',
gettext("Let's say we want to partition a table per year on the column 'saledate', having datatype 'date/timestamp'; then we need to specify the expression 'extract(YEAR from saledate)' as the partition key."),
'</p></li></ul>'
].join(''),
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
}
}, {
id: 'partitions', label:gettext('Partitions'),
model: Backform.PartitionsModel,
subnode: Backform.PartitionsModel,
editable: true, type: 'collection',
group: 'partition', mode: ['edit', 'create'],
deps: ['is_partitioned', 'partition_type'],
canEdit: false, canDelete: true,
customDeleteTitle: gettext('Detach Partition'),
customDeleteMsg: gettext('Are you sure you wish to detach this partition?'),
columns:['is_attach', 'partition_name', 'values_from', 'values_to', 'values_in'],
control: Backform.SubNodeCollectionControl.extend({
row: Backgrid.PartitionRow,
initialize: function() {
Backform.SubNodeCollectionControl.prototype.initialize.apply(this, arguments);
var self = this;
if (!this.model.isNew()) {
var node = this.field.get('schema_node'),
node_info = this.field.get('node_info');
// Make ajax call to get the tables to be attached
$.ajax({
url: node.generate_url.apply(
node, [
null, 'get_attach_tables', this.field.get('node_data'),
true, node_info
]),
type: 'GET',
async: false,
success: function(res) {
if (res.success == 1) {
self.model.table_options = res.data;
}
else {
alertify.alert(
'Error fetching tables to be attached', res.data.result
);
}
},
error: function(e) {
var errmsg = $.parseJSON(e.responseText);
alertify.alert('Error fetching tables to be attached.', errmsg.errormsg);
}
});
}
}
}
),
canAdd: function(m) {
if (m.get('is_partitioned'))
return true;
return false;
},
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
},
disabled: function(m) {
if (m.isNew() && m.get('partitions') && m.get('partitions').models.length > 0) {
setTimeout(function () {
var coll = m.get('partitions');
coll.remove(coll.filter(function(model) {
return true;
}));
}, 10);
}
}
},{
id: 'partition_note', label: gettext('Partitions'),
type: 'note', group: 'partition',
text: [
'<ul>',
' <li>',
gettext('Create a table: User can create multiple partitions while creating a new partitioned table. The operation switch is disabled in this scenario.'),
'</li><li>',
gettext('Edit existing table: User can create/attach/detach multiple partitions. In the attach operation, the user can select a table from the list of suitable tables to be attached.'),
'</li><li>',
gettext('From/To/In input: Values for these fields must be quoted with single quotes. For more than one partition key, values must be comma (,) separated.'),
'<br>',
gettext('Example'),
':<ul><li>',
gettext("From/To: Enabled for range partitions. Consider a partitioned table with multiple keys of type Integer; then values should be specified like '100','200'."),
'</li><li> ',
gettext('In: Enabled for list partitions. Values must be comma (,) separated and quoted with single quotes.'),
'</li></ul></li></ul>'
].join(''),
visible: function(m) {
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
&& !_.isUndefined(m.node_info.server.version) &&
m.node_info.server.version >= 100000)
return true;
return false;
}
},{
// Here - we will create tab control for storage parameters
// (auto vacuum).
type: 'nested', control: 'tab', group: gettext('Parameter'),
mode: ['edit', 'create'],
mode: ['edit', 'create'], deps: ['is_partitioned'],
schema: Backform.VacuumSettingsSchema
},{
id: 'relacl_str', label: gettext('Privileges'), disabled: 'inSchema',
@ -741,35 +1048,46 @@ define('pgadmin.node.table', [
msg = undefined,
name = this.get('name'),
schema = this.get('schema'),
relowner = this.get('relowner');
relowner = this.get('relowner'),
is_partitioned = this.get('is_partitioned'),
partition_keys = this.get('partition_keys');
// If nothing to validate or VacuumSetting keys then
// return from here
if ( keys && (keys.length == 0
|| _.indexOf(keys, 'autovacuum_enabled') != -1
|| _.indexOf(keys, 'toast_autovacuum_enabled') != -1) ) {
return null;
}
// Have to clear existing validation before initiating current state validation only
this.errorModel.clear();
if (_.isUndefined(name) || _.isNull(name) ||
String(name).replace(/^\s+|\s+$/g, '') == '') {
if (
_.isUndefined(name) || _.isNull(name) ||
String(name).replace(/^\s+|\s+$/g, '') == ''
) {
msg = gettext('Table name cannot be empty.');
this.errorModel.set('name', msg);
return msg;
} else if (_.isUndefined(schema) || _.isNull(schema) ||
String(schema).replace(/^\s+|\s+$/g, '') == '') {
}
this.errorModel.unset('name');
if (
_.isUndefined(schema) || _.isNull(schema) ||
String(schema).replace(/^\s+|\s+$/g, '') == ''
) {
msg = gettext('Table schema cannot be empty.');
this.errorModel.set('schema', msg);
return msg;
} else if (_.isUndefined(relowner) || _.isNull(relowner) ||
String(relowner).replace(/^\s+|\s+$/g, '') == '') {
}
this.errorModel.unset('schema');
if (
_.isUndefined(relowner) || _.isNull(relowner) ||
String(relowner).replace(/^\s+|\s+$/g, '') == ''
) {
msg = gettext('Table owner cannot be empty.');
this.errorModel.set('relowner', msg);
return msg;
}
this.errorModel.unset('relowner');
if (
is_partitioned && this.isNew() &&
!_.isNull(partition_keys) && partition_keys.length <= 0
) {
msg = gettext('Please specify at least one key for the partitioned table.');
this.errorModel.set('partition_keys', msg);
return msg;
}
this.errorModel.unset('partition_keys');
return null;
},
// We will disable everything if we are under catalog node
@ -797,7 +1115,15 @@ define('pgadmin.node.table', [
},
// Oftype is defined?
checkInheritance: function(m) {
// coll_inherits || typname
// Disabled if it is partitioned table
if (m.get('is_partitioned')) {
setTimeout( function() {
m.set('coll_inherits', []);
}, 10);
return true;
}
// coll_inherits || typname
if(!m.inSchema.apply(this, [m]) &&
( _.isUndefined(m.get('typname')) ||
_.isNull(m.get('typname')) ||
@ -841,7 +1167,15 @@ define('pgadmin.node.table', [
},
// We will disable it if Inheritance is defined
checkOfType: function(m) {
//coll_inherits || typname
// Disabled if it is partitioned table
if (m.get('is_partitioned')) {
setTimeout( function() {
m.set('typname', undefined);
}, 10);
return true;
}
//coll_inherits || typname
if(!m.inSchemaWithModelCheck.apply(this, [m]) &&
(_.isUndefined(m.get('coll_inherits')) ||
_.isNull(m.get('coll_inherits')) ||
@ -967,6 +1301,116 @@ define('pgadmin.node.table', [
return false;
}
}
},
onTableUpdated: function(_node, _oldNodeData, _newNodeData) {
if (
_newNodeData.is_partitioned && 'affected_partitions' in _newNodeData
) {
var partitions = _newNodeData.affected_partitions,
idx, node_info, self = this,
newPartitionsIDs = [],
insertChildTreeNodes = [],
insertChildrenNodes = function() {
if (!insertChildTreeNodes.length)
return;
var option = insertChildTreeNodes.pop();
pgBrowser.addChildTreeNodes(
option.treeHierarchy, option.parent, option.type,
option.childrenIDs, insertChildrenNodes
);
};
if ('detached' in partitions && partitions.detached.length > 0) {
// Remove it from the partition collections node first
pgBrowser.removeChildTreeNodesById(
_node, 'coll-partition', _.map(
partitions.detached, function(_d) { return parseInt(_d.oid); }
)
);
var schemaNode = pgBrowser.findParentTreeNodeByType(
_node, 'schema'
),
detachedBySchema = _.groupBy(
partitions.detached,
function(_d) { return parseInt(_d.schema_id); }
), childIDs;
for (var key in detachedBySchema) {
schemaNode = pgBrowser.findSiblingTreeNode(schemaNode, key);
if (schemaNode) {
childIDs = _.map(
detachedBySchema[key],
function(_d) { return parseInt(_d.oid); }
);
var tablesCollNode = pgBrowser.findChildCollectionTreeNode(
schemaNode, 'coll-table'
);
if (tablesCollNode) {
insertChildTreeNodes.push({
'parent': tablesCollNode,
'type': 'table',
'treeHierarchy': pgAdmin.Browser.Nodes.schema.getTreeNodeHierarchy(schemaNode),
'childrenIDs': _.clone(childIDs)
});
}
}
}
}
if ('attached' in partitions && partitions.attached.length > 0) {
var schemaNode = pgBrowser.findParentTreeNodeByType(
_node, 'schema'
),
attachedBySchema = _.groupBy(
partitions.attached,
function(_d) { return parseInt(_d.schema_id); }
), childIDs;
for (var key in attachedBySchema) {
schemaNode = pgBrowser.findSiblingTreeNode(schemaNode, key);
if (schemaNode) {
childIDs = _.map(
attachedBySchema[key],
function(_d) { return parseInt(_d.oid); }
);
// Remove it from the table collections node first
pgBrowser.removeChildTreeNodesById(
schemaNode, 'coll-table', childIDs
);
}
newPartitionsIDs = newPartitionsIDs.concat(childIDs);
}
}
if ('created' in partitions && partitions.created.length > 0) {
_.each(partitions.created, function(_data) {
newPartitionsIDs.push(_data.oid);
});
}
if (newPartitionsIDs.length) {
node_info = self.getTreeNodeHierarchy(_node);
var partitionsCollNode = pgBrowser.findChildCollectionTreeNode(
_node, 'coll-partition'
);
if (partitionsCollNode) {
insertChildTreeNodes.push({
'parent': partitionsCollNode,
'type': 'partition',
'treeHierarchy': self.getTreeNodeHierarchy(_node),
'childrenIDs': newPartitionsIDs
});
}
}
insertChildrenNodes();
}
}
});
}
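The 'Partition Keys' and 'Partitions' help notes in this dialog describe column vs. expression keys and the quoting of From/To/In values. A rough SQL sketch of what those settings map to on PostgreSQL 10 (table and partition names here are hypothetical, not part of this change):

    -- Range-partitioned table using an expression key (one partition per year of 'saledate').
    CREATE TABLE sales (country text, sales bigint, saledate date)
        PARTITION BY RANGE (extract(YEAR FROM saledate));

    -- 'From'/'To' values as entered in the dialog, quoted with single quotes.
    CREATE TABLE sale_2017 PARTITION OF sales FOR VALUES FROM ('2017') TO ('2018');

    -- List-partitioned variant; the 'In' field takes comma separated, quoted values.
    CREATE TABLE sales_by_country (country text, sales bigint, saledate date)
        PARTITION BY LIST (country);
    CREATE TABLE sale_uk_de PARTITION OF sales_by_country FOR VALUES IN ('UK', 'DE');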

View File

@ -0,0 +1,2 @@
ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} ATTACH PARTITION {{conn|qtIdent(data.schema, data.name)}}
{{ data.partition_value }};
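With hypothetical values filled in (parent 'public.sales', candidate table 'public.sale_2014', and a range bound built from the dialog's From/To fields), the template above would render to something like:

    ALTER TABLE public.sales ATTACH PARTITION public.sale_2014
        FOR VALUES FROM ('2014-01-01') TO ('2014-12-31');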

View File

@ -0,0 +1,9 @@
{#============= Checks if it is a partitioned table =============#}
{% if tid %}
SELECT
CASE WHEN c.relkind = 'p' THEN True ELSE False END As ptable
FROM
pg_class c
WHERE
c.oid = {{ tid }}::oid
{% endif %}
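For context: relkind = 'p' (introduced in PostgreSQL 10) marks a partitioned table, while an attached leaf partition keeps relkind = 'r' with relispartition = true. A quick check against hypothetical tables:

    SELECT relname, relkind, relispartition
    FROM pg_class
    WHERE relname IN ('sales', 'sale_2014');
    -- sales     | p | f   (partitioned parent)
    -- sale_2014 | r | t   (attached partition)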

View File

@ -0,0 +1,30 @@
{% import 'table/sql/macros/constraints.macro' as CONSTRAINTS %}
{#===========================================#}
{#====== MAIN TABLE TEMPLATE STARTS HERE ======#}
{#===========================================#}
{### CREATE TABLE STATEMENT FOR partitions ###}
CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data.schema, data.name)}}{% if data.relispartition is defined and data.relispartition %} PARTITION OF {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}}{% endif %}
{# Macro to render for constraints #}
{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %}
( {% endif %}
{% if data.primary_key|length > 0 %}{{CONSTRAINTS.PRIMARY_KEY(conn, data.primary_key[0])}}{% endif %}{% if data.unique_constraint|length > 0 %}{% if data.primary_key|length > 0 %},{% endif %}
{{CONSTRAINTS.UNIQUE(conn, data.unique_constraint)}}{% endif %}{% if data.foreign_key|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 %},{% endif %}
{{CONSTRAINTS.FOREIGN_KEY(conn, data.foreign_key)}}{% endif %}{% if data.check_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 %},{% endif %}
{{CONSTRAINTS.CHECK(conn, data.check_constraint)}}{% endif %}{% if data.exclude_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 %},{% endif %}
{{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %}
{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %}
)
{% endif %}
{{ data.partition_value }}{% if data.is_partitioned is defined and data.is_partitioned %}
PARTITION BY {{ data.partition_scheme }}{% endif %};
{### Alter SQL for Owner ###}
{% if data.relowner %}
ALTER TABLE {{conn|qtIdent(data.schema, data.name)}}
OWNER to {{conn|qtIdent(data.relowner)}};
{% endif %}
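As an illustration with assumed names (schema 'public', parent 'sales', owner 'postgres'), creating a new range partition through this template would produce roughly:

    CREATE TABLE public.sale_2014 PARTITION OF public.sales
    FOR VALUES FROM ('2014-01-01') TO ('2014-12-31');
    ALTER TABLE public.sale_2014
        OWNER to postgres;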

View File

@ -0,0 +1 @@
ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} DETACH PARTITION {{conn|qtIdent(data.schema, data.name)}};

View File

@ -0,0 +1,23 @@
SELECT oid, quote_ident(nspname)||'.'||quote_ident(relname) AS table_name FROM
(SELECT
r.oid, r.relname, n.nspname, array_agg(a.attname) attnames, array_agg(a.atttypid) atttypes
FROM
(SELECT oid, relname, relnamespace FROM pg_catalog.pg_class
WHERE relkind in ('r', 'p') AND NOT relispartition) r
JOIN (SELECT oid AS nspoid, nspname FROM
pg_catalog.pg_namespace WHERE nspname NOT LIKE E'pg\_%') n
ON (r.relnamespace = n.nspoid)
JOIN (SELECT attrelid, attname, atttypid FROM
pg_catalog.pg_attribute WHERE attnum > 0 ORDER BY attrelid, attnum) a
ON (r.oid = a.attrelid)
GROUP BY r.oid, r.relname, r.relnamespace, n.nspname) all_tables
JOIN
(SELECT
attrelid, array_agg(attname) attnames, array_agg(atttypid) atttypes
FROM
(SELECT * FROM pg_catalog.pg_attribute
WHERE attrelid = {{ tid }} AND attnum > 0
ORDER BY attrelid, attnum) attributes
GROUP BY attrelid) current_table ON current_table.attrelid != all_tables.oid
AND current_table.attnames = all_tables.attnames
AND current_table.atttypes = all_tables.atttypes
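The query above matches attach candidates by identical column name/type arrays and excludes anything that is already a partition. A regular table that mirrors the parent's column layout therefore shows up as a candidate and can be attached (names assumed):

    CREATE TABLE public.attach_sale_2010 (country text, sales bigint, saledate date NOT NULL);
    ALTER TABLE public.sales ATTACH PARTITION public.attach_sale_2010
        FOR VALUES FROM ('2010-01-01') TO ('2010-12-31');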

View File

@ -0,0 +1,15 @@
SELECT rel.oid, rel.relname AS name,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers,
pg_get_expr(rel.relpartbound, rel.oid) AS partition_value,
rel.relnamespace AS schema_id,
nsp.nspname AS schema_name,
(CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned,
(CASE WHEN rel.relkind = 'p' THEN pg_get_partkeydef(rel.oid::oid) ELSE '' END) AS partition_scheme
FROM
(SELECT * FROM pg_inherits WHERE inhparent = {{ tid }}::oid) inh
LEFT JOIN pg_class rel ON inh.inhrelid = rel.oid
LEFT JOIN pg_namespace nsp ON rel.relnamespace = nsp.oid
WHERE rel.relispartition
{% if ptid %} AND rel.oid = {{ ptid }}::OID {% endif %}
ORDER BY rel.relname;

View File

@ -0,0 +1,82 @@
SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str,
(CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE
(SELECT sp.spcname FROM pg_database dtb
JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid
WHERE dtb.oid = {{ did }}::oid)
END) as spcname,
(select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as parent_schema,
nsp.nspname as schema,
pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relispartition,
rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey,
EXISTS(select 1 FROM pg_trigger
JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger'
JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion'
WHERE tgrelid=rel.oid) AS isrepl,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount,
(SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN
quote_ident(nspname)||'.'||quote_ident(c.relname)
ELSE quote_ident(c.relname) END AS inherited_tables
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhparent
JOIN pg_namespace n ON n.oid=c.relnamespace
WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits,
(SELECT count(*)
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhparent
JOIN pg_namespace n ON n.oid=c.relnamespace
WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt,
(CASE WHEN rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence,
substring(array_to_string(rel.reloptions, ',') FROM 'fillfactor=([0-9]*)') AS fillfactor,
(CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true')
THEN true ELSE false END) AS autovacuum_enabled,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age,
(CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true')
THEN true ELSE false END) AS toast_autovacuum_enabled,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age,
array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str,
array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str,
rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname,
(CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable,
-- Added for pgAdmin4
(CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom,
(CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum,
(SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels,
(CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ELSE false END) AS is_sys_table,
-- Added for partition table
(CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned,
(CASE WHEN rel.relkind = 'p' THEN pg_get_partkeydef(rel.oid::oid) ELSE '' END) AS partition_scheme,
{% if ptid %}
(CASE WHEN rel.relispartition THEN pg_get_expr(rel.relpartbound, {{ ptid }}::oid) ELSE '' END) AS partition_value,
(SELECT relname FROM pg_class WHERE oid = {{ tid }}::oid) AS partitioned_table_name
{% else %}
pg_get_expr(rel.relpartbound, rel.oid) AS partition_value
{% endif %}
FROM pg_class rel
LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace
LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass)
LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p'
LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid
LEFT JOIN pg_type typ ON rel.reloftype=typ.oid
LEFT JOIN pg_inherits inh ON inh.inhrelid = rel.oid
LEFT JOIN pg_namespace nsp ON rel.relnamespace = nsp.oid
WHERE rel.relispartition AND inh.inhparent = {{ tid }}::oid
{% if ptid %} AND rel.oid = {{ ptid }}::oid {% endif %}
ORDER BY rel.relname;

View File

@ -0,0 +1,46 @@
{### SQL to fetch privileges for tablespace ###}
SELECT 'relacl' as deftype, COALESCE(gt.rolname, 'PUBLIC') grantee, g.rolname grantor,
array_agg(privilege_type) as privileges, array_agg(is_grantable) as grantable
FROM
(SELECT
d.grantee, d.grantor, d.is_grantable,
CASE d.privilege_type
WHEN 'CONNECT' THEN 'c'
WHEN 'CREATE' THEN 'C'
WHEN 'DELETE' THEN 'd'
WHEN 'EXECUTE' THEN 'X'
WHEN 'INSERT' THEN 'a'
WHEN 'REFERENCES' THEN 'x'
WHEN 'SELECT' THEN 'r'
WHEN 'TEMPORARY' THEN 'T'
WHEN 'TRIGGER' THEN 't'
WHEN 'TRUNCATE' THEN 'D'
WHEN 'UPDATE' THEN 'w'
WHEN 'USAGE' THEN 'U'
ELSE 'UNKNOWN'
END AS privilege_type
FROM
(SELECT rel.relacl
FROM pg_class rel
LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace
LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p'
LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid
LEFT JOIN pg_type typ ON rel.reloftype=typ.oid
WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid
AND rel.oid = {{ tid }}::oid
) acl,
(SELECT (d).grantee AS grantee, (d).grantor AS grantor, (d).is_grantable
AS is_grantable, (d).privilege_type AS privilege_type FROM (SELECT
aclexplode(rel.relacl) as d
FROM pg_class rel
LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace
LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p'
LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid
LEFT JOIN pg_type typ ON rel.reloftype=typ.oid
WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid
AND rel.oid = {{ tid }}::oid
) a) d
) d
LEFT JOIN pg_catalog.pg_roles g ON (d.grantor = g.oid)
LEFT JOIN pg_catalog.pg_roles gt ON (d.grantee = gt.oid)
GROUP BY g.rolname, gt.rolname

View File

@ -0,0 +1,17 @@
{% import 'table/sql/macros/db_catalogs.macro' as CATALOG %}
SELECT c.oid, c.relname , nspname,
CASE WHEN nspname NOT LIKE E'pg\_%' THEN
quote_ident(nspname)||'.'||quote_ident(c.relname)
ELSE quote_ident(c.relname)
END AS inherits
FROM pg_class c
JOIN pg_namespace n
ON n.oid=c.relnamespace
WHERE relkind='r' AND NOT relispartition
{% if not show_system_objects %}
{{ CATALOG.VALID_CATALOGS(server_type) }}
{% endif %}
{% if tid %}
AND c.oid != tid
{% endif %}
ORDER BY relnamespace, c.relname

View File

@ -0,0 +1,5 @@
SELECT rel.oid as tid
FROM pg_class rel
WHERE rel.relkind IN ('r','s','t','p')
AND rel.relnamespace = {{ scid }}::oid
AND rel.relname = {{data.name|qtLiteral}}

View File

@ -0,0 +1,8 @@
SELECT
rel.relname AS name
FROM
pg_class rel
WHERE
rel.relkind IN ('r','s','t','p')
AND rel.relnamespace = {{ scid }}::oid
AND rel.oid = {{ tid }}::oid;

View File

@ -0,0 +1,9 @@
SELECT rel.oid, rel.relname AS name,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers,
(CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned
FROM pg_class rel
WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid
AND NOT rel.relispartition
{% if tid %} AND rel.oid = {{tid}}::OID {% endif %}
ORDER BY rel.relname;

View File

@ -0,0 +1,73 @@
SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str,
(CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE
(SELECT sp.spcname FROM pg_database dtb
JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid
WHERE dtb.oid = {{ did }}::oid)
END) as spcname,
(select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as schema,
pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relkind,
(CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned,
rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey,
EXISTS(select 1 FROM pg_trigger
JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger'
JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion'
WHERE tgrelid=rel.oid) AS isrepl,
(SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount,
(SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN
quote_ident(nspname)||'.'||quote_ident(c.relname)
ELSE quote_ident(c.relname) END AS inherited_tables
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhparent
JOIN pg_namespace n ON n.oid=c.relnamespace
WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits,
(SELECT count(*)
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhparent
JOIN pg_namespace n ON n.oid=c.relnamespace
WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt,
(CASE WHEN rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence,
substring(array_to_string(rel.reloptions, ',') FROM 'fillfactor=([0-9]*)') AS fillfactor,
(CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true')
THEN true ELSE false END) AS autovacuum_enabled,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age,
substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age,
(CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true')
THEN true ELSE false END) AS toast_autovacuum_enabled,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age,
substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age,
array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str,
array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str,
rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname,
(CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable,
-- Added for pgAdmin4
(CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom,
(CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum,
(SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels,
(CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ELSE false END) AS is_sys_table
-- Added for partition table
{% if tid %}, (CASE WHEN rel.relkind = 'p' THEN pg_get_partkeydef({{ tid }}::oid) ELSE '' END) AS partition_scheme {% endif %}
FROM pg_class rel
LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace
LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass)
LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p'
LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid
LEFT JOIN pg_type typ ON rel.reloftype=typ.oid
WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid
AND NOT rel.relispartition
{% if tid %} AND rel.oid = {{ tid }}::oid {% endif %}
ORDER BY rel.relname;

View File

@ -57,7 +57,8 @@ CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data
{{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %}
{% if data.like_relation or data.coll_inherits or data.columns|length > 0 or data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %}
)
){% if data.relkind is defined and data.relkind == 'p' %} PARTITION BY {{ data.partition_scheme }} {% endif %}
{% endif %}
{### If we are inheriting it from another table(s) ###}
{% if data.coll_inherits %}

View File

@ -1,8 +1,9 @@
{# ===== fetch new assigned schema oid ===== #}
SELECT
c.relnamespace as scid
c.relnamespace as scid, nsp.nspname as nspname
FROM
pg_class c
LEFT JOIN pg_namespace nsp ON nsp.oid = c.relnamespace
WHERE
{% if tid %}
c.oid = {{tid}}::oid;

View File

@ -14,6 +14,7 @@ from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
@ -23,7 +24,19 @@ class TableAddTestCase(BaseTestGenerator):
""" This class will add new collation under schema node. """
scenarios = [
# Fetching default URL for table node.
('Fetch table Node URL', dict(url='/browser/table/obj/'))
('Create Table', dict(url='/browser/table/obj/')),
('Create Range partitioned table with 2 partitions',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='range'
)
),
('Create List partitioned table with 2 partitions',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='list'
)
)
]
def setUp(self):
@ -43,6 +56,19 @@ class TableAddTestCase(BaseTestGenerator):
if not schema_response:
raise Exception("Could not find the schema to add a table.")
self.is_partition = False
if hasattr(self, 'server_min_version'):
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add "
"partitioned table.")
if server_con["data"]["version"] < self.server_min_version:
message = "Partitioned table are not supported by " \
"PPAS/PG 10.0 and below."
self.skipTest(message)
else:
self.is_partition = True
def runTest(self):
""" This function will add table under schema node. """
db_user = self.server["username"]
@ -68,7 +94,7 @@ class TableAddTestCase(BaseTestGenerator):
"seclabels": []
},
{"name": "DOJ",
"cltype": "date[]",
"cltype": "date",
"attacl": [],
"is_primary_key": False,
"attoptions": [],
@ -76,7 +102,7 @@ class TableAddTestCase(BaseTestGenerator):
}
],
"exclude_constraint": [],
"fillfactor": "11",
"fillfactor": "",
"hastoasttable": True,
"like_constraints": True,
"like_default_value": True,
@ -166,6 +192,35 @@ class TableAddTestCase(BaseTestGenerator):
}
]
}
if self.is_partition:
data['partition_type'] = self.partition_type
data['is_partitioned'] = True
if self.partition_type == 'range':
data['partitions'] = \
[{'values_from': "'2010-01-01'",
'values_to': "'2010-12-31'",
'is_attach': False,
'partition_name': 'emp_2010'
},
{'values_from': "'2011-01-01'",
'values_to': "'2011-12-31'",
'is_attach': False,
'partition_name': 'emp_2011'
}]
else:
data['partitions'] = \
[{'values_in': "'2012-01-01', '2012-12-31'",
'is_attach': False,
'partition_name': 'emp_2012'
},
{'values_in': "'2013-01-01', '2013-12-31'",
'is_attach': False,
'partition_name': 'emp_2013'
}]
data['partition_keys'] = \
[{'key_type': 'column', 'pt_column': 'DOJ'}]
# Add table
response = self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +

View File

@ -23,7 +23,7 @@ class TableDeleteTestCase(BaseTestGenerator):
"""This class will delete new table under schema node."""
scenarios = [
# Fetching default URL for table node.
('Fetch table Node URL', dict(url='/browser/table/obj/'))
('Delete Table', dict(url='/browser/table/obj/'))
]
def setUp(self):

View File

@ -14,6 +14,7 @@ from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
@ -24,7 +25,49 @@ class TableUpdateTestCase(BaseTestGenerator):
"""This class will add new collation under schema node."""
scenarios = [
# Fetching default URL for table node.
('Fetch table Node URL', dict(url='/browser/table/obj/'))
('Update Table', dict(url='/browser/table/obj/')),
('Create partitions of existing range partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='range',
mode='create'
)
),
('Create partitions of existing list partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='list',
mode='create'
)
),
('Detach partition from existing range partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='range',
mode='detach'
)
),
('Detach partition from existing list partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='list',
mode='detach'
)
),
('Attach partition to existing range partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='range',
mode='attach'
)
),
('Attach partition to existing list partitioned table',
dict(url='/browser/table/obj/',
server_min_version=100000,
partition_type='list',
mode='attach'
)
)
]
def setUp(self):
@ -44,9 +87,31 @@ class TableUpdateTestCase(BaseTestGenerator):
if not schema_response:
raise Exception("Could not find the schema to add a table.")
self.table_name = "test_table_put_%s" % (str(uuid.uuid4())[1:6])
self.table_id = tables_utils.create_table(self.server, self.db_name,
self.schema_name,
self.table_name)
self.is_partition = False
if hasattr(self, 'server_min_version'):
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add "
"partitioned table.")
if server_con["data"]["version"] < self.server_min_version:
message = "Partitioned table are not supported by " \
"PPAS/PG 10.0 and below."
self.skipTest(message)
else:
self.is_partition = True
self.table_id = tables_utils.create_table_for_partition(
self.server,
self.db_name,
self.schema_name,
self.table_name,
'partitioned',
self.partition_type)
else:
self.table_id = tables_utils.create_table(self.server, self.db_name,
self.schema_name,
self.table_name)
def runTest(self):
"""This function will fetch added table under schema node."""
@ -54,10 +119,18 @@ class TableUpdateTestCase(BaseTestGenerator):
self.table_id)
if not table_response:
raise Exception("Could not find the table to update.")
data = {
"description": "This is test comment for table",
"id": self.table_id
}
if self.is_partition:
data = {"id": self.table_id}
tables_utils.set_partition_data(
self.server, self.db_name, self.schema_name, self.table_name,
self.partition_type, data, self.mode)
else:
data = {
"description": "This is test comment for table",
"id": self.table_id
}
response = self.tester.put(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) + '/' +

View File

@ -85,3 +85,153 @@ def verify_table(server, db_name, table_id):
except Exception:
traceback.print_exc(file=sys.stderr)
raise
def create_table_for_partition(server, db_name, schema_name, table_name,
table_type, partition_type, partition_name=None):
"""
This function creates a partitioned/partition/regular table
under the provided schema.
:param server: server details
:param db_name: database name
:param schema_name: schema name
:param table_name: table name
:param table_type: regular/partitioned/partition
:param partition_type: partition table type (range/list)
:param partition_name: Partition Name
:return table_id: table id
"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'])
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
pg_cursor = connection.cursor()
query = ''
if table_type == 'partitioned':
if partition_type == 'range':
query = "CREATE TABLE %s.%s(country text, sales bigint, " \
"saledate date) PARTITION BY RANGE(saledate)" % \
(schema_name, table_name)
else:
query = "CREATE TABLE %s.%s(country text, sales bigint, " \
"saledate date) PARTITION BY LIST(saledate)" % \
(schema_name, table_name)
elif table_type == 'partition':
if partition_type == 'range':
query = "CREATE TABLE %s.%s PARTITION OF %s.%s " \
"FOR VALUES FROM ('2012-01-01') TO ('2012-12-31')" % \
(schema_name, partition_name, schema_name, table_name)
else:
query = "CREATE TABLE %s.%s PARTITION OF %s.%s " \
"FOR VALUES IN ('2013-01-01')" % \
(schema_name, partition_name, schema_name, table_name)
# To fetch the OID, the table name is actually the partition name
table_name = partition_name
elif table_type == 'regular':
query = "CREATE TABLE %s.%s(country text, sales bigint," \
"saledate date NOT NULL)" % (schema_name, table_name)
pg_cursor.execute(query)
connection.set_isolation_level(old_isolation_level)
connection.commit()
# Get 'oid' from newly created table
pg_cursor.execute("select oid from pg_class where relname='%s'" %
table_name)
table = pg_cursor.fetchone()
table_id = ''
if table:
table_id = table[0]
connection.close()
return table_id
except Exception:
traceback.print_exc(file=sys.stderr)
raise
def set_partition_data(server, db_name, schema_name, table_name,
partition_type, data, mode):
"""
This function is used to set the partitions data on the basis of
partition type and action.
:param server: server details
:param db_name: Database Name
:param schema_name: Schema Name
:param table_name: Table Name
:param partition_type: range or list
:param data: Data
:param mode: create/detach
:return:
"""
data['partitions'] = dict()
if partition_type == 'range' and mode == 'create':
data['partitions'].update(
{'added': [{'values_from': "'2014-01-01'",
'values_to': "'2014-12-31'",
'is_attach': False,
'partition_name': 'sale_2014'},
{'values_from': "'2015-01-01'",
'values_to': "'2015-12-31'",
'is_attach': False,
'partition_name': 'sale_2015'
}]
}
)
elif partition_type == 'list' and mode == 'create':
data['partitions'].update(
{'added': [{'values_in': "'2016-01-01', '2016-12-31'",
'is_attach': False,
'partition_name': 'sale_2016'},
{'values_in': "'2017-01-01', '2017-12-31'",
'is_attach': False,
'partition_name': 'sale_2017'
}]
}
)
elif partition_type == 'range' and mode == 'detach':
partition_id = create_table_for_partition(server, db_name, schema_name,
table_name, 'partition',
partition_type, 'sale_2012')
data['partitions'].update(
{'deleted': [{'oid': partition_id}]
}
)
elif partition_type == 'list' and mode == 'detach':
partition_id = create_table_for_partition(server, db_name, schema_name,
table_name, 'partition',
partition_type, 'sale_2013')
data['partitions'].update(
{'deleted': [{'oid': partition_id}]
}
)
elif partition_type == 'range' and mode == 'attach':
partition_id = create_table_for_partition(server, db_name, schema_name,
'attach_sale_2010', 'regular',
partition_type)
data['partitions'].update(
{'added': [{'values_from': "'2010-01-01'",
'values_to': "'2010-12-31'",
'is_attach': True,
'partition_name': partition_id
}]
}
)
elif partition_type == 'list' and mode == 'attach':
partition_id = create_table_for_partition(server, db_name, schema_name,
'attach_sale_2011', 'regular',
partition_type)
data['partitions'].update(
{'added': [{'values_in': "'2011-01-01'",
'is_attach': True,
'partition_name': partition_id
}]
}
)

View File

@ -30,13 +30,15 @@ define('pgadmin.node.trigger', [
node: 'trigger',
label: gettext('Triggers'),
type: 'coll-trigger',
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
columns: ['name', 'description']
});
};
if (!pgBrowser.Nodes['trigger']) {
pgAdmin.Browser.Nodes['trigger'] = pgAdmin.Browser.Node.extend({
parent_type: ['table', 'view'],
pgAdmin.Browser.Nodes['trigger'] = pgBrowser.Node.extend({
getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
parent_type: ['table', 'view', 'partition'],
collection_type: ['coll-table', 'coll-view'],
type: 'trigger',
label: gettext('Trigger'),
@ -71,6 +73,12 @@ define('pgadmin.node.trigger', [
category: 'create', priority: 4, label: gettext('Trigger...'),
icon: 'wcTabIcon icon-trigger', data: {action: 'create', check: true},
enable: 'canCreate'
},{
name: 'create_trigger_onPartition', node: 'partition', module: this,
applies: ['object', 'context'], callback: 'show_obj_properties',
category: 'create', priority: 4, label: gettext('Trigger...'),
icon: 'wcTabIcon icon-trigger', data: {action: 'create', check: true},
enable: 'canCreate'
},{
name: 'enable_trigger', node: 'trigger', module: this,
applies: ['object', 'context'], callback: 'enable_trigger',
@ -206,6 +214,17 @@ define('pgadmin.node.trigger', [
mode: ['create','edit', 'properties'],
deps: ['is_constraint_trigger'],
disabled: function(m) {
// Disabled if table is a partitioned table.
if (_.has(m, 'node_info') && _.has(m.node_info, 'table') &&
_.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned)
{
setTimeout(function(){
m.set('is_row_trigger', false);
},10);
return true;
}
// If constraint trigger is set to True then row trigger will
// automatically set to True and becomes disable
var is_constraint_trigger = m.get('is_constraint_trigger');
@ -232,7 +251,19 @@ define('pgadmin.node.trigger', [
id: 'is_constraint_trigger', label: gettext('Constraint trigger?'),
type: 'switch', disabled: 'inSchemaWithModelCheck',
mode: ['create','edit', 'properties'],
group: gettext('Definition')
group: gettext('Definition'),
disabled: function(m) {
// Disabled if table is a partitioned table.
if (_.has(m, 'node_info') && _.has(m.node_info, 'table') &&
_.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned)
{
setTimeout(function(){
m.set('is_constraint_trigger', false);
},10);
return true;
}
}
},{
id: 'tgdeferrable', label: gettext('Deferrable?'),
type: 'switch', group: gettext('Definition'),

File diff suppressed because it is too large

View File

@ -20,7 +20,7 @@ define(
$ = $ || window.jQuery || window.$;
Bootstrap = Bootstrap || window.Bootstrap;
pgAdmin.Browser = pgAdmin.Browser || {};
var pgBrowser = pgAdmin.Browser = pgAdmin.Browser || {};
var panelEvents = {};
panelEvents[wcDocker.EVENT.VISIBILITY_CHANGED] = function() {
@ -1756,6 +1756,201 @@ define(
}
},
removeChildTreeNodesById: function(_parentNode, _collType, _childIds) {
var tree = pgBrowser.tree;
if(_parentNode && _collType) {
var children = tree.children(_parentNode),
idx = 0, size = children.length,
childNode, childNodeData;
_parentNode = null;
for (; idx < size; idx++) {
childNode = children.eq(idx);
childNodeData = tree.itemData(childNode);
if (childNodeData._type == _collType) {
_parentNode = childNode;
break;
}
}
}
if (_parentNode) {
var children = tree.children(_parentNode),
idx = 0, size = children.length,
childNode, childNodeData,
prevChildNode;
for (; idx < size; idx++) {
childNode = children.eq(idx);
childNodeData = tree.itemData(childNode);
if (_childIds.indexOf(childNodeData._id) != -1) {
pgBrowser.removeTreeNode(childNode, false, _parentNode);
}
}
return true;
}
return false;
},
removeTreeNode: function(_node, _selectNext, _parentNode) {
var tree = pgBrowser.tree,
nodeToSelect = null;
if (!_node)
return false;
if (_selectNext) {
nodeToSelect = tree.next(_node);
if (!nodeToSelect || !nodeToSelect.length) {
nodeToSelect = tree.prev(_node);
if (!nodeToSelect || !nodeToSelect.length) {
if (!_parentNode) {
nodeToSelect = tree.parent(_node);
} else {
nodeToSelect = _parentNode;
}
}
}
if (nodeToSelect)
tree.select(nodeToSelect);
}
tree.remove(_node);
return true;
},
findSiblingTreeNode: function(_node, _id) {
var tree = pgBrowser.tree,
parentNode = tree.parent(_node),
siblings = tree.children(parentNode),
idx = 0, nodeData, node;
for(; idx < siblings.length; idx++) {
node = siblings.eq(idx);
nodeData = tree.itemData(node);
if (nodeData && nodeData._id == _id)
return node;
}
return null;
},
findParentTreeNodeByType: function(_node, _parentType) {
var tree = pgBrowser.tree,
nodeData,
node = _node;
do {
nodeData = tree.itemData(node);
if (nodeData && nodeData._type == _parentType)
return node;
node = tree.hasParent(node) ? tree.parent(node) : null;
} while (node);
return null;
},
findChildCollectionTreeNode: function(_node, _collType) {
var tree = pgBrowser.tree,
nodeData, idx = 0,
node = _node,
children = _node && tree.children(_node);
if (!children || !children.length)
return null;
for(; idx < children.length; idx++) {
node = children.eq(idx);
nodeData = tree.itemData(node);
if (nodeData && nodeData._type == _collType)
return node;
}
return null;
},
addChildTreeNodes: function(_treeHierarchy, _node, _type, _arrayIds, _callback) {
var module = _type in pgBrowser.Nodes && pgBrowser.Nodes[_type],
childTreeInfo = _arrayIds.length && _.extend(
{}, _.mapObject(
_treeHierarchy, function(_val, _key) {
_val.priority -= 1; return _val;
})
),
arrayChildNodeData = [],
fetchNodeInfo = function(_callback) {
if (!_arrayIds.length) {
if (_callback) {
_callback();
}
return;
}
var childDummyInfo = {
'_id': _arrayIds.pop(), '_type': _type, 'priority': 0
},
childNodeUrl;
childTreeInfo[_type] = childDummyInfo;
childNodeUrl = module.generate_url(
null, 'nodes', childDummyInfo, true, childTreeInfo
);
console.debug("Fetching node information using: ", childNodeUrl);
$.ajax({
url: childNodeUrl,
dataType: "json",
success: function(res) {
if (res.success) {
arrayChildNodeData.push(res.data);
}
fetchNodeInfo(_callback);
},
error: function(xhr, status, error) {
try {
var err = $.parseJSON(xhr.responseText);
if (err.success == 0) {
var alertifyWrapper = new AlertifyWrapper();
alertifyWrapper.error(err.errormsg);
}
} catch (e) {}
fetchNodeInfo(_callback);
}
});
};
if (!module) {
console.warn(
"Developer: Couldn't find the module for the given child: ",
_.clone(arguments)
);
return;
}
if (pgBrowser.tree.wasLoad(_node) || pgBrowser.tree.isLeaf(_node)) {
fetchNodeInfo(function() {
console.log('Append these nodes:', arrayChildNodeData);
_.each(arrayChildNodeData, function(_nodData) {
pgBrowser.Events.trigger(
'pgadmin:browser:tree:add', _nodData, _treeHierarchy
);
});
if (_callback) {
_callback();
}
});
} else {
if (_callback) {
_callback();
}
}
},
_refreshNode: function(_ctx, _d) {
var traverseNodes = function(_d) {
var _ctx = this, idx = 0, ctx, d,

View File

@ -652,18 +652,7 @@ define([
if (res.success == 0) {
pgBrowser.report_error(res.errormsg, res.info);
} else {
var n = t.next(i);
if (!n || !n.length) {
n = t.prev(i);
if (!n || !n.length) {
n = t.parent(i);
t.setInode(n, true);
}
}
t.remove(i);
if (n.length) {
t.select(n);
}
pgBrowser.removeTreeNode(i, true);
}
return true;
},
@ -1320,9 +1309,14 @@ define([
pgBrowser.Events.trigger(
'pgadmin:browser:tree:update',
_old, _new, info, {
success: function() {
success: function(_item, _newNodeData, _oldNodeData) {
pgBrowser.Events.trigger(
'pgadmin:browser:node:updated', _new
'pgadmin:browser:node:updated', _item, _newNodeData,
_oldNodeData
);
pgBrowser.Events.trigger(
'pgadmin:browser:node:' + _newNodeData._type + ':updated',
_item, _newNodeData, _oldNodeData
);
}
}

View File

@ -270,7 +270,7 @@ TODO LIST FOR BACKUP:
// Define list of nodes on which backup context menu option appears
var backup_supported_nodes = [
'database', 'schema', 'table'
'database', 'schema', 'table', 'partition'
];
/**

View File

@ -127,6 +127,10 @@ def initialize_datagrid(cmd_type, obj_type, sid, did, obj_id):
return internal_server_error(errormsg=str(msg))
try:
# if object type is partition then it is nothing but a table.
if obj_type == 'partition':
obj_type = 'table'
# Get the object as per the object type
command_obj = ObjectRegistry.get_object(obj_type, conn_id=conn_id, sid=sid,
did=did, obj_id=obj_id, cmd_type=cmd_type,
@ -201,12 +205,14 @@ def panel(trans_id, is_query_tool, editor_title):
else:
new_browser_tab = 'false'
return render_template("datagrid/index.html", _=gettext, uniqueId=trans_id,
is_query_tool=is_query_tool,
editor_title=editor_title, script_type_url=sURL,
is_desktop_mode=app.PGADMIN_RUNTIME,
is_linux=is_linux_platform,
is_new_browser_tab=new_browser_tab)
return render_template(
"datagrid/index.html", _=gettext, uniqueId=trans_id,
is_query_tool=is_query_tool,
editor_title=editor_title, script_type_url=sURL,
is_desktop_mode=app.PGADMIN_RUNTIME,
is_linux=is_linux_platform,
is_new_browser_tab=new_browser_tab
)
@blueprint.route(
@ -346,6 +352,8 @@ def validate_filter(sid, did, obj_id):
@login_required
def script():
"""render the required javascript"""
return Response(response=render_template("datagrid/js/datagrid.js", _=gettext),
status=200,
mimetype="application/javascript")
return Response(
response=render_template("datagrid/js/datagrid.js", _=gettext),
status=200, mimetype="application/javascript"
)

View File

@ -29,7 +29,7 @@ define([
// Define list of nodes on which view data option appears
var supported_nodes = [
'table', 'view', 'mview',
'foreign-table', 'catalog_object'
'foreign-table', 'catalog_object', 'partition'
],
/* Enable/disable View data menu in tools based

View File

@ -150,7 +150,7 @@ define([
var maintenance_supported_nodes = [
'database', 'table', 'primary_key',
'unique_constraint', 'index'
'unique_constraint', 'index', 'partition'
];
/**
@ -180,7 +180,7 @@ define([
var menus = [{
name: 'maintenance', module: this,
applies: ['tools'], callback: 'callback_maintenace',
applies: ['tools'], callback: 'callback_maintenance',
priority: 10, label: gettext('Maintenance...'),
icon: 'fa fa-wrench', enable: menu_enabled
}];
@ -190,7 +190,7 @@ define([
menus.push({
name: 'maintenance_context_' + maintenance_supported_nodes[idx],
node: maintenance_supported_nodes[idx], module: this,
applies: ['context'], callback: 'callback_maintenace',
applies: ['context'], callback: 'callback_maintenance',
priority: 10, label: gettext('Maintenance...'),
icon: 'fa fa-wrench', enable: menu_enabled
});
@ -201,7 +201,7 @@ define([
/*
Open the dialog for the maintenance functionality
*/
callback_maintenace: function(args, item) {
callback_maintenance: function(args, item) {
var i = item || pgBrowser.tree.selected(),
server_data = null;
@ -320,7 +320,10 @@ define([
if (treeInfo.schema != undefined) {
schema = treeInfo.schema._label;
}
if (treeInfo.table != undefined) {
if (treeInfo.partition != undefined) {
table = treeInfo.partition._label;
} else if (treeInfo.table != undefined) {
table = treeInfo.table._label;
}

View File

@ -228,7 +228,8 @@ define([
var restore_supported_nodes = [
'database', 'schema',
'table', 'function',
'trigger', 'index'
'trigger', 'index',
'partition'
];
/**

View File

@ -29,6 +29,7 @@ class PgAdminModule(Blueprint):
kwargs.setdefault('template_folder', 'templates')
kwargs.setdefault('static_folder', 'static')
self.submodules = []
self.parentmodules = []
super(PgAdminModule, self).__init__(name, import_name, **kwargs)
@ -59,6 +60,8 @@ class PgAdminModule(Blueprint):
super(PgAdminModule, self).register(app, options, first_registration)
for module in self.submodules:
if first_registration:
module.parentmodules.append(self)
app.register_blueprint(module)
def get_own_stylesheets(self):

View File

@ -40,3 +40,11 @@ class ConnectionLost(HTTPException):
'conn_id': self.conn_id
}
)
def __str__(self):
return "Connection (id #{2}) lost for the server (#{0}) on " \
"database ({1})".format(self.sid, self.db, self.conn_id)
def __repr__(self):
return "Connection (id #{2}) lost for the server (#{0}) on " \
"database ({1})".format(self.sid, self.db, self.conn_id)