Added capability to deploy PostgreSQL servers on EDB BigAnimal. Fixes #7179

Khushboo Vashi
2022-04-26 16:41:10 +05:30
committed by Akshay Joshi
parent 0795b22ae6
commit 5677b1e5f8
36 changed files with 2401 additions and 872 deletions

View File

@@ -10,8 +10,8 @@
"""Implements Cloud Deployment"""
import simplejson as json
from flask import Response, url_for, session
from flask import render_template, request, current_app
from flask import Response, url_for
from flask import render_template, request
from flask_babel import gettext
from flask_security import login_required, current_user
@@ -20,18 +20,15 @@ from pgadmin.utils.ajax import make_json_response,\
internal_server_error, bad_request, success_return
from pgadmin.utils.constants import MIMETYPE_APP_JS
from pgadmin.misc.bgprocess.processes import BatchProcess, IProcessDesc
from pgadmin.model import db, Server, Process
from pgadmin.misc.cloud.utils.rds import RDS, verify_aws_credentials,\
get_aws_db_instances, get_aws_db_versions, clear_aws_session,\
get_aws_regions
from pgadmin.misc.cloud.utils import get_my_ip
from config import root
from pgadmin.misc.cloud.biganimal import deploy_on_biganimal,\
clear_biganimal_session
from pgadmin.misc.cloud.rds import deploy_on_rds, clear_aws_session
# set template path for sql scripts
MODULE_NAME = 'cloud'
server_info = {}
class CloudModule(PgAdminModule):
@@ -42,7 +39,6 @@ class CloudModule(PgAdminModule):
class and define methods to load its own
javascript file.
LABEL = gettext('Browser')
"""
def get_own_stylesheets(self):
@@ -77,12 +73,8 @@ class CloudModule(PgAdminModule):
list: URL endpoints for cloud module
"""
return ['cloud.deploy_on_cloud',
'cloud.get_aws_db_versions',
'cloud.verify_credentials',
'cloud.get_aws_db_instances',
'cloud.update_cloud_server',
'cloud.update_cloud_process',
'cloud.get_aws_regions',
'cloud.get_host_ip']
@@ -114,172 +106,46 @@ def script():
methods=['GET'], endpoint='get_host_ip')
@login_required
def get_host_ip():
"""test"""
"""Get host IP Address"""
ip = get_my_ip()
return make_json_response(data=ip)
@blueprint.route('/verify_credentials/',
methods=['POST'], endpoint='verify_credentials')
@login_required
def verify_credentials():
"""Verify Credentials."""
data = json.loads(request.data, encoding='utf-8')
status, msg = verify_aws_credentials(data)
if status:
msg = 'verified'
return make_json_response(success=status, info=msg)
@blueprint.route('/get_aws_db_instances/',
methods=['GET'], endpoint='get_aws_db_instances')
@login_required
def get_db_instances():
"""
Fetch AWS DB Instances based on engine version.
"""
# Get Engine Version
eng_version = request.args.get('eng_version')
status, versions = get_aws_db_instances(eng_version)
if not status:
return make_json_response(
status=410,
success=0,
errormsg=versions
)
return make_json_response(data=versions)
@blueprint.route('/get_aws_db_versions/',
methods=['GET', 'POST'], endpoint='get_aws_db_versions')
@login_required
def get_db_versions():
"""GET AWS Database Versions for AWS."""
status, versions = get_aws_db_versions()
if not status:
return make_json_response(
status=410,
success=0,
errormsg=str(versions)
)
return make_json_response(data=versions)
@blueprint.route('/get_aws_regions/',
methods=['GET', 'POST'], endpoint='get_aws_regions')
@login_required
def get_db_versions():
"""GET AWS Regions for AWS."""
status, regions = get_aws_regions()
if not status:
return make_json_response(
status=410,
success=0,
errormsg=str(regions)
)
return make_json_response(data=regions)
@blueprint.route(
'/deploy', methods=['POST'], endpoint='deploy_on_cloud'
)
@login_required
def deploy_on_cloud():
"""Deploy on Cloud"""
"""Deploy on Cloud."""
data = json.loads(request.data, encoding='utf-8')
from subprocess import Popen, PIPE
_cmd = 'python'
_cmd_script = '{0}/pgacloud/pgacloud.py'.format(root)
if data['cloud'] == 'rds':
status, resp = deploy_on_rds(data)
elif data['cloud'] == 'biganimal':
status, resp = deploy_on_biganimal(data)
else:
status = False
resp = gettext('No cloud implementation.')
args = [_cmd_script,
'--debug',
data['cloud'],
'--region',
str(data['secret']['aws_region']),
'create-instance',
'--name',
data['instance_details']['aws_name'],
'--db-name',
data['db_details']['aws_db_name'],
'--db-username',
data['db_details']['aws_db_username'],
'--db-port',
str(data['db_details']['aws_db_port']),
'--db-version',
str(data['instance_details']['aws_db_version']),
'--instance-type',
data['instance_details']['aws_instance_type'],
'--storage-type',
data['instance_details']['aws_storage_type'],
'--storage-size',
str(data['instance_details']['aws_storage_size']),
'--public-ip',
str(data['instance_details']['aws_public_ip']),
]
if data['instance_details']['aws_storage_type'] == 'io1':
args.append('--storage-iops')
args.append(str(data['instance_details']['aws_storage_IOPS']))
_cmd_msg = '{0} {1} {2}'.format(_cmd, _cmd_script, ' '.join(args))
try:
sid = _create_server({
'gid': data['db_details']['gid'],
'name': data['instance_details']['aws_name'],
'db': data['db_details']['aws_db_name'],
'username': data['db_details']['aws_db_username'],
'port': data['db_details']['aws_db_port'],
'cloud_status': -1
})
p = BatchProcess(
desc=CloudProcessDesc(sid, _cmd_msg, data['cloud'],
data['instance_details']['aws_name']),
cmd=_cmd,
args=args
)
env = dict()
env['AWS_ACCESS_KEY_ID'] = data['secret']['aws_access_key']
env['AWS_SECRET_ACCESS_KEY'] = data['secret']['aws_secret_access_key']
if 'aws_session_token' in data['secret'] and\
data['secret']['aws_session_token'] is not None:
env['AWS_SESSION_TOKEN'] = data['secret']['aws_session_token']
if 'aws_db_password' in data['db_details']:
env['AWS_DATABASE_PASSWORD'] = data[
'db_details']['aws_db_password']
p.set_env_variables(None, env=env)
p.update_server_id(p.id, sid)
p.start()
except Exception as e:
current_app.logger.exception(e)
if not status:
return make_json_response(
status=410,
success=0,
errormsg=str(e)
errormsg=resp
)
# Return response
return make_json_response(
success=1,
data={'job_id': 1, 'node': {
'_id': sid,
'_id': resp['sid'],
'_pid': data['db_details']['gid'],
'connected': False,
'_type': 'server',
'icon': 'icon-server-cloud-deploy',
'id': 'server_{}'.format(sid),
'id': 'server_{}'.format(resp['sid']),
'inode': True,
'label': data['instance_details']['aws_name'],
'label': resp['label'],
'server_type': 'pg',
'module': 'pgadmin.node.server',
'cloud_status': -1
@@ -287,25 +153,6 @@ def deploy_on_cloud():
)
def _create_server(data):
"""Create Server"""
server = Server(
user_id=current_user.id,
servergroup_id=data.get('gid'),
name=data.get('name'),
maintenance_db=data.get('db'),
username=data.get('username'),
ssl_mode='prefer',
cloud_status=data.get('cloud_status'),
connect_timeout=30,
)
db.session.add(server)
db.session.commit()
return server.id
def update_server(data):
"""Update Server."""
server_data = data
@@ -315,7 +162,7 @@ def update_server(data):
).first()
if server is None:
return False, "Could not find the server."
return False, gettext("Could not find the server.")
if server_data['instance'] == '' or\
not server_data['instance']['status']:
@@ -341,11 +188,17 @@ def update_server(data):
_server['status'] = False
else:
_server['status'] = True
clear_aws_session()
clear_cloud_session()
return True, _server
def clear_cloud_session():
"""Clear cloud sessions."""
clear_aws_session()
clear_biganimal_session()
@blueprint.route(
'/update_cloud_process/<sid>', methods=['GET'],
endpoint='update_cloud_process'
@@ -386,37 +239,3 @@ def update_cloud_server():
'label': server.name
}}
)
class CloudProcessDesc(IProcessDesc):
"""Cloud Server Process Description."""
def __init__(self, _sid, _cmd, _provider, _instance_name):
self.sid = _sid
self.cmd = _cmd
self.instance_name = _instance_name
self.provider = 'Amazon RDS'
if _provider == 'rds':
self.provider = 'Amazon RDS'
elif _provider == 'azure':
self.provider = 'Azure PostgreSQL'
else:
self.provider = 'EDB Big Animal'
@property
def message(self):
return "Deployment on {0} is started for instance {1}.".format(
self.provider, self.instance_name)
def details(self, cmd, args):
res = '<div>' + self.message
res += '</div><div class="py-1">'
res += '<div class="pg-bg-cmd enable-selection p-1">'
res += html.safe_str(self.cmd)
res += '</div></div>'
return res
@property
def type_desc(self):
return "Cloud Deployment"

View File

@@ -0,0 +1,432 @@
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2022, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
# EDB BigAnimal Cloud Deployment Implementation
import requests
import json
import pickle
from flask_babel import gettext
import simplejson as json
from flask import session, current_app
from flask_security import login_required
from werkzeug.datastructures import Headers
from pgadmin.utils import PgAdminModule
from pgadmin.misc.cloud.utils import _create_server, CloudProcessDesc
from pgadmin.misc.bgprocess.processes import BatchProcess
from pgadmin.utils.ajax import make_json_response,\
internal_server_error, bad_request, success_return
from config import root
from pgadmin.utils.constants import MIMETYPE_APP_JSON
MODULE_NAME = 'biganimal'
class BigAnimalModule(PgAdminModule):
"""Cloud module to deploy on EDB BigAnimal"""
def get_own_stylesheets(self):
"""
Returns:
list: the stylesheets used by this module.
"""
stylesheets = []
return stylesheets
def get_exposed_url_endpoints(self):
return ['biganimal.verification',
'biganimal.verification_ack',
'biganimal.regions',
'biganimal.db_types',
'biganimal.db_versions',
'biganimal.instance_types',
'biganimal.volume_types',
'biganimal.volume_properties']
blueprint = BigAnimalModule(MODULE_NAME, __name__,
static_url_path='/misc/cloud/biganimal')
@blueprint.route('/verification_ack/',
methods=['GET'], endpoint='verification_ack')
@login_required
def biganimal_verification_ack():
"""Check the Verification is done or not."""
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
status, error = biganimal_obj.polling_for_token()
session['biganimal']['provider_obj'] = pickle.dumps(biganimal_obj, -1)
return make_json_response(success=status,
errormsg=error)
@blueprint.route('/verification/',
methods=['GET'], endpoint='verification')
@login_required
def verification():
"""Verify Credentials."""
biganimal = BigAnimalProvider()
verification_uri = biganimal.get_device_code()
session['biganimal'] = {}
session['biganimal']['provider_obj'] = pickle.dumps(biganimal, -1)
return make_json_response(data=verification_uri)
@blueprint.route('/regions/',
methods=['GET'], endpoint='regions')
@login_required
def biganimal_regions():
"""Get Regions."""
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
status, regions = biganimal_obj.get_regions()
return make_json_response(data=regions)
@blueprint.route('/db_types/',
methods=['GET'], endpoint='db_types')
@login_required
def biganimal_db_types():
"""Get Database Types."""
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
pg_types = biganimal_obj.get_postgres_types()
return make_json_response(data=pg_types)
@blueprint.route('/db_versions/',
methods=['GET'], endpoint='db_versions')
@login_required
def biganimal_db_versions():
"""Get Database Version."""
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
pg_versions = biganimal_obj.get_postgres_versions()
return make_json_response(data=pg_versions)
@blueprint.route('/instance_types/<region_id>',
methods=['GET'], endpoint='instance_types')
@login_required
def biganimal_instance_types(region_id):
"""Get Instance Types."""
if not region_id:
return make_json_response(data=[])
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
biganimal_instances = biganimal_obj.get_instance_types(region_id)
return make_json_response(data=biganimal_instances)
@blueprint.route('/volume_types/<region_id>',
methods=['GET'], endpoint='volume_types')
@login_required
def biganimal_volume_types(region_id):
"""Get Volume Types."""
if not region_id:
return make_json_response(data=[])
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
biganimal_volumes = biganimal_obj.get_volume_types(region_id)
return make_json_response(data=biganimal_volumes)
@blueprint.route('/volume_properties/<region_id>/<volume_type>',
methods=['GET'], endpoint='volume_properties')
@login_required
def biganimal_volume_properties(region_id, volume_type):
"""Get Volume Properties."""
if not region_id:
return make_json_response(data=[])
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
biganimal_volume_properties = biganimal_obj.get_volume_properties(
region_id,
volume_type)
return make_json_response(data=biganimal_volume_properties)
class BigAnimalProvider():
"""BigAnimal provider class"""
BASE_URL = 'https://portal.biganimal.com/api/v1'
def __init__(self):
self.provider = {}
self.device_code = {}
self.token = {}
self.raw_access_token = None
self.access_token = None
self.token_error = {}
self.token_status = -1
self.get_auth_provider()
def _get_headers(self):
return {
'content-type': MIMETYPE_APP_JSON,
'Authorization': 'Bearer {0}'.format(self.access_token)
}
def get_auth_provider(self):
"""Get Authentication Provider Relevant Information."""
provider_resp = requests.get("{0}/{1}".format(self.BASE_URL,
'auth/provider'))
if provider_resp.status_code == 200 and provider_resp.content:
self.provider = json.loads(provider_resp.content)
def get_device_code(self):
"""Get device code"""
_url = "{0}/{1}".format(self.provider['issuerUri'],
'oauth/device/code')
_headers = {"content-type": "application/x-www-form-urlencoded"}
_data = {
'client_id': self.provider['clientId'],
'audience': self.provider['audience'],
'scope': self.provider['scope']
}
device_resp = requests.post(_url,
headers=_headers,
data=_data)
if device_resp.status_code == 200 and device_resp.content:
self.device_code = json.loads(device_resp.content)
return self.device_code['verification_uri_complete']
def polling_for_token(self):
# Polling for the Token
_url = "{0}/{1}".format(self.provider['issuerUri'], 'oauth/token')
_headers = {"content-type": "application/x-www-form-urlencoded"}
_data = {
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
'device_code': self.device_code['device_code'],
'client_id': self.provider['clientId']
}
token_resp = requests.post(_url,
headers=_headers,
data=_data)
if token_resp.status_code == 200:
self.token = json.loads(token_resp.content)
self.raw_access_token = self.token['access_token']
self.token_error['error'] = None
self.token_status = 1
status, msg = self.exchange_token()
if status and not self._check_admin_permission():
return False, gettext('forbidden')
return status, msg
elif token_resp.status_code == 403:
self.token_error = json.loads(token_resp.content)
if self.token_error['error'] == 'authorization_pending' or\
self.token_error['error'] == 'access_denied':
self.token_status = 0
return False, self.token_error['error']
return False, None
def exchange_token(self):
_url = "{0}/{1}".format(self.BASE_URL, 'auth/token')
_headers = {"content-type": "application/json"}
_data = {'token': self.raw_access_token}
token_resp = requests.post(_url,
headers=_headers,
data=json.dumps(_data))
final_token = json.loads(token_resp.content)
if token_resp.status_code == 200:
self.access_token = final_token['token']
return True, None
else:
return False, self.token_error['error']
def _check_admin_permission(self):
"""
Check whether the user has a valid role or not.
There is no direct way to do this, so just checking the create cluster
permission.
"""
_url = "{0}/{1}".format(
self.BASE_URL,
'admin/permissions')
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code != 200:
return False
if resp.status_code == 200 and resp.content:
content = json.loads(resp.content)
if 'permissionsList' in content and 'create:clusters' in content[
'permissionsList']:
return True
return False
def get_regions(self):
"""Get regions"""
_url = "{0}/{1}".format(
self.BASE_URL,
'cloud-providers/azure/regions')
regions = []
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
regions_resp = json.loads(resp.content)
for value in regions_resp['regionsList']:
regions.append({
'label': value['regionName'],
'value': value['regionId']
})
return True, regions
elif resp.content:
regions_resp = json.loads(resp.content)
return False, regions_resp['error']['message']
else:
return False, gettext('Error retrieving regions.')
def get_postgres_types(self):
"""Get Postgres Types."""
_url = "{0}/{1}".format(
self.BASE_URL,
'postgres-types')
pg_types = []
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
pg_types_resp = json.loads(resp.content)
for value in pg_types_resp['pgTypesList']:
pg_types.append({
'label': value['name'],
'value': value['id']
})
return pg_types
def get_postgres_versions(self):
"""Get Postgres Versions."""
_url = "{0}/{1}".format(
self.BASE_URL,
'postgres-versions')
pg_versions = []
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
pg_versions_resp = json.loads(resp.content)
for value in pg_versions_resp['pgVersionsList']:
pg_versions.append({
'label': value['versionName'],
'value': value['versionId']
})
return pg_versions
def get_instance_types(self, region_id):
"""GEt Instance Types."""
_url = "{0}/{1}".format(
self.BASE_URL,
'cloud-providers/azure/regions/'
'{0}/instance-types'.format(region_id))
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
pg_types = json.loads(resp.content)
return pg_types['instanceTypesList']
return []
def get_volume_types(self, region_id):
"""Get Volume Types."""
_url = "{0}/{1}".format(
self.BASE_URL,
'cloud-providers/azure/regions/{0}/volume-types'.format(region_id))
volume_types = []
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
volume_resp = json.loads(resp.content)
for value in volume_resp['volumeTypesList']:
volume_types.append({
'label': value['displayName'],
'value': value['id']
})
return volume_types
def get_volume_properties(self, region_id, volume_type):
"""Get Volume Properties."""
_url = "{0}/{1}".format(
self.BASE_URL,
'cloud-providers/azure/regions/{0}/volume-types'
'/{1}/volume-properties'.format(region_id, volume_type))
volume_properties = []
resp = requests.get(_url, headers=self._get_headers())
if resp.status_code == 200 and resp.content:
volume_prop = json.loads(resp.content)
for value in volume_prop['volumePropertiesList']:
volume_properties.append({
'label': value['value'],
'value': value['id']
})
return volume_properties
def clear_biganimal_session():
"""Clear session data."""
if 'biganimal' in session:
session.pop('biganimal')
def deploy_on_biganimal(data):
"""Deploy Postgres instance on BigAnimal"""
_cmd = 'python'
_cmd_script = '{0}/pgacloud/pgacloud.py'.format(root)
_label = data['instance_details']['name']
_private_network = '1' if str(data['instance_details']['cloud_type']
) == 'private' else '0'
_instance_size = data['instance_details']['instance_size'].split('||')[1]
args = [_cmd_script,
data['cloud'],
'create-instance',
'--name',
data['instance_details']['name'],
'--region',
str(data['instance_details']['region']),
'--db-type',
str(data['db_details']['database_type']),
'--db-version',
str(data['db_details']['postgres_version']),
'--volume-type',
str(data['instance_details']['volume_type']),
'--volume-properties',
str(data['instance_details']['volume_properties']),
'--instance-type',
str(_instance_size),
'--private-network',
_private_network
]
if 'public_ip' in data['instance_details']:
args.append('--public-ip')
args.append(str(data['instance_details']['public_ip']))
_cmd_msg = '{0} {1} {2}'.format(_cmd, _cmd_script, ' '.join(args))
try:
sid = _create_server({
'gid': data['db_details']['gid'],
'name': data['instance_details']['name'],
'db': 'edb_admin',
'username': 'edb_admin',
'port': 5432,
'cloud_status': -1
})
p = BatchProcess(
desc=CloudProcessDesc(sid, _cmd_msg,
data['cloud'],
data['instance_details']['name']
),
cmd=_cmd,
args=args
)
env = dict()
biganimal_obj = pickle.loads(session['biganimal']['provider_obj'])
env['BIGANIMAL_ACCESS_KEY'] = biganimal_obj.access_token
if 'password' in data['db_details']:
env['BIGANIMAL_DATABASE_PASSWORD'] = data[
'db_details']['password']
p.set_env_variables(None, env=env)
p.update_server_id(p.id, sid)
p.start()
return True, {'label': _label, 'sid': sid}
except Exception as e:
current_app.logger.exception(e)
return False, str(e)
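As a rough illustration (assuming the class were driven outside Flask, which this module itself does not do), the device-code flow implemented by BigAnimalProvider proceeds as: fetch the verification URI, have the user confirm it in a browser, then poll until the token is issued and exchanged.

# Hypothetical stand-alone driver; pgAdmin splits these steps across the
# /verification/ and /verification_ack/ endpoints and the session pickle.
import time

provider = BigAnimalProvider()
uri = provider.get_device_code()
print('Open this URL in a browser and confirm the code:', uri)

while True:
    status, error = provider.polling_for_token()
    if status:                      # token exchanged; access_token is usable
        ok, regions = provider.get_regions()
        print(regions)
        break
    if error == 'access_denied':    # user rejected the request
        break
    time.sleep(5)                   # 'authorization_pending': keep polling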

View File

@@ -0,0 +1,330 @@
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2022, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
# AWS RDS Cloud Deployment Implementation
import requests
import boto3
import json
import pickle
from boto3.session import Session
from flask_babel import gettext
from flask import session, current_app, request
from flask_security import login_required
from werkzeug.datastructures import Headers
from pgadmin.utils import PgAdminModule
from pgadmin.misc.cloud.utils import _create_server, CloudProcessDesc
from pgadmin.misc.bgprocess.processes import BatchProcess
from pgadmin.utils.ajax import make_json_response,\
internal_server_error, bad_request, success_return
from .regions import AWS_REGIONS
import simplejson as json
from config import root
MODULE_NAME = 'rds'
class RDSModule(PgAdminModule):
"""Cloud module to deploy on AWS RDS"""
def get_own_stylesheets(self):
"""
Returns:
list: the stylesheets used by this module.
"""
stylesheets = []
return stylesheets
def get_exposed_url_endpoints(self):
return ['rds.db_versions',
'rds.verify_credentials',
'rds.db_instances',
'rds.regions']
blueprint = RDSModule(MODULE_NAME, __name__,
static_url_path='/misc/cloud/rds')
@blueprint.route('/verify_credentials/',
methods=['POST'], endpoint='verify_credentials')
@login_required
def verify_credentials():
"""Verify Credentials."""
data = json.loads(request.data, encoding='utf-8')
session_token = data['secret']['session_token'] if\
'session_token' in data['secret'] else None
if 'aws' not in session:
session['aws'] = {}
if 'aws_rds_obj' not in session['aws'] or\
session['aws']['secret'] != data['secret']:
_rds = RDS(
access_key=data['secret']['access_key'],
secret_key=data['secret']['secret_access_key'],
session_token=session_token,
default_region=data['secret']['region'])
status, identity = _rds.validate_credentials()
if status:
session['aws']['secret'] = data['secret']
session['aws']['aws_rds_obj'] = pickle.dumps(_rds, -1)
if status:
msg = 'verified'
return make_json_response(success=status, info=msg)
@blueprint.route('/db_instances/',
methods=['GET'], endpoint='db_instances')
@login_required
def get_db_instances():
"""
Fetch AWS DB Instances based on engine version.
"""
# Get Engine Version
eng_version = request.args.get('eng_version')
if 'aws' not in session:
return make_json_response(
status=410,
success=0,
errormsg=gettext('Session has not been created yet.')
)
if not eng_version or eng_version == '' or eng_version == 'undefined':
eng_version = '10.17'
rds_obj = pickle.loads(session['aws']['aws_rds_obj'])
res = rds_obj.get_available_db_instance_class(
engine_version=eng_version)
versions_set = set()
versions = []
for value in res:
versions_set.add(value['DBInstanceClass'])
for value in versions_set:
versions.append({
'label': value,
'value': value
})
return make_json_response(data=versions)
@blueprint.route('/db_versions/',
methods=['GET', 'POST'], endpoint='db_versions')
@login_required
def get_db_versions():
"""GET AWS Database Versions for AWS."""
if 'aws' not in session:
return make_json_response(
status=410,
success=0,
errormsg=gettext('Session has not been created yet.')
)
rds_obj = pickle.loads(session['aws']['aws_rds_obj'])
db_versions = rds_obj.get_available_db_version()
res = list(filter(lambda val: not val['EngineVersion'].startswith('9.6'),
db_versions['DBEngineVersions']))
versions = []
for value in res:
versions.append({
'label': value['DBEngineVersionDescription'],
'value': value['EngineVersion']
})
return make_json_response(data=versions)
@blueprint.route('/regions/',
methods=['GET', 'POST'], endpoint='regions')
@login_required
def get_regions():
"""GET Regions for AWS."""
try:
clear_aws_session()
_session = Session()
res = _session.get_available_regions('rds')
regions = []
for value in res:
if value in AWS_REGIONS:
regions.append({
'label': AWS_REGIONS[value] + ' | ' + value,
'value': value
})
return make_json_response(data=regions)
except Exception as e:
return make_json_response(
status=410,
success=0,
errormsg=str(e)
)
class RDS():
def __init__(self, access_key, secret_key, session_token=None,
default_region='ap-south-1'):
self._clients = {}
self._access_key = access_key
self._secret_key = secret_key
self._session_token = session_token
self._default_region = default_region
##########################################################################
# AWS Helper functions
##########################################################################
def _get_aws_client(self, type):
""" Create/cache/return an AWS client object """
if type in self._clients:
return self._clients[type]
session = boto3.Session(
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
aws_session_token=self._session_token
)
self._clients[type] = session.client(
type, region_name=self._default_region)
return self._clients[type]
def get_available_db_version(self, engine='postgres'):
rds = self._get_aws_client('rds')
return rds.describe_db_engine_versions(Engine=engine)
def get_available_db_instance_class(self, engine='postgres',
engine_version='10'):
rds = self._get_aws_client('rds')
_instances = rds.describe_orderable_db_instance_options(
Engine=engine,
EngineVersion=engine_version)
_instances_list = _instances['OrderableDBInstanceOptions']
_marker = _instances['Marker'] if 'Marker' in _instances else None
while _marker:
_tmp_instances = rds.describe_orderable_db_instance_options(
Engine=engine,
EngineVersion=engine_version,
Marker=_marker)
_instances_list = [*_instances_list,
*_tmp_instances['OrderableDBInstanceOptions']]
_marker = _tmp_instances['Marker'] if 'Marker'\
in _tmp_instances else None
return _instances_list
def get_db_instance(self, instance_name):
rds = self._get_aws_client('rds')
return rds.describe_db_instances(
DBInstanceIdentifier=instance_name)
def validate_credentials(self):
client = self._get_aws_client('sts')
try:
identity = client.get_caller_identity()
return True, identity
except Exception as e:
return False, str(e)
finally:
self._clients.pop('sts')
def clear_aws_session():
"""Clear AWS Session"""
if 'aws' in session:
session.pop('aws')
def deploy_on_rds(data):
"""Deploy the Postgres instance on RDS."""
_cmd = 'python'
_cmd_script = '{0}/pgacloud/pgacloud.py'.format(root)
_label = None
from subprocess import Popen, PIPE
_label = data['instance_details']['name']
args = [_cmd_script,
data['cloud'],
'--region',
str(data['secret']['region']),
'create-instance',
'--name',
data['instance_details']['name'],
'--db-name',
data['db_details']['db_name'],
'--db-username',
data['db_details']['db_username'],
'--db-port',
str(data['db_details']['db_port']),
'--db-version',
str(data['instance_details']['db_version']),
'--instance-type',
data['instance_details']['instance_type'],
'--storage-type',
data['instance_details']['storage_type'],
'--storage-size',
str(data['instance_details']['storage_size']),
'--public-ip',
str(data['instance_details']['public_ip']),
]
if data['instance_details']['storage_type'] == 'io1':
args.append('--storage-iops')
args.append(str(data['instance_details']['storage_IOPS']))
_cmd_msg = '{0} {1} {2}'.format(_cmd, _cmd_script, ' '.join(args))
try:
sid = _create_server({
'gid': data['db_details']['gid'],
'name': data['instance_details']['name'],
'db': data['db_details']['db_name'],
'username': data['db_details']['db_username'],
'port': data['db_details']['db_port'],
'cloud_status': -1
})
p = BatchProcess(
desc=CloudProcessDesc(sid, _cmd_msg, data['cloud'],
data['instance_details']['name']),
cmd=_cmd,
args=args
)
env = dict()
env['AWS_ACCESS_KEY_ID'] = data['secret']['access_key']
env['AWS_SECRET_ACCESS_KEY'] = data['secret'][
'secret_access_key']
if 'session_token' in data['secret'] and\
data['secret']['session_token'] is not None:
env['AWS_SESSION_TOKEN'] = data['secret']['session_token']
if 'db_password' in data['db_details']:
env['AWS_DATABASE_PASSWORD'] = data[
'db_details']['db_password']
p.set_env_variables(None, env=env)
p.update_server_id(p.id, sid)
p.start()
return True, {'label': _label, 'sid': sid}
except Exception as e:
current_app.logger.exception(e)
return False, str(e)
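For reference, a small hypothetical usage of the RDS helper defined above (the keys are placeholders): validate the credentials via STS, then query engine versions and instance classes; note that get_available_db_instance_class already walks boto3's Marker-based pagination internally.

# Hypothetical stand-alone usage of the RDS helper class.
rds = RDS(access_key='AKIA...', secret_key='secret',
          default_region='us-east-1')
ok, identity = rds.validate_credentials()
if ok:
    versions = rds.get_available_db_version()          # describe_db_engine_versions
    classes = rds.get_available_db_instance_class(engine_version='13.4')
    print(len(versions['DBEngineVersions']), len(classes))
else:
    print('Invalid credentials:', identity)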

View File

@@ -10,21 +10,21 @@
import gettext from 'sources/gettext';
import url_for from 'sources/url_for';
import React from 'react';
import { Box, Table, TableBody, TableCell, TableHead, TableRow, Paper } from '@material-ui/core';
import { Box, Paper } from '@material-ui/core';
import { makeStyles } from '@material-ui/core/styles';
import Wizard from '../../../../static/js/helpers/wizard/Wizard';
import WizardStep from '../../../../static/js/helpers/wizard/WizardStep';
import {FormFooterMessage, MESSAGE_TYPE, InputToggle } from '../../../../static/js/components/FormComponents';
import {FormFooterMessage, MESSAGE_TYPE } from '../../../../static/js/components/FormComponents';
import getApiInstance from '../../../../static/js/api_instance';
import SchemaView from '../../../../static/js/SchemaView';
import Alertify from 'pgadmin.alertifyjs';
import PropTypes from 'prop-types';
import {CloudInstanceDetailsSchema, CloudDBCredSchema, DatabaseSchema} from './cloud_db_details_schema.ui';
import { isEmptyString } from 'sources/validators';
import pgAdmin from 'sources/pgadmin';
import { getNodeAjaxOptions, getNodeListById } from 'pgbrowser/node_ajax';
import { commonTableStyles } from '../../../../static/js/Theme';
import clsx from 'clsx';
import {ToggleButtons, FinalSummary} from './cloud_components';
import { PrimaryButton } from '../../../../static/js/components/Buttons';
import {AwsCredentials, AwsInstanceDetails, AwsDatabaseDetails, validateCloudStep1,
validateCloudStep2, validateCloudStep3} from './aws';
import {BigAnimalInstance, BigAnimalDatabase, validateBigAnimal,
validateBigAnimalStep2, validateBigAnimalStep3} from './biganimal';
const useStyles = makeStyles(() =>
({
@@ -33,44 +33,49 @@ const useStyles = makeStyles(() =>
display: 'flex',
},
messagePadding: {
flex: 2.5
paddingTop: '10px',
flex: 2.5,
},
buttonMarginEDB: {
position: 'relative',
top: '20%',
},
toggleButton: {
height: '100px',
},
table: {
marginLeft: '4px',
marginTop: '12px',
summaryContainer: {
flexGrow: 1,
minHeight: 0,
overflow: 'auto',
},
tableCellHeading: {
fontWeight: 'bold',
paddingLeft: '9px',
boxText: {
paddingBottom: '5px'
},
tableCell: {
padding: '9px',
paddingLeft: '11px',
}
}),
);
export default function CloudWizard({ nodeInfo, nodeData }) {
const classes = useStyles();
const tableClasses = commonTableStyles();
var steps = ['Cloud Provider', 'Credentials', 'Instance Specification', 'Database Details', 'Review'];
const [currentStep, setCurrentStep] = React.useState('');
const [selectionVal, setCloudSelection] = React.useState('');
const [errMsg, setErrMsg] = React.useState('');
const [cloudInstanceDetailsInstance, setCloudInstanceDetailsInstance] = React.useState();
const [cloudDBCredInstance, setCloudDBCredInstance] = React.useState();
const [cloudDBInstance, setCloudDBInstance] = React.useState();
const [cloudInstanceDetails, setCloudInstanceDetails] = React.useState({});
const [cloudDBCred, setCloudDBCred] = React.useState({});
const [cloudDBDetails, setCloudDBDetails] = React.useState({});
const [callRDSAPI, setCallRDSAPI] = React.useState({});
const [hostIP, setHostIP] = React.useState('127.0.0.1/32');
const [cloudProvider, setCloudProvider] = React.useState('');
const [verificationIntiated, setVerificationIntiated] = React.useState(false);
const [bigAnimalInstanceData, setBigAnimalInstanceData] = React.useState({});
const [bigAnimalDatabaseData, setBigAnimalDatabaseData] = React.useState({});
const axiosApi = getApiInstance();
const [verificationURI, setVerificationURI] = React.useState('');
const [verificationCode, setVerificationCode] = React.useState('');
React.useEffect(() => {
let _url = url_for('cloud.get_host_ip') ;
axiosApi.get(_url)
@@ -82,139 +87,33 @@ export default function CloudWizard({ nodeInfo, nodeData }) {
.catch((error) => {
Alertify.error(gettext(`Error while getting the host ip: ${error.response.data.errormsg}`));
});
}, []);
React.useEffect(() => {
if (callRDSAPI == 2) {
const cloudDBInstanceSchema = new CloudInstanceDetailsSchema({
version: ()=>getNodeAjaxOptions('get_aws_db_versions', pgAdmin.Browser.Nodes['server'], nodeInfo, nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('cloud.get_aws_db_versions');
}
}),
getInstances: (engine, reload, options) =>
{
return new Promise((resolve, reject)=>{
const api = getApiInstance();
var _url = url_for('cloud.get_aws_db_instances') ;
if (engine) _url += '?eng_version=' + engine;
if (reload || options === undefined || options.length == 0) {
api.get(_url)
.then(res=>{
let data = res.data.data;
resolve(data);
})
.catch((err)=>{
reject(err);
});
} else {
resolve(options);
}
});
},
instance_type: ()=>getNodeAjaxOptions('get_aws_db_instances', pgAdmin.Browser.Nodes['server'], nodeInfo, nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('cloud.get_aws_db_instances');
}
}),
server_groups: ()=>getNodeListById(pgAdmin.Browser.Nodes['server_group'], nodeInfo, nodeData),
}, {
gid: nodeInfo['server_group']._id,
hostIP: hostIP,
});
setCloudInstanceDetailsInstance(cloudDBInstanceSchema);
}
}, [callRDSAPI]);
React.useEffect(() => {
const cloudDBCredSchema = new CloudDBCredSchema({
regions: ()=>getNodeAjaxOptions('get_aws_regions', pgAdmin.Browser.Nodes['server'], nodeInfo, nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('cloud.get_aws_regions');
}
}),
});
setCloudDBCredInstance(cloudDBCredSchema);
const cloudDBSchema = new DatabaseSchema({
server_groups: ()=>getNodeListById(pgAdmin.Browser.Nodes['server_group'], nodeInfo, nodeData),
},
{
gid: nodeInfo['server_group']._id,
}
);
setCloudDBInstance(cloudDBSchema);
}, []);
}, [cloudProvider]);
const wizardStepChange = (data) => {
setCurrentStep(data.currentStep);
};
const validateCloudStep1 = (cloudDBCred) => {
let isError = false;
if (isEmptyString(cloudDBCred.aws_access_key) || isEmptyString(cloudDBCred.aws_secret_access_key)) {
isError = true;
}
return isError;
};
const validateCloudStep2 = (cloudInstanceDetails, host_ip) => {
let isError = false;
if (isEmptyString(cloudInstanceDetails.aws_name) ||
isEmptyString(cloudInstanceDetails.aws_db_version) || isEmptyString(cloudInstanceDetails.aws_instance_type) ||
isEmptyString(cloudInstanceDetails.aws_storage_type)|| isEmptyString(cloudInstanceDetails.aws_storage_size)) {
isError = true;
}
if(cloudInstanceDetails.aws_storage_type == 'io1' && isEmptyString(cloudInstanceDetails.aws_storage_IOPS)) {
isError = true;
}
if (isEmptyString(cloudInstanceDetails.aws_public_ip)) cloudInstanceDetails.aws_public_ip = host_ip;
return isError;
};
const validateCloudStep3 = (cloudDBDetails) => {
let isError = false;
if (isEmptyString(cloudDBDetails.aws_db_name) ||
isEmptyString(cloudDBDetails.aws_db_username) || isEmptyString(cloudDBDetails.aws_db_password)) {
isError = true;
}
if (isEmptyString(cloudDBDetails.aws_db_port)) cloudDBDetails.aws_db_port = 5432;
if (isEmptyString(cloudDBDetails.gid)) cloudDBDetails.gid = nodeInfo['server_group']._id;
return isError;
};
const getStorageType = (cloudInstanceDetails) => {
let _storage_type = 'General Purpose SSD (gp2)',
_io1 = undefined;
if(cloudInstanceDetails.aws_storage_type == 'gp2') _storage_type = 'General Purpose SSD (gp2)';
else if(cloudInstanceDetails.aws_storage_type == 'io1') {
_storage_type = 'Provisioned IOPS SSD (io1)';
_io1 = cloudInstanceDetails.aws_storage_IOPS;
}
else if(cloudInstanceDetails.aws_storage_type == 'magnetic') _storage_type = 'Magnetic';
return [_io1, _storage_type];
};
const onSave = () => {
var _url = url_for('cloud.deploy_on_cloud');
const post_data = {
gid: nodeInfo.server_group._id,
cloud: selectionVal,
secret: cloudDBCred,
instance_details:cloudInstanceDetails,
db_details: cloudDBDetails
};
var _url = url_for('cloud.deploy_on_cloud'),
post_data = {};
if (cloudProvider == 'rds') {
post_data = {
gid: nodeInfo.server_group._id,
cloud: cloudProvider,
secret: cloudDBCred,
instance_details:cloudInstanceDetails,
db_details: cloudDBDetails
};
} else {
post_data = {
gid: nodeInfo.server_group._id,
cloud: cloudProvider,
instance_details:bigAnimalInstanceData,
db_details: bigAnimalDatabaseData
};
}
axiosApi.post(_url, post_data)
.then((res) => {
pgAdmin.Browser.Events.trigger('pgadmin:browser:tree:add', res.data.data.node, {'server_group': nodeInfo['server_group']});
@@ -228,21 +127,43 @@ export default function CloudWizard({ nodeInfo, nodeData }) {
const disableNextCheck = () => {
setCallRDSAPI(currentStep);
let isError = false;
switch (currentStep) {
case 0:
setCloudSelection('rds');
let isError = (cloudProvider == '');
switch(cloudProvider) {
case 'rds':
switch (currentStep) {
case 0:
setCloudSelection('rds');
break;
case 1:
isError = validateCloudStep1(cloudDBCred);
break;
case 2:
isError = validateCloudStep2(cloudInstanceDetails, hostIP);
break;
case 3:
isError = validateCloudStep3(cloudDBDetails, nodeInfo);
break;
default:
break;
}
break;
case 1:
isError = validateCloudStep1(cloudDBCred);
break;
case 2:
isError = validateCloudStep2(cloudInstanceDetails, hostIP);
break;
case 3:
isError = validateCloudStep3(cloudDBDetails);
break;
default:
case 'biganimal':
switch (currentStep) {
case 0:
setCloudSelection('biganimal');
break;
case 1:
isError = !verificationIntiated;
break;
case 2:
isError = validateBigAnimalStep2(bigAnimalInstanceData);
break;
case 3:
isError = validateBigAnimalStep3(bigAnimalDatabaseData, nodeInfo);
break;
default:
break;
}
break;
}
return isError;
@@ -250,9 +171,9 @@ export default function CloudWizard({ nodeInfo, nodeData }) {
const onBeforeNext = (activeStep) => {
return new Promise((resolve, reject)=>{
if(activeStep == 1) {
if(activeStep == 1 && cloudProvider == 'rds') {
setErrMsg([MESSAGE_TYPE.INFO, 'Validating credentials...']);
var _url = url_for('cloud.verify_credentials');
var _url = url_for('rds.verify_credentials');
const post_data = {
cloud: selectionVal,
secret: cloudDBCred,
@@ -271,76 +192,69 @@ export default function CloudWizard({ nodeInfo, nodeData }) {
setErrMsg([MESSAGE_TYPE.ERROR, 'Error while checking cloud credentials']);
reject();
});
} else {
} else if(activeStep == 0 && cloudProvider == 'biganimal') {
setErrMsg([MESSAGE_TYPE.INFO, 'Getting EDB BigAnimal verification URL...']);
validateBigAnimal()
.then((res) => {
setVerificationURI(res);
setVerificationCode(res.substring(res.indexOf('=')+1));
setErrMsg(['', '']);
resolve();
})
.catch((error) => {
setErrMsg([MESSAGE_TYPE.ERROR, gettext(error)]);
reject();
});
}
else {
resolve();
}
});
};
const authenticateBigAnimal = () => {
var loading_icon_url = url_for(
'static', { 'filename': 'img/loading.gif'}
);
setErrMsg([MESSAGE_TYPE.INFO, 'EDB BigAnimal authentication process is in progress...<img src="' + loading_icon_url + '" alt="' + gettext('Loading...') + '">']);
window.open(verificationURI, 'edb_biganimal_authentication');
let _url = url_for('biganimal.verification_ack') ;
const myInterval = setInterval(() => {
axiosApi.get(_url)
.then((res) => {
if (res.data && res.data.success == 1 ) {
setErrMsg([MESSAGE_TYPE.SUCCESS, 'Authentication completed successfully. Click the Next button to proceed.']);
setVerificationIntiated(true);
clearInterval(myInterval);
}
else if (res.data && res.data.success == 0 && res.data.errormsg == 'access_denied') {
setErrMsg([MESSAGE_TYPE.INFO, 'Verification failed. Access Denied...']);
setVerificationIntiated(false);
clearInterval(myInterval);
}
else if (res.data && res.data.success == 0 && res.data.errormsg == 'forbidden') {
setErrMsg([MESSAGE_TYPE.INFO, 'Authentication completed successfully but you do not have permission to create the cluster.']);
setVerificationIntiated(false);
clearInterval(myInterval);
}
})
.catch((error) => {
setErrMsg([MESSAGE_TYPE.ERROR, gettext(`Error while verifying EDB BigAnimal: ${error.response.data.errormsg}`)]);
});
}, 1000);
};
const onDialogHelp = () => {
window.open(url_for('help.static', { 'filename': 'cloud_deployment.html' }), 'pgadmin_help');
};
function createData(name, value) {
return { name, value };
}
let cloud = '';
switch (selectionVal) {
case 'rds':
cloud = 'Amazon RDS';
break;
case 'azure':
cloud = 'Azure PostgreSQL';
break;
case 'biganimal':
cloud = 'EDB Big Animal';
break;
}
const rows1 = [
createData('Cloud', cloud),
createData('Instance name', cloudInstanceDetails.aws_name),
createData('Public IP', cloudInstanceDetails.aws_public_ip),
];
const rows2 = [
createData('PostgreSQL version', cloudInstanceDetails.aws_db_version),
createData('Instance type', cloudInstanceDetails.aws_instance_type),
];
let _storage_type = getStorageType(cloudInstanceDetails);
const rows3 = [
createData('Storage type', _storage_type[1]),
createData('Allocated storage', cloudInstanceDetails.aws_storage_size + ' GiB'),
];
if (_storage_type[0] !== undefined) {
rows3.push(createData('Provisioned IOPS', _storage_type[0]));
}
const rows4 = [
createData('Database name', cloudDBDetails.aws_db_name),
createData('Username', cloudDBDetails.aws_db_username),
createData('Password', 'xxxxxxx'),
createData('Port', cloudDBDetails.aws_db_port),
];
const onErrClose = React.useCallback(()=>{
setErrMsg([]);
});
const displayTableRows = (rows) => {
return rows.map((row) => (
<TableRow key={row.name} >
<TableCell scope="row">{row.name}</TableCell>
<TableCell align="right">{row.value}</TableCell>
</TableRow>
));
};
return (
<>
<Wizard
@@ -353,107 +267,81 @@ export default function CloudWizard({ nodeInfo, nodeData }) {
beforeNext={onBeforeNext}>
<WizardStep stepId={0}>
<Box className={classes.messageBox}>
<Box className={classes.messagePadding}>{gettext('Deploy on Amazon RDS cloud.')}</Box>
<Box className={classes.messagePadding}>{gettext('Select an option to deploy in the cloud.')}</Box>
</Box>
<Box className={classes.messageBox}>
<InputToggle
value='rds'
options={[{'label': gettext('Amazon RDS'), value: 'rds'}]}
className={classes.toggleButton}
onChange={(value) => {
setCloudSelection(value);}
}
>
</InputToggle>
<ToggleButtons cloudProvider={cloudProvider} setCloudProvider={setCloudProvider}
options={[{'label': 'Amazon RDS', value: 'rds'}, {'label': 'EDB BigAnimal', value: 'biganimal'}]}
></ToggleButtons>
</Box>
<Box className={classes.messageBox}>
<Box className={classes.messagePadding}>{gettext('More cloud providers are coming soon...')}</Box>
</Box>
<FormFooterMessage type={errMsg[0]} message={errMsg[1]} onClose={onErrClose} />
</WizardStep>
<WizardStep stepId={1} >
{cloudDBCredInstance &&
<SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudDBCredInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
setCloudDBCred(changedData);
}}
/>
}
<Box className={classes.buttonMarginEDB}>
{cloudProvider == 'biganimal' && <Box className={classes.messageBox}>
<Box>{gettext('The verification code to authenticate pgAdmin to EDB BigAnimal is: ')} <strong>{verificationCode}</strong>
<br/>{gettext('By clicking the button below, you will be redirected to the EDB BigAnimal authentication page in a new tab.')}
</Box>
</Box>}
{cloudProvider == 'biganimal' && <PrimaryButton onClick={authenticateBigAnimal}>
{gettext('Click here to authenticate yourself to EDB BigAnimal')}
</PrimaryButton>}
{cloudProvider == 'biganimal' && <Box className={classes.messageBox}>
<Box ></Box>
</Box>}
</Box>
{cloudProvider == 'rds' && <AwsCredentials cloudProvider={cloudProvider} nodeInfo={nodeInfo} nodeData={nodeData} setCloudDBCred={setCloudDBCred}/>}
<FormFooterMessage type={errMsg[0]} message={errMsg[1]} onClose={onErrClose} />
</WizardStep>
<WizardStep stepId={2} >
{cloudInstanceDetailsInstance &&
<SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudInstanceDetailsInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
setCloudInstanceDetails(changedData);
}}
/>
}
{cloudProvider == 'rds' && callRDSAPI == 2 && <AwsInstanceDetails
cloudProvider={cloudProvider}
nodeInfo={nodeInfo}
nodeData={nodeData}
setCloudInstanceDetails={setCloudInstanceDetails}
hostIP={hostIP} /> }
{cloudProvider == 'biganimal' && callRDSAPI == 2 && <BigAnimalInstance
cloudProvider={cloudProvider}
nodeInfo={nodeInfo}
nodeData={nodeData}
setBigAnimalInstanceData={setBigAnimalInstanceData}
hostIP={hostIP}
/> }
</WizardStep>
<WizardStep stepId={3} >
{cloudDBInstance &&
<SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudDBInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
setCloudDBDetails(changedData);
}}
/>
{cloudProvider == 'rds' && <AwsDatabaseDetails
cloudProvider={cloudProvider}
nodeInfo={nodeInfo}
nodeData={nodeData}
setCloudDBDetails={setCloudDBDetails}
/>
}
{cloudProvider == 'biganimal' && callRDSAPI == 3 && <BigAnimalDatabase
cloudProvider={cloudProvider}
nodeInfo={nodeInfo}
nodeData={nodeData}
setBigAnimalDatabaseData={setBigAnimalDatabaseData}
/>
}
</WizardStep>
<WizardStep stepId={4} >
<Box className={classes.boxText}>{gettext('Please review the details before creating the cloud instance.')}</Box>
<Paper variant="outlined" elevation={0} className={classes.summaryContainer}>
<Table aria-label="simple table" className={clsx(tableClasses.table)}>
<TableBody>
{displayTableRows(rows1)}
</TableBody>
</Table>
<Table aria-label="simple table" className={clsx(tableClasses.table)}>
<TableHead>
<TableRow>
<TableCell colSpan={2}>{gettext('Version and Instance Details')}</TableCell>
</TableRow>
</TableHead>
<TableBody>
{displayTableRows(rows2)}
</TableBody>
</Table>
<Table aria-label="simple table" className={clsx(tableClasses.table)}>
<TableHead>
<TableRow>
<TableCell colSpan={2}>{gettext('Storage Details')}</TableCell>
</TableRow>
</TableHead>
<TableBody>
{displayTableRows(rows3)}
</TableBody>
</Table>
<Table aria-label="simple table" className={clsx(tableClasses.table)}>
<TableHead>
<TableRow>
<TableCell colSpan={2}>{gettext('Database Details')}</TableCell>
</TableRow>
</TableHead>
<TableBody>
{displayTableRows(rows4)}
</TableBody>
</Table>
{cloudProvider == 'rds' && callRDSAPI == 4 && <FinalSummary
cloudProvider={cloudProvider}
instanceData={cloudInstanceDetails}
databaseData={cloudDBDetails}
/>
}
{cloudProvider == 'biganimal' && callRDSAPI == 4 && <FinalSummary
cloudProvider={cloudProvider}
instanceData={bigAnimalInstanceData}
databaseData={bigAnimalDatabaseData}
/>
}
</Paper>
</WizardStep>
</Wizard>
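For clarity, a hedged sketch (in Python, with placeholder values) of the two request bodies onSave now posts to cloud.deploy_on_cloud, mirroring the keys visible in the JSX above.

# Illustrative payload shapes only; the real values come from the wizard state.
rds_payload = {
    'gid': 1,                      # nodeInfo.server_group._id
    'cloud': 'rds',
    'secret': {},                  # cloudDBCred
    'instance_details': {},        # cloudInstanceDetails
    'db_details': {},              # cloudDBDetails
}
biganimal_payload = {
    'gid': 1,
    'cloud': 'biganimal',
    'instance_details': {},        # bigAnimalInstanceData
    'db_details': {},              # bigAnimalDatabaseData
}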

View File

@@ -0,0 +1,243 @@
/////////////////////////////////////////////////////////////
//
// pgAdmin 4 - PostgreSQL Tools
//
// Copyright (C) 2013 - 2022, The pgAdmin Development Team
// This software is released under the PostgreSQL Licence
//
//////////////////////////////////////////////////////////////
import React from 'react';
import pgAdmin from 'sources/pgadmin';
import { getNodeAjaxOptions, getNodeListById } from 'pgbrowser/node_ajax';
import {CloudInstanceDetailsSchema, CloudDBCredSchema, DatabaseSchema} from './cloud_db_details_schema.ui';
import SchemaView from '../../../../static/js/SchemaView';
import url_for from 'sources/url_for';
import getApiInstance from '../../../../static/js/api_instance';
import { isEmptyString } from 'sources/validators';
import PropTypes from 'prop-types';
// AWS credentials
export function AwsCredentials(props) {
const [cloudDBCredInstance, setCloudDBCredInstance] = React.useState();
React.useMemo(() => {
const cloudDBCredSchema = new CloudDBCredSchema({
regions: ()=>getNodeAjaxOptions('get_aws_regions', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('rds.regions');
}
}),
});
setCloudDBCredInstance(cloudDBCredSchema);
}, [props.cloudProvider]);
return <SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudDBCredInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
props.setCloudDBCred(changedData);
}}
/>;
}
AwsCredentials.propTypes = {
nodeInfo: PropTypes.object,
nodeData: PropTypes.object,
cloudProvider: PropTypes.string,
setCloudDBCred: PropTypes.func,
};
// AWS Instance Details
export function AwsInstanceDetails(props) {
const [cloudInstanceDetailsInstance, setCloudInstanceDetailsInstance] = React.useState();
React.useMemo(() => {
const cloudDBInstanceSchema = new CloudInstanceDetailsSchema({
version: ()=>getNodeAjaxOptions('get_aws_db_versions', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('rds.db_versions');
}
}),
getInstances: (engine, reload, options) =>
{
return new Promise((resolve, reject)=>{
const api = getApiInstance();
var _url = url_for('rds.db_instances') ;
if (engine) _url += '?eng_version=' + engine;
if (reload || options === undefined || options.length == 0) {
api.get(_url)
.then(res=>{
let data = res.data.data;
resolve(data);
})
.catch((err)=>{
reject(err);
});
} else {
resolve(options);
}
});
},
instance_type: ()=>getNodeAjaxOptions('get_aws_db_instances', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('rds.db_instances');
}
}),
server_groups: ()=>getNodeListById(pgAdmin.Browser.Nodes['server_group'], props.nodeInfo, props.nodeData),
}, {
gid: props.nodeInfo['server_group']._id,
hostIP: props.hostIP,
});
setCloudInstanceDetailsInstance(cloudDBInstanceSchema);
}, [props.cloudProvider]);
return <SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudInstanceDetailsInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
props.setCloudInstanceDetails(changedData);
}}
/>;
}
AwsInstanceDetails.propTypes = {
nodeInfo: PropTypes.object,
nodeData: PropTypes.object,
cloudProvider: PropTypes.string,
hostIP: PropTypes.string,
setCloudInstanceDetails: PropTypes.func,
};
// AWS Database Details
export function AwsDatabaseDetails(props) {
const [cloudDBInstance, setCloudDBInstance] = React.useState();
React.useMemo(() => {
const cloudDBSchema = new DatabaseSchema({
server_groups: ()=>getNodeListById(pgAdmin.Browser.Nodes['server_group'], props.nodeInfo, props.nodeData),
},
{
gid: props.nodeInfo['server_group']._id,
}
);
setCloudDBInstance(cloudDBSchema);
}, [props.cloudProvider]);
return <SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={cloudDBInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
props.setCloudDBDetails(changedData);
}}
/>;
}
AwsDatabaseDetails.propTypes = {
nodeInfo: PropTypes.object,
nodeData: PropTypes.object,
cloudProvider: PropTypes.string,
setCloudDBDetails: PropTypes.func,
};
export function validateCloudStep1(cloudDBCred) {
let isError = false;
if (isEmptyString(cloudDBCred.access_key) || isEmptyString(cloudDBCred.secret_access_key)) {
isError = true;
}
return isError;
}
export function validateCloudStep2(cloudInstanceDetails, host_ip) {
let isError = false;
if (isEmptyString(cloudInstanceDetails.name) ||
isEmptyString(cloudInstanceDetails.db_version) || isEmptyString(cloudInstanceDetails.instance_type) ||
isEmptyString(cloudInstanceDetails.storage_type)|| isEmptyString(cloudInstanceDetails.storage_size)) {
isError = true;
}
if(cloudInstanceDetails.storage_type == 'io1' && isEmptyString(cloudInstanceDetails.storage_IOPS)) {
isError = true;
}
if (isEmptyString(cloudInstanceDetails.public_ip)) cloudInstanceDetails.public_ip = host_ip;
return isError;
}
export function validateCloudStep3(cloudDBDetails, nodeInfo) {
let isError = false;
if (isEmptyString(cloudDBDetails.db_name) ||
isEmptyString(cloudDBDetails.db_username) || isEmptyString(cloudDBDetails.db_password)) {
isError = true;
}
if (isEmptyString(cloudDBDetails.db_port)) cloudDBDetails.db_port = 5432;
if (isEmptyString(cloudDBDetails.gid)) cloudDBDetails.gid = nodeInfo['server_group']._id;
return isError;
}
function createData(name, value) {
return { name, value };
}
export function getAWSSummary(cloud, cloudInstanceDetails, cloudDBDetails) {
const rows1 = [
createData('Cloud', cloud),
createData('Instance name', cloudInstanceDetails.name),
createData('Public IP', cloudInstanceDetails.public_ip),
];
const rows2 = [
createData('PostgreSQL version', cloudInstanceDetails.db_version),
createData('Instance type', cloudInstanceDetails.instance_type),
];
let _storage_type = getStorageType(cloudInstanceDetails);
const rows3 = [
createData('Storage type', _storage_type[1]),
createData('Allocated storage', cloudInstanceDetails.storage_size + ' GiB'),
];
if (_storage_type[0] !== undefined) {
rows3.push(createData('Provisioned IOPS', _storage_type[0]));
}
const rows4 = [
createData('Database name', cloudDBDetails.db_name),
createData('Username', cloudDBDetails.db_username),
createData('Password', 'xxxxxxx'),
createData('Port', cloudDBDetails.db_port),
];
return [rows1, rows2, rows3, rows4];
}
const getStorageType = (cloudInstanceDetails) => {
let _storage_type = 'General Purpose SSD (gp2)',
_io1 = undefined;
if(cloudInstanceDetails.storage_type == 'gp2') _storage_type = 'General Purpose SSD (gp2)';
else if(cloudInstanceDetails.storage_type == 'io1') {
_storage_type = 'Provisioned IOPS SSD (io1)';
_io1 = cloudInstanceDetails.storage_IOPS;
}
else if(cloudInstanceDetails.storage_type == 'magnetic') _storage_type = 'Magnetic';
return [_io1, _storage_type];
};

View File

@@ -0,0 +1,206 @@
/////////////////////////////////////////////////////////////
//
// pgAdmin 4 - PostgreSQL Tools
//
// Copyright (C) 2013 - 2022, The pgAdmin Development Team
// This software is released under the PostgreSQL Licence
//
//////////////////////////////////////////////////////////////
import React from 'react';
import pgAdmin from 'sources/pgadmin';
import { getNodeAjaxOptions, getNodeListById } from 'pgbrowser/node_ajax';
import {BigAnimalClusterSchema, BigAnimalDatabaseSchema} from './cloud_db_details_schema.ui';
import SchemaView from '../../../../static/js/SchemaView';
import url_for from 'sources/url_for';
import getApiInstance from '../../../../static/js/api_instance';
import { isEmptyString } from 'sources/validators';
import PropTypes from 'prop-types';
const axiosApi = getApiInstance();
// BigAnimal Instance
export function BigAnimalInstance(props) {
const [bigAnimalInstance, setBigAnimalInstance] = React.useState();
React.useMemo(() => {
const bigAnimalSchema = new BigAnimalClusterSchema({
regions: ()=>getNodeAjaxOptions('biganimal_regions', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.regions');
}
}),
instance_types: (region_id)=>getNodeAjaxOptions('biganimal_instance_types', pgAdmin.Browser.Nodes['server'],
props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.instance_types', {'region_id': region_id || 0});
}
}),
volume_types: (region_id)=>getNodeAjaxOptions('biganimal_volume_types', pgAdmin.Browser.Nodes['server'],
props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.volume_types', {'region_id': region_id || 0});
}
}),
volume_properties: (region_id, volume_type)=>getNodeAjaxOptions('biganimal_volume_properties', pgAdmin.Browser.Nodes['server'],
props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.volume_properties', {'region_id': region_id || 0, 'volume_type': volume_type || ''});
}
}),
}, {
nodeInfo: props.nodeInfo,
nodeData: props.nodeData,
hostIP: props.hostIP,
});
setBigAnimalInstance(bigAnimalSchema);
}, [props.cloudProvider]);
return <SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={bigAnimalInstance}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
props.setBigAnimalInstanceData(changedData);
}}
/>;
}
BigAnimalInstance.propTypes = {
nodeInfo: PropTypes.object,
nodeData: PropTypes.object,
cloudProvider: PropTypes.string,
setBigAnimalInstanceData: PropTypes.func,
hostIP: PropTypes.string,
};
// BigAnimal Database
export function BigAnimalDatabase(props) {
const [bigAnimalDatabase, setBigAnimalDatabase] = React.useState();
React.useMemo(() => {
const bigAnimalDBSchema = new BigAnimalDatabaseSchema({
db_types: ()=>getNodeAjaxOptions('biganimal_db_types', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.db_types');
}
}),
db_versions: ()=>getNodeAjaxOptions('biganimal_db_versions', pgAdmin.Browser.Nodes['server'], props.nodeInfo, props.nodeData, {
useCache:false,
cacheNode: 'server',
customGenerateUrl: ()=>{
return url_for('biganimal.db_versions');
}
}),
server_groups: ()=>getNodeListById(pgAdmin.Browser.Nodes['server_group'], props.nodeInfo, props.nodeData),
}, {gid: props.nodeInfo['server_group']._id});
setBigAnimalDatabase(bigAnimalDBSchema);
}, [props.cloudProvider]);
return <SchemaView
formType={'dialog'}
getInitData={() => { /*This is intentional (SonarQube)*/ }}
viewHelperProps={{ mode: 'create' }}
schema={bigAnimalDatabase}
showFooter={false}
isTabView={false}
onDataChange={(isChanged, changedData) => {
props.setBigAnimalDatabaseData(changedData);
}}
/>;
}
BigAnimalDatabase.propTypes = {
nodeInfo: PropTypes.object,
nodeData: PropTypes.object,
cloudProvider: PropTypes.string,
setBigAnimalDatabaseData: PropTypes.func,
};
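// Fetches the EDB BigAnimal device-verification URI from the pgAdmin server;
// resolves with the URI on success and rejects with the server's error message otherwise.
// Illustrative usage (assumed caller pattern, not part of this module):
//   validateBigAnimal().then((uri) => window.open(uri, '_blank'));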
export function validateBigAnimal() {
return new Promise((resolve, reject)=>{
let _url = url_for('biganimal.verification') ;
axiosApi.get(_url)
.then((res) => {
if (res.data.data) {
resolve(res.data.data);
}
})
.catch((error) => {
      reject(`Error while fetching the EDB BigAnimal verification URI: ${error.response.data.errormsg}`);
});
});
}
function createData(name, value) {
return { name, value };
}
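// Builds the row groups for the summary tables shown on the final review step of the cloud deployment wizard.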
export function getBigAnimalSummary(cloud, bigAnimalInstanceData, bigAnimalDatabaseData) {
const rows1 = [
createData('Cloud', cloud),
createData('Instance name', bigAnimalInstanceData.name),
createData('Region', bigAnimalInstanceData.region),
createData('Cluster type', bigAnimalInstanceData.cloud_type),
createData('Public IPs', bigAnimalInstanceData.public_ip),
];
let instance_size = bigAnimalInstanceData.instance_size.split('||');
const rows2 = [
createData('Instance type', bigAnimalInstanceData.instance_type),
createData('Instance series', bigAnimalInstanceData.instance_series),
createData('Instance size', instance_size[0]),
];
const rows3 = [
createData('Volume type', bigAnimalInstanceData.volume_type),
createData('Volume properties', bigAnimalInstanceData.volume_properties),
];
const rows4 = [
createData('Password', 'xxxxxxx'),
    createData('Database type', bigAnimalDatabaseData.database_type),
    createData('Database version', bigAnimalDatabaseData.postgres_version),
];
return [rows1, rows2, rows3, rows4];
}
export function validateBigAnimalStep2(cloudInstanceDetails) {
let isError = false;
if (isEmptyString(cloudInstanceDetails.name) ||
isEmptyString(cloudInstanceDetails.region) || isEmptyString(cloudInstanceDetails.instance_type) ||
isEmptyString(cloudInstanceDetails.instance_series)|| isEmptyString(cloudInstanceDetails.instance_size) ||
isEmptyString(cloudInstanceDetails.volume_type)|| isEmptyString(cloudInstanceDetails.volume_properties) ||
isEmptyString(cloudInstanceDetails.cloud_type)) {
isError = true;
}
return isError;
}
export function validateBigAnimalStep3(cloudDBDetails, nodeInfo) {
let isError = false;
if (isEmptyString(cloudDBDetails.password) ||
isEmptyString(cloudDBDetails.database_type) || isEmptyString(cloudDBDetails.postgres_version)) {
isError = true;
}
if (isEmptyString(cloudDBDetails.gid)) cloudDBDetails.gid = nodeInfo['server_group']._id;
return isError;
}

View File

@@ -0,0 +1,106 @@
/////////////////////////////////////////////////////////////
//
// pgAdmin 4 - PostgreSQL Tools
//
// Copyright (C) 2013 - 2022, The pgAdmin Development Team
// This software is released under the PostgreSQL Licence
//
//////////////////////////////////////////////////////////////
import React from 'react';
import { ToggleButton, ToggleButtonGroup } from '@material-ui/lab';
import CheckRoundedIcon from '@material-ui/icons/CheckRounded';
import { DefaultButton, PrimaryButton } from '../../../../static/js/components/Buttons';
import { makeStyles } from '@material-ui/core/styles';
import { AWSIcon } from '../../../../static/js/components/ExternalIcon';
import PropTypes from 'prop-types';
import { getAWSSummary } from './aws';
import { getBigAnimalSummary } from './biganimal';
import { commonTableStyles } from '../../../../static/js/Theme';
import { Table, TableBody, TableCell, TableHead, TableRow } from '@material-ui/core';
import clsx from 'clsx';
import gettext from 'sources/gettext';
const useStyles = makeStyles(() =>
({
toggleButton: {
height: '100px',
},
}),
);
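// Exclusive toggle group used to pick the target cloud provider on the first step of the wizard.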
export function ToggleButtons(props) {
const classes = useStyles();
const handleCloudProvider = (event, provider) => {
if (provider) props.setCloudProvider(provider);
};
return (
<ToggleButtonGroup
color="primary"
value={props.cloudProvider}
onChange={handleCloudProvider}
className={classes.toggleButton}
exclusive>
{
(props.options||[]).map((option)=>{
return (<ToggleButton value={option.value} key={option.label} aria-label={option.label} component={props.cloudProvider == option.value ? PrimaryButton : DefaultButton}>
<CheckRoundedIcon style={{visibility: props.cloudProvider == option.value ? 'visible': 'hidden'}}/>&nbsp;
{option.value == 'rds' ? <AWSIcon className={classes.icon} /> : ''}&nbsp;&nbsp;{option.label}
</ToggleButton>);
})
}
</ToggleButtonGroup>
);
}
ToggleButtons.propTypes = {
setCloudProvider: PropTypes.func,
cloudProvider: PropTypes.string,
options: PropTypes.array,
};
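// Renders the read-only summary tables for the selected provider on the last wizard step.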
export function FinalSummary(props) {
const tableClasses = commonTableStyles();
let summary = [],
summaryHeader = ['Cloud Details', 'Version and Instance Details', 'Storage Details', 'Database Details'];
if (props.cloudProvider == 'biganimal') {
summary = getBigAnimalSummary(props.cloudProvider, props.instanceData, props.databaseData);
    summaryHeader[1] = 'Version Details';
} else {
summary = getAWSSummary(props.cloudProvider, props.instanceData, props.databaseData);
}
const displayTableRows = (rows) => {
return rows.map((row) => (
<TableRow key={row.name} >
<TableCell scope="row">{row.name}</TableCell>
<TableCell align="right">{row.value}</TableCell>
</TableRow>
));
};
return summary.map((item, index) => {
return (
<Table key={index} className={clsx(tableClasses.table)}>
<TableHead>
<TableRow>
<TableCell colSpan={2}>{gettext(summaryHeader[index])}</TableCell>
</TableRow>
</TableHead>
<TableBody>
{displayTableRows(item)}
</TableBody>
</Table>
);
});
}
FinalSummary.propTypes = {
cloudProvider: PropTypes.string,
instanceData: PropTypes.object,
databaseData: PropTypes.object,
};

View File

@@ -16,8 +16,8 @@ class CloudInstanceDetailsSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: undefined,
aws_name: '',
aws_public_ip: initValues.hostIP,
name: '',
public_ip: initValues.hostIP,
...initValues
});
@@ -34,10 +34,10 @@ class CloudInstanceDetailsSchema extends BaseUISchema {
get baseFields() {
return [
{
id: 'aws_name', label: gettext('Instance name'), type: 'text',
id: 'name', label: gettext('Instance name'), type: 'text',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_public_ip', label: gettext('Public IP range'), type: 'text',
id: 'public_ip', label: gettext('Public IP range'), type: 'text',
mode: ['create'],
helpMessage: gettext('IP address range for permitting inbound traffic, e.g. 127.0.0.1/32. Add multiple IP addresses/ranges separated by commas.'),
}, {
@@ -60,10 +60,10 @@ class CloudDBCredSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: null,
aws_region: '',
aws_access_key: '',
aws_secret_access_key: '',
aws_session_token: '',
region: '',
access_key: '',
secret_access_key: '',
session_token: '',
is_valid_cred: false,
...initValues
});
@@ -81,20 +81,20 @@ class CloudDBCredSchema extends BaseUISchema {
get baseFields() {
return [
{
id: 'aws_region', label: gettext('Region'),
id: 'region', label: gettext('Region'),
type: 'select',
options: this.fieldOptions.regions,
controlProps: { allowClear: false },
noEmpty: true,
helpMessage: gettext('The cloud instance will be deployed in the selected region.')
},{
id: 'aws_access_key', label: gettext('AWS access key'), type: 'text',
id: 'access_key', label: gettext('AWS access key'), type: 'text',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_secret_access_key', label: gettext('AWS secret access key'), type: 'password',
id: 'secret_access_key', label: gettext('AWS secret access key'), type: 'password',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_session_token', label: gettext('AWS session token'), type: 'multiline',
id: 'session_token', label: gettext('AWS session token'), type: 'multiline',
mode: ['create'], noEmpty: false,
helpMessage: gettext('Session token required for temporary AWS sessions.')
}
@@ -108,11 +108,11 @@ class DatabaseSchema extends BaseUISchema {
super({
oid: undefined,
gid: undefined,
aws_db_name: '',
aws_db_username: '',
aws_db_password: '',
aws_db_confirm_password: '',
aws_db_port: 5432,
db_name: '',
db_username: '',
db_password: '',
db_confirm_password: '',
db_port: 5432,
...initValues,
});
@@ -123,18 +123,18 @@ class DatabaseSchema extends BaseUISchema {
}
validate(data, setErrMsg) {
if(!isEmptyString(data.aws_db_password) && !isEmptyString(data.aws_db_confirm_password)
&& data.aws_db_password != data.aws_db_confirm_password) {
setErrMsg('aws_db_confirm_password', gettext('Passwords do not match.'));
if(!isEmptyString(data.db_password) && !isEmptyString(data.db_confirm_password)
&& data.db_password != data.db_confirm_password) {
setErrMsg('db_confirm_password', gettext('Passwords do not match.'));
return true;
}
if (!isEmptyString(data.aws_db_confirm_password) && data.aws_db_confirm_password.length < 8) {
setErrMsg('aws_db_confirm_password', gettext('Password must be 8 characters or more.'));
if (!isEmptyString(data.db_confirm_password) && data.db_confirm_password.length < 8) {
setErrMsg('db_confirm_password', gettext('Password must be 8 characters or more.'));
return true;
}
if (data.aws_db_confirm_password.includes('\'') || data.aws_db_confirm_password.includes('"') ||
data.aws_db_confirm_password.includes('@') || data.aws_db_confirm_password.includes('/')) {
setErrMsg('aws_db_confirm_password', gettext('Invalid passowrd.'));
if (data.db_confirm_password.includes('\'') || data.db_confirm_password.includes('"') ||
data.db_confirm_password.includes('@') || data.db_confirm_password.includes('/')) {
setErrMsg('db_confirm_password', gettext('Invalid password.'));
return true;
}
@@ -153,32 +153,33 @@ class DatabaseSchema extends BaseUISchema {
controlProps: { allowClear: false },
noEmpty: true,
}, {
id: 'aws_db_name', label: gettext('Database name'), type: 'text',
id: 'db_name', label: gettext('Database name'), type: 'text',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_db_username', label: gettext('Username'), type: 'text',
id: 'db_username', label: gettext('Username'), type: 'text',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_db_password', label: gettext('Password'), type: 'password',
id: 'db_password', label: gettext('Password'), type: 'password',
mode: ['create'], noEmpty: true,
helpMessage: gettext('At least 8 printable ASCII characters. Cannot contain any of the following: / \(slash\), \'\(single quote\), "\(double quote\) and @ \(at sign\).')
}, {
id: 'aws_db_confirm_password', label: gettext('Confirm password'),
id: 'db_confirm_password', label: gettext('Confirm password'),
type: 'password',
mode: ['create'], noEmpty: true,
}, {
id: 'aws_db_port', label: gettext('Port'), type: 'text',
id: 'db_port', label: gettext('Port'), type: 'text',
mode: ['create'], noEmpty: true,
}];
}
}
export class InstanceSchema extends BaseUISchema {
constructor(versionOpts, instanceOpts, getInstances) {
super({
aws_db_version: '',
aws_db_instance_class: 'm',
aws_instance_type: '',
db_version: '',
db_instance_class: 'm',
instance_type: '',
reload_instances: true,
});
this.versionOpts = versionOpts;
@@ -189,14 +190,14 @@ export class InstanceSchema extends BaseUISchema {
get baseFields() {
return [{
id: 'aws_db_version', label: gettext('Database version'),
id: 'db_version', label: gettext('Database version'),
type: 'select',
options: this.versionOpts,
controlProps: { allowClear: false },
deps: ['aws_name'],
deps: ['name'],
noEmpty: true,
},{
id: 'aws_db_instance_class', label: gettext('Instance class'),
id: 'db_instance_class', label: gettext('Instance class'),
type: 'toggle',
options: [
{'label': gettext('Standard classes (includes m classes)'), value: 'm'},
@@ -204,11 +205,11 @@ export class InstanceSchema extends BaseUISchema {
{'label': gettext('Burstable classes (includes t classes)'), value: 't'},
], noEmpty: true, orientation: 'vertical',
},{
id: 'aws_instance_type', label: gettext('Instance type'),
id: 'instance_type', label: gettext('Instance type'),
options: this.instanceOpts,
deps: ['aws_db_version', 'aws_db_instance_class'],
deps: ['db_version', 'db_instance_class'],
depChange: (state, source)=> {
if (source[0] == 'aws_db_instance_class') {
if (source[0] == 'db_instance_class') {
return {reload_instances: false};
} else {
state.instanceData = [];
@@ -218,10 +219,10 @@ export class InstanceSchema extends BaseUISchema {
type: (state) => {
return {
type: 'select',
options: ()=>this.getInstances(state.aws_db_version,
options: ()=>this.getInstances(state.db_version,
state.reload_instances, state.instanceData),
optionsLoaded: (options) => { state.instanceData = options; },
optionsReloadBasis: state.aws_db_version + (state.aws_db_instance_class || 'm'),
optionsReloadBasis: state.db_version + (state.db_instance_class || 'm'),
controlProps: {
allowClear: false,
filter: (options) => {
@@ -229,11 +230,11 @@ export class InstanceSchema extends BaseUISchema {
let pattern = 'db.m';
let pattern_1 = 'db.m';
if (state.aws_db_instance_class) {
pattern = 'db.' + state.aws_db_instance_class;
pattern_1 = 'db.' + state.aws_db_instance_class;
if (state.db_instance_class) {
pattern = 'db.' + state.db_instance_class;
pattern_1 = 'db.' + state.db_instance_class;
}
if (state.aws_db_instance_class == 'x') {
if (state.db_instance_class == 'x') {
pattern_1 = 'db.' + 'r';
}
return options.filter((option) => {
@@ -251,17 +252,17 @@ export class InstanceSchema extends BaseUISchema {
export class StorageSchema extends BaseUISchema {
constructor() {
super({
aws_storage_type: 'io1',
aws_storage_size: 100,
aws_storage_IOPS: 3000,
aws_storage_msg: 'Minimum: 20 GiB. Maximum: 16,384 GiB.'
storage_type: 'io1',
storage_size: 100,
storage_IOPS: 3000,
storage_msg: 'Minimum: 20 GiB. Maximum: 16,384 GiB.'
});
}
get baseFields() {
return [
{
id: 'aws_storage_type', label: gettext('Storage type'), type: 'select',
id: 'storage_type', label: gettext('Storage type'), type: 'select',
mode: ['create'],
options: [
{'label': gettext('General Purpose SSD (gp2)'), 'value': 'gp2'},
@@ -269,30 +270,30 @@ export class StorageSchema extends BaseUISchema {
{'label': gettext('Magnetic'), 'value': 'standard'}
], noEmpty: true,
},{
id: 'aws_storage_size', label: gettext('Allocated storage'), type: 'text',
mode: ['create'], noEmpty: true, deps: ['aws_storage_type'],
id: 'storage_size', label: gettext('Allocated storage'), type: 'text',
mode: ['create'], noEmpty: true, deps: ['storage_type'],
depChange: (state, source)=> {
if (source[0] !== 'aws_storage_size')
if(state.aws_storage_type === 'io1') {
return {aws_storage_size: 100};
} else if(state.aws_storage_type === 'gp2') {
return {aws_storage_size: 20};
if (source[0] !== 'storage_size')
if(state.storage_type === 'io1') {
return {storage_size: 100};
} else if(state.storage_type === 'gp2') {
return {storage_size: 20};
} else {
return {aws_storage_size: 5};
return {storage_size: 5};
}
},
helpMessage: gettext('Size in GiB.')
}, {
id: 'aws_storage_IOPS', label: gettext('Provisioned IOPS'), type: 'text',
id: 'storage_IOPS', label: gettext('Provisioned IOPS'), type: 'text',
mode: ['create'],
visible: (state) => {
if(state.aws_storage_type === 'io1') return true;
if(state.storage_type === 'io1') return true;
return false;
} , deps: ['aws_storage_type'],
} , deps: ['storage_type'],
depChange: (state, source) => {
if (source[0] !== 'aws_storage_IOPS') {
if(state.aws_storage_type === 'io1') {
return {aws_storage_IOPS: 3000};
if (source[0] !== 'storage_IOPS') {
if(state.storage_type === 'io1') {
return {storage_IOPS: 3000};
}
}
},
@@ -301,8 +302,338 @@ export class StorageSchema extends BaseUISchema {
}
}
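// Nested schema for picking the BigAnimal instance type, series and size; the option lists cascade from the selected region.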
class BigAnimalInstanceSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues={}) {
super({
oid: undefined,
instance_type: '',
instance_series: '',
instance_size: '',
...initValues
});
this.fieldOptions = {
...fieldOptions,
};
this.initValues = initValues;
}
get idAttribute() {
return 'oid';
}
get baseFields() {
return [
{
id: 'instance_type', label: gettext('Instance type'),
mode: ['create'],
deps: [['region']],
type: (state) => {
return {
type: 'select',
options: ()=>this.fieldOptions.instance_types(state.region),
optionsReloadBasis: state.region,
optionsLoaded: (options) => { state.instanceData = options; },
controlProps: {
allowClear: false,
filter: (options) => {
if (options.length == 0) return;
let _types = _.uniq(_.map(options, 'category')),
_options = [];
_.forEach(_types, (category) => {
  _options.push({
    'label': category,
    'value': category
});
});
return _options;
},
}
};
},
noEmpty: true,
},{
id: 'instance_series', label: gettext('Instance series'),
mode: ['create'], deps: ['instance_type'],
type: (state) => {
return {
type: 'select',
options: state.instanceData,
optionsReloadBasis: state.instance_type,
controlProps: {
allowClear: false,
filter: (options) => {
if (options.length == 0) return;
let _types = _.filter(options, {'category': state.instance_type}),
_options = [];
_types = _.uniq(_.map(_types, 'familyName'));
_.forEach(_types, (value) => {
_options.push({
'label': value,
'value': value
});
});
return _options;
},
}
};
},
noEmpty: true,
},{
id: 'instance_size', label: gettext('Instance size'),
mode: ['create'], deps: ['instance_series'],
type: (state) => {
return {
type: 'select',
options: state.instanceData,
optionsReloadBasis: state.instance_series,
controlProps: {
allowClear: false,
filter: (options) => {
if (options.length == 0) return;
let _types = _.filter(options, {'familyName': state.instance_series}),
_options = [];
_.forEach(_types, (value) => {
_options.push({
'label': value.instanceType + ' (' + value.cpu + 'vCPU, ' + value.ram + 'GB RAM)',
'value': value.instanceType + ' (' + value.cpu + 'vCPU, ' + value.ram + 'GB RAM)' + '||' + value.id,
});
});
return _options;
},
}
};
}, noEmpty: true,
},
];
}
}
class BigAnimalVolumeSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: undefined,
volume_type: '',
volume_properties: '',
...initValues
});
this.fieldOptions = {
...fieldOptions,
};
this.initValues = initValues;
}
get idAttribute() {
return 'oid';
}
get baseFields() {
return [
{
id: 'volume_type', label: gettext('Volume type'),
mode: ['create'], deps: [['region']],
type: (state) => {
return {
type: 'select',
options: ()=>this.fieldOptions.volume_types(state.region),
optionsReloadBasis: state.region,
};
}, noEmpty: true,
},{
id: 'volume_properties', label: gettext('Volume properties'),
mode: ['create'], deps: ['volume_type'],
type: (state) => {
return {
type: 'select',
options: ()=>this.fieldOptions.volume_properties(state.region, state.volume_type),
optionsReloadBasis: state.volume_type,
};
}, noEmpty: true,
},
];
}
}
class BigAnimalNetworkSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: undefined,
cloud_type: '',
public_ip: '',
...initValues
});
this.fieldOptions = {
...fieldOptions,
};
this.initValues = initValues;
}
get idAttribute() {
return 'oid';
}
get baseFields() {
const obj = this;
return [
{
id: 'cloud_type', label: gettext('Cloud type'), type: 'toggle',
mode: ['create'],
options: [
{'label': gettext('Private'), 'value': 'private'},
{'label': gettext('Public'), 'value': 'public'},
], noEmpty: true,
},{
id: 'public_ip', label: gettext('Public IP range'), type: 'text',
mode: ['create'], deps: ['cloud_type'],
disabled: (state) => {
if (state.cloud_type == 'public') return false;
return true;
},
depChange: (state, source)=> {
if(source[0] == 'cloud_type') {
if (state.cloud_type == 'public') {
return {public_ip: obj.initValues.hostIP};
} else {
return {public_ip: ''};
}
}
},
helpMessage: gettext('IP address range for permitting inbound traffic, e.g. 127.0.0.1/32. Add multiple IP addresses/ranges separated by commas. Leave blank to allow 0.0.0.0/0.'),
},
];
}
}
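// Database settings for the BigAnimal cluster; validate() enforces matching passwords with a minimum length of 12 characters.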
class BigAnimalDatabaseSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: undefined,
password: '',
confirm_password: '',
database_type: '',
postgres_version: '',
...initValues
});
this.fieldOptions = {
...fieldOptions,
};
this.initValues = initValues;
}
validate(data, setErrMsg) {
if(!isEmptyString(data.password) && !isEmptyString(data.confirm_password)
&& data.password != data.confirm_password) {
setErrMsg('confirm_password', gettext('Passwords do not match.'));
return true;
}
if (!isEmptyString(data.confirm_password) && data.confirm_password.length < 12) {
setErrMsg('confirm_password', gettext('Password must be 12 characters or more.'));
return true;
}
return false;
}
get idAttribute() {
return 'oid';
}
get baseFields() {
return [
{
id: 'gid', label: gettext('Server group'), type: 'select',
options: this.fieldOptions.server_groups,
mode: ['create'],
controlProps: { allowClear: false },
noEmpty: true,
}, {
id: 'database_type', label: gettext('Database type'), mode: ['create'],
type: 'select',
options: this.fieldOptions.db_types,
noEmpty: true, orientation: 'vertical',
},{
id: 'postgres_version', label: gettext('PostgreSQL version'), type: 'select',
mode: ['create'], noEmpty: true,
options: this.fieldOptions.db_versions,
},{
id: 'password', label: gettext('Database password'), type: 'password',
mode: ['create'], noEmpty: true,
},{
id: 'confirm_password', label: gettext('Confirm password'), type: 'password',
mode: ['create'], noEmpty: true,
},
];
}
}
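// Top-level BigAnimal cluster schema; composes the instance, volume and network schemas above as nested fieldsets.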
class BigAnimalClusterSchema extends BaseUISchema {
constructor(fieldOptions = {}, initValues = {}) {
super({
oid: undefined,
name: '',
region: '',
public_ip: initValues.hostIP,
...initValues
});
this.fieldOptions = {
...fieldOptions,
};
this.initValues = initValues;
this.instance_types = new BigAnimalInstanceSchema({
instance_types: this.fieldOptions.instance_types,
});
this.volume_types = new BigAnimalVolumeSchema({
volume_types: this.fieldOptions.volume_types,
volume_properties: this.fieldOptions.volume_properties
});
}
get idAttribute() {
return 'oid';
}
get baseFields() {
return [
{
id: 'name', label: gettext('Cluster name'), type: 'text',
mode: ['create'], noEmpty: true,
},{
id: 'region', label: gettext('Region'), type: 'select',
options: this.fieldOptions.regions,
controlProps: { allowClear: false },
noEmpty: true,
mode: ['create'],
},{
type: 'nested-fieldset', label: gettext('Instance Type'),
mode: ['create'], deps: ['region'],
schema: this.instance_types,
},{
type: 'nested-fieldset', label: gettext('Storage'),
mode: ['create'], deps: ['region'],
schema: this.volume_types,
}, {
type: 'nested-fieldset', label: gettext('Network Connectivity'),
mode: ['create'],
schema: new BigAnimalNetworkSchema({}, this.initValues),
}
];
}
}
export {
CloudInstanceDetailsSchema,
CloudDBCredSchema,
DatabaseSchema,
BigAnimalClusterSchema,
BigAnimalDatabaseSchema
};

View File

@@ -9,16 +9,20 @@
import urllib3
import ipaddress
from flask_security import current_user
from pgadmin.misc.bgprocess.processes import IProcessDesc
from pgadmin.utils import html
from pgadmin.model import db, Server
def get_my_ip():
""" Return the public IP of this host """
http = urllib3.PoolManager()
try:
external_ip = http.request('GET', 'https://ident.me').data
external_ip = http.request('GET', 'http://ident.me').data
except Exception:
try:
external_ip = http.request('GET', 'https://ifconfig.me/ip').data
external_ip = http.request('GET', 'http://ifconfig.me/ip').data
except Exception:
external_ip = '127.0.0.1'
@@ -32,3 +36,56 @@ def get_my_ip():
return '{}/{}'.format(external_ip, 128)
return '{}/{}'.format(external_ip, 32)
def _create_server(data):
"""Create Server"""
server = Server(
user_id=current_user.id,
servergroup_id=data.get('gid'),
name=data.get('name'),
maintenance_db=data.get('db'),
username=data.get('username'),
ssl_mode='prefer',
cloud_status=data.get('cloud_status'),
connect_timeout=30,
)
db.session.add(server)
db.session.commit()
return server.id
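# Describes a cloud deployment job for pgAdmin's background process viewer (message, command details and type).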
class CloudProcessDesc(IProcessDesc):
"""Cloud Server Process Description."""
def __init__(self, _sid, _cmd, _provider, _instance_name):
self.sid = _sid
self.cmd = _cmd
self.instance_name = _instance_name
self.provider = 'Amazon RDS'
if _provider == 'rds':
self.provider = 'Amazon RDS'
elif _provider == 'azure':
self.provider = 'Azure PostgreSQL'
else:
self.provider = 'EDB BigAnimal'
@property
def message(self):
return "Deployment on {0} is started for instance {1}.".format(
self.provider, self.instance_name)
def details(self, cmd, args):
res = '<div>' + self.message
res += '</div><div class="py-1">'
res += '<div class="pg-bg-cmd enable-selection p-1">'
res += html.safe_str(self.cmd)
res += '</div></div>'
return res
@property
def type_desc(self):
return "Cloud Deployment"

View File

@@ -1,175 +0,0 @@
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2022, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
# AWS RDS PostgreSQL provider
import boto3
import pickle
from flask import session
from boto3.session import Session
from .aws_regions import AWS_REGIONS
class RDS():
def __init__(self, access_key, secret_key, session_token=None,
default_region='ap-south-1'):
self._clients = {}
self._access_key = access_key
self._secret_key = secret_key
self._session_token = session_token
self._default_region = default_region
##########################################################################
# AWS Helper functions
##########################################################################
def _get_aws_client(self, type):
""" Create/cache/return an AWS client object """
if type in self._clients:
return self._clients[type]
session = boto3.Session(
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
aws_session_token=self._session_token
)
self._clients[type] = session.client(
type, region_name=self._default_region)
return self._clients[type]
def get_available_db_version(self, engine='postgres'):
rds = self._get_aws_client('rds')
return rds.describe_db_engine_versions(Engine=engine)
def get_available_db_instance_class(self, engine='postgres',
engine_version='9.6'):
rds = self._get_aws_client('rds')
_instances = rds.describe_orderable_db_instance_options(
Engine=engine,
EngineVersion=engine_version)
_instances_list = _instances['OrderableDBInstanceOptions']
_marker = _instances['Marker'] if 'Marker' in _instances else None
while _marker:
_tmp_instances = rds.describe_orderable_db_instance_options(
Engine=engine,
EngineVersion=engine_version,
Marker=_marker)
_instances_list = [*_instances_list,
*_tmp_instances['OrderableDBInstanceOptions']]
_marker = _tmp_instances['Marker'] if 'Marker'\
in _tmp_instances else None
return _instances_list
def get_db_instance(self, instance_name):
rds = self._get_aws_client('rds')
return rds.describe_db_instances(
DBInstanceIdentifier=instance_name)
def validate_credentials(self):
client = self._get_aws_client('sts')
try:
identity = client.get_caller_identity()
return True, identity
except Exception as e:
return False, str(e)
finally:
self._clients.pop('sts')
def verify_aws_credentials(data):
"""Verify Credentials"""
session_token = data['secret']['aws_session_token'] if\
'aws_session_token' in data['secret'] else None
if 'aws' not in session:
session['aws'] = {}
if 'aws_rds_obj' not in session['aws'] or\
session['aws']['secret'] != data['secret']:
_rds = RDS(
access_key=data['secret']['aws_access_key'],
secret_key=data['secret']['aws_secret_access_key'],
session_token=session_token,
default_region=data['secret']['aws_region'])
status, identity = _rds.validate_credentials()
if status:
session['aws']['secret'] = data['secret']
session['aws']['aws_rds_obj'] = pickle.dumps(_rds, -1)
return status, identity
return True, None
def clear_aws_session():
"""Clear AWS Session"""
if 'aws' in session:
session.pop('aws')
def get_aws_db_instances(eng_version):
"""Get AWS DB Instances"""
if 'aws' not in session:
return False, 'Session has not been created yet.'
if not eng_version or eng_version == '' or eng_version == 'undefined':
eng_version = '10.17'
rds_obj = pickle.loads(session['aws']['aws_rds_obj'])
res = rds_obj.get_available_db_instance_class(
engine_version=eng_version)
versions_set = set()
versions = []
for value in res:
versions_set.add(value['DBInstanceClass'])
for value in versions_set:
versions.append({
'label': value,
'value': value
})
return True, versions
def get_aws_db_versions():
"""Get AWS DB Versions"""
if 'aws' not in session:
return False, 'Session has not been created yet.'
rds_obj = pickle.loads(session['aws']['aws_rds_obj'])
db_versions = rds_obj.get_available_db_version()
res = list(filter(lambda val: not val['EngineVersion'].startswith('9.6'),
db_versions['DBEngineVersions']))
versions = []
for value in res:
versions.append({
'label': value['DBEngineVersionDescription'],
'value': value['EngineVersion']
})
return True, versions
def get_aws_regions():
"""Get AWS DB Versions"""
clear_aws_session()
_session = Session()
res = _session.get_available_regions('rds')
regions = []
for value in res:
if value in AWS_REGIONS:
regions.append({
'label': AWS_REGIONS[value] + ' | ' + value,
'value': value
})
return True, regions