From c75fd4db9222f21a66bbcf9b077fee258eb05096 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Wed, 27 May 2020 20:58:03 +0300 Subject: [PATCH] Removed CI and docker scripts (#622) --- ngraph/.ci/buildkite/test_ngtf_build.py | 82 ----- ngraph/.ci/jenkins/jenkins-trigger.groovy | 71 ---- ngraph/.ci/onnx/jenkins/Jenkinsfile | 157 --------- ngraph/.ci/onnx/jenkins/README.md | 26 -- ngraph/.ci/onnx/jenkins/runCI.sh | 119 ------- ngraph/.ci/onnx/onnxruntime/Jenkinsfile | 133 ------- ngraph/.ci/onnx/onnxruntime/proxy.patch | 23 -- ngraph/.ci/travis/centos/Dockerfile | 34 -- ngraph/.ci/travis/run_test.sh | 37 -- ngraph/.ci/travis/ubuntu/Dockerfile | 50 --- ngraph/contrib/docker/Dockerfile.ngraph | 44 --- .../contrib/docker/Dockerfile.ngraph.centos74 | 69 ---- .../docker/Dockerfile.ngraph.centos74_gpu | 48 --- .../docker/Dockerfile.ngraph.ubuntu1604 | 47 --- .../docker/Dockerfile.ngraph.ubuntu1604_gpu | 63 ---- ngraph/contrib/docker/Makefile | 327 ------------------ ngraph/contrib/docker/README.md | 146 -------- .../contrib/docker/build-ngraph-and-test.sh | 132 ------- ngraph/contrib/docker/build-ngraph-docs.sh | 61 ---- ngraph/contrib/docker/docker_cleanup.sh | 36 -- ngraph/contrib/docker/make-dimage.sh | 85 ----- ngraph/contrib/docker/run_as_centos_user.sh | 99 ------ ngraph/contrib/docker/run_as_ubuntu_user.sh | 98 ------ ngraph/contrib/docker/run_as_user.sh | 98 ------ 24 files changed, 2085 deletions(-) delete mode 100644 ngraph/.ci/buildkite/test_ngtf_build.py delete mode 100644 ngraph/.ci/jenkins/jenkins-trigger.groovy delete mode 100644 ngraph/.ci/onnx/jenkins/Jenkinsfile delete mode 100644 ngraph/.ci/onnx/jenkins/README.md delete mode 100644 ngraph/.ci/onnx/jenkins/runCI.sh delete mode 100644 ngraph/.ci/onnx/onnxruntime/Jenkinsfile delete mode 100644 ngraph/.ci/onnx/onnxruntime/proxy.patch delete mode 100644 ngraph/.ci/travis/centos/Dockerfile delete mode 100644 ngraph/.ci/travis/run_test.sh delete mode 100644 ngraph/.ci/travis/ubuntu/Dockerfile delete mode 100644 ngraph/contrib/docker/Dockerfile.ngraph delete mode 100644 ngraph/contrib/docker/Dockerfile.ngraph.centos74 delete mode 100644 ngraph/contrib/docker/Dockerfile.ngraph.centos74_gpu delete mode 100644 ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604 delete mode 100644 ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604_gpu delete mode 100644 ngraph/contrib/docker/Makefile delete mode 100644 ngraph/contrib/docker/README.md delete mode 100644 ngraph/contrib/docker/build-ngraph-and-test.sh delete mode 100644 ngraph/contrib/docker/build-ngraph-docs.sh delete mode 100644 ngraph/contrib/docker/docker_cleanup.sh delete mode 100644 ngraph/contrib/docker/make-dimage.sh delete mode 100644 ngraph/contrib/docker/run_as_centos_user.sh delete mode 100644 ngraph/contrib/docker/run_as_ubuntu_user.sh delete mode 100644 ngraph/contrib/docker/run_as_user.sh diff --git a/ngraph/.ci/buildkite/test_ngtf_build.py b/ngraph/.ci/buildkite/test_ngtf_build.py deleted file mode 100644 index d23c50469d5..00000000000 --- a/ngraph/.ci/buildkite/test_ngtf_build.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python3 -# ============================================================================== -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -import argparse -import errno -import os -from subprocess import check_output, call -import sys -import shutil -import glob -import platform -import json -import shlex - -def command_executor(cmd, verbose=False, msg=None, stdout=None): - ''' - Executes the command. - Example: - - command_executor('ls -lrt') - - command_executor(['ls', '-lrt']) - ''' - if type(cmd) == type([]): #if its a list, convert to string - cmd = ' '.join(cmd) - if verbose: - tag = 'Running COMMAND: ' if msg is None else msg - print(tag + cmd) - if (call(shlex.split(cmd), stdout=stdout) != 0): - raise Exception("Error running command: " + cmd) - -def download(target_name, repo, version): - - # First download to a temp folder - call(["git", "clone", repo, target_name]) - - # Next goto this folder nd determone the name of the root folder - pwd = os.getcwd() - - # Go to the tree - os.chdir(target_name) - - # checkout the specified branch - command_executor(["git", "fetch"], verbose=True) - command_executor(["git", "checkout", version], verbose=True) - - os.chdir(pwd) - -# Get me the current sha for this commit -current_sha = check_output(['git', 'rev-parse', 'HEAD']).strip().decode("utf-8") -print("nGraph SHA: ", current_sha) - -# Download ngraph-bridge -download('ngraph-bridge', 'https://github.com/tensorflow/ngraph-bridge.git', 'master') - -# Run ngraph-bridge-build -pwd = os.getcwd() -os.chdir('ngraph-bridge') -command_executor(['./build_ngtf.py', '--ngraph_version', current_sha]) - -# Now run the tests -os.environ['PYTHONPATH'] = os.getcwd() -command_executor([ - 'python3', - 'test/ci/buildkite/test_runner.py', - '--artifacts', - 'build_cmake/artifacts', - '--test_cpp' -]) - -os.chdir(pwd) diff --git a/ngraph/.ci/jenkins/jenkins-trigger.groovy b/ngraph/.ci/jenkins/jenkins-trigger.groovy deleted file mode 100644 index ace0d796a79..00000000000 --- a/ngraph/.ci/jenkins/jenkins-trigger.groovy +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This script acts as a trigger script for the main ngraph-unittest.groovy -// Jenkins job. This script is part of a Jenkins multi-branch pipeline job -// which can trigger GitHub jobs more effectively than the GitHub Pull -// Request Builder (GHPRB) plugin, in our environment. - -// The original ngraph-unittest job required the following parameters. We -// set these up below as global variables, so we do not need to rewrite the -// original script -- we only need to provide this new trigger hook. 
-// - -String JENKINS_BRANCH = "master" -String TIMEOUTTIME = "3600" - -// Constants -JENKINS_DIR = '.' - -timestamps { - - node("trigger") { - - deleteDir() // Clear the workspace before starting - - // Clone the cje-algo directory which contains our Jenkins groovy scripts - def sleeptime=0 - retry(count: 3) { - sleep sleeptime; sleeptime = 10 - sh "git clone -b $JENKINS_BRANCH https://gitlab.devtools.intel.com/AIPG/AlgoVal/cje-algo ." - } - - // Call the main job script. - // - // NOTE: We keep the main job script in github.intel.com because it may - // contain references to technology which has not yet been released. - // - - echo "Calling ngraph-ci-premerge.groovy" - def ngraphCIPreMerge = load("${JENKINS_DIR}/ngraph-ci-premerge.groovy") - - ngraphCIPreMerge(premerge: 'true', - prURL: CHANGE_URL, - prTitle: CHANGE_TITLE, - prTarget: CHANGE_TARGET, - prAuthor: CHANGE_AUTHOR, - jenkinsBranch: JENKINS_BRANCH, - timeoutTime: TIMEOUTTIME, - useMBPipelineSCM: 'true', - checkoutBranch: '-UNDEFINED-BRANCH-' - ) - - echo "ngraph-ci-premerge.groovy completed" - - } // End: node - -} // End: timestamps - -echo "Done" - diff --git a/ngraph/.ci/onnx/jenkins/Jenkinsfile b/ngraph/.ci/onnx/jenkins/Jenkinsfile deleted file mode 100644 index 94f88ea9926..00000000000 --- a/ngraph/.ci/onnx/jenkins/Jenkinsfile +++ /dev/null @@ -1,157 +0,0 @@ -// ****************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ****************************************************************************** - -// workaround for aborting previous builds on PR update -// TODO: Move to plugin based solution as soon as it's available -@NonCPS -def killPreviousRunningJobs() { - def jobname = env.JOB_NAME - def buildnum = env.BUILD_NUMBER.toInteger() - - def job = Jenkins.instance.getItemByFullName(jobname) - for (build in job.builds) { - if (!build.isBuilding()){ - continue; - } - if (buildnum == build.getNumber().toInteger()){ - continue; - } - echo "Kill task = ${build}" - build.doStop(); - } -} - -def notifyByEmail(def gitPrInfo) { - stage('Notify') { - String notifyPeople = "${gitPrInfo.prAuthorEmail}, ${gitPrInfo.commitAuthorEmail}" - emailext ( - subject: "nGraph-Onnx CI: PR ${CHANGE_ID} ${currentBuild.result}!", - body: """ - - - - - - - - -
-                    Status: ${currentBuild.result}
-                    Pull Request Title: ${CHANGE_TITLE}
-                    Pull Request: ${CHANGE_ID}
-                    Branch: ${CHANGE_BRANCH}
-                    Commit Hash: ${gitPrInfo.commitHash}
-                    Commit Subject: ${gitPrInfo.commitSubject}
-                    Jenkins Build: ${BUILD_NUMBER}
- """, - to: "${notifyPeople}" - ) - } -} - -def getGitPrInfo(String project) { - def gitPrInfo = [ - prAuthorEmail : "", - commitAuthorEmail : "", - commitHash : "", - commitSubject : "" - ] - try { - dir ("${WORKDIR}/${project}") { - gitPrInfo.prAuthorEmail = sh (script: 'git log -1 --pretty="format:%ae" ', returnStdout: true).trim() - gitPrInfo.commitAuthorEmail = sh (script: 'git log -1 --pretty="format:%ce" ', returnStdout: true).trim() - gitPrInfo.commitHash = sh (script: 'git log -1 --pretty="format:%H" ', returnStdout: true).trim() - gitPrInfo.commitSubject = sh (script: 'git log -1 --pretty="format:%s" ', returnStdout: true).trim() - } - } - catch(e) { - echo "Failed to retrieve ${project} git repository information!" - echo "ERROR: ${e}" - } - return gitPrInfo -} - -def checkoutSources() { - branchExists = sh (script: "git ls-remote --heads ${NGRAPH_ONNX_REPO_ADDRESS} ${NGRAPH_ONNX_BRANCH}", - returnStdout: true) - if(!branchExists) { - NGRAPH_ONNX_BRANCH = "master" - } - sh "rm -rf ${WORKSPACE}/*" - dir ("${WORKDIR}/ngraph") { - retry(3) { - checkout([$class: 'GitSCM', - branches: [[name: "${NGRAPH_BRANCH}"]], - doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'CloneOption', timeout: 120]], submoduleCfg: [], - userRemoteConfigs: [[credentialsId: "${JENKINS_GITHUB_CREDENTIAL_ID}", - url: "${NGRAPH_REPO_ADDRESS}"]]]) - } - } - dir ("${WORKDIR}/ngraph-onnx") { - retry(3) { - checkout([$class: 'GitSCM', - branches: [[name: "${NGRAPH_ONNX_BRANCH}"]], - doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'CloneOption', timeout: 120]], submoduleCfg: [], - userRemoteConfigs: [[credentialsId: "${JENKINS_GITHUB_CREDENTIAL_ID}", - url: "${NGRAPH_ONNX_REPO_ADDRESS}"]]]) - } - } -} - -pipeline { - agent { - label "ngraph_onnx && controller" - } - environment { - PROJECT_NAME = "ngraph" - WORKDIR = "${WORKSPACE}/${BUILD_NUMBER}" - JENKINS_GITHUB_CREDENTIAL_ID = "7157091e-bc04-42f0-99fd-dc4da2922a55" - CI_DIR = "ngraph-onnx/.ci/jenkins" - NGRAPH_ONNX_REPO_ADDRESS = "git@github.com:NervanaSystems/ngraph-onnx.git" - NGRAPH_REPO_ADDRESS = "git@github.com:NervanaSystems/ngraph.git" - NGRAPH_ONNX_BRANCH = "${CHANGE_BRANCH}" - NGRAPH_BRANCH = "${CHANGE_BRANCH}" - } - options { - skipDefaultCheckout true - } - stages { - stage ("Checkout") { - steps { - script { - killPreviousRunningJobs() - checkoutSources() - } - } - } - stage ("Parallel CI") { - steps { - script { - dir("${WORKDIR}/${CI_DIR}") { - CI_FUNCTIONS = load "ci.groovy" - dockerfilesDir = "./dockerfiles" - parallelStagesMap = CI_FUNCTIONS.getConfigurationsMap(dockerfilesDir, NGRAPH_ONNX_BRANCH, NGRAPH_BRANCH) - parallel parallelStagesMap - } - } - } - } - } - post { - failure { - script { - gitPrInfo = getGitPrInfo(PROJECT_NAME) - notifyByEmail(gitPrInfo) - } - } - cleanup { - deleteDir() - } - } -} diff --git a/ngraph/.ci/onnx/jenkins/README.md b/ngraph/.ci/onnx/jenkins/README.md deleted file mode 100644 index 37e18ba0488..00000000000 --- a/ngraph/.ci/onnx/jenkins/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# nGraph-ONNX Continuous Integration Script -The proper script running nGraph-ONNX tests can be found in ngraph-onnx repository: -https://github.com/NervanaSystems/ngraph-onnx/tree/master/.ci/jenkins/ci.groovy - -Jenkinsfile in this directory just downloads and runs CI stored in repository mentioned above. -This is due to how Jenkins Multibranch Pipeline jobs are implemented, which don't provide an option to automatically clone different repository than the one for which the build is triggered. 
- -# MANUAL REPRODUCTION INSTRUCTION -From directory containing CI scripts execute runCI.sh bash script: - -``` -cd /.ci/onnx/jenkins/ -./runCI.sh -``` - -To remove all items created during script execution (files, directories, docker images and containers), run: - -``` -./runCI.sh --cleanup -``` - -After first run, executing the script will rerun tox tests. To rebuild nGraph and run tests use: - -``` -./runCI.sh --rebuild -``` diff --git a/ngraph/.ci/onnx/jenkins/runCI.sh b/ngraph/.ci/onnx/jenkins/runCI.sh deleted file mode 100644 index 3a04bcc7dc5..00000000000 --- a/ngraph/.ci/onnx/jenkins/runCI.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash - -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -NGRAPH_ONNX_REPO="https://github.com/NervanaSystems/ngraph-onnx" -CI_PATH="$(pwd)" -CI_ROOT=".ci/onnx/jenkins" -REPO_ROOT="${CI_PATH%$CI_ROOT}" -DOCKER_CONTAINER="ngraph-onnx_ci_reproduction" - -# Function run() builds image with requirements needed to build ngraph and run onnx tests, runs container and executes tox tests -function run() { - set -x - set -e - - cd ./dockerfiles - docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f=./ubuntu_16_04.dockerfile -t ngraph-onnx:ubuntu-16_04 . 
- - cd "${CI_PATH}" - if [[ -z $(docker ps -a | grep -i "${DOCKER_CONTAINER}") ]]; - then - docker run -h "$(hostname)" --privileged --name "${DOCKER_CONTAINER}" -v "${REPO_ROOT}":/root \ - -d ngraph-onnx:ubuntu-16_04 tail -f /dev/null - BUILD="TRUE" - fi - - if [[ "${BUILD}" == "TRUE" ]]; - then - BUILD_NGRAPH_CMD='cd /root && \ - mkdir -p ./build && \ - cd ./build && \ - cmake ../ -DNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE \ - -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX=/root/ngraph_dist && \ - make -j $(lscpu --parse=CORE | grep -v '"'#'"' | sort | uniq | wc -l) && \ - make install && \ - cd /root/python && \ - if [[ -z $(ls /root/ngraph-onnx 2>/dev/null) ]]; then - git clone --recursive https://github.com/pybind/pybind11.git; - fi - export PYBIND_HEADERS_PATH=/root/python/pybind11 && \ - export NGRAPH_CPP_BUILD_PATH=/root/ngraph_dist && \ - export NGRAPH_ONNX_IMPORT_ENABLE=TRUE && \ - python3 setup.py bdist_wheel && \ - cd /root' - docker exec "${DOCKER_CONTAINER}" bash -c "${BUILD_NGRAPH_CMD}" - fi - - CLONE_CMD='cd /root &&\ - if [[ -z $(ls /root/ngraph-onnx 2>/dev/null) ]]; then - git clone '"${NGRAPH_ONNX_REPO}"'; - fi' - docker exec "${DOCKER_CONTAINER}" bash -c "${CLONE_CMD}" - NGRAPH_WHL=$(docker exec ${DOCKER_CONTAINER} find /root/python/dist/ -name "ngraph*.whl") - docker exec -e TOX_INSTALL_NGRAPH_FROM="${NGRAPH_WHL}" -e NGRAPH_BACKEND=CPU "${DOCKER_CONTAINER}" tox -c /root/ngraph-onnx/ - docker exec -e TOX_INSTALL_NGRAPH_FROM="${NGRAPH_WHL}" -e NGRAPH_BACKEND=INTEPRETER "${DOCKER_CONTAINER}" tox -c /root/ngraph-onnx/ -} - -# Function cleanup() removes items related to nGraph, created during script execution -function cleanup_ngraph() { - set -x - - docker exec "${DOCKER_CONTAINER}" bash -c 'rm -rf /root/build/* /root/ngraph_dist /root/python/dist' -} - -# Function cleanup() removes items created during script execution -function cleanup() { - set -x - - docker exec "${DOCKER_CONTAINER}" bash -c "rm -rf /root/ngraph_dist /root/ngraph-onnx/.tox /root/ngraph-onnx/.onnx \ - /root/ngraph-onnx/__pycache__ /root/ngraph-onnx/ngraph_onnx.egg-info /root/ngraph-onnx/cpu_codegen" - docker exec "${DOCKER_CONTAINER}" bash -c 'rm -rf $(find /root/ -user root)' - docker rm -f "${DOCKER_CONTAINER}" -} - -PATTERN='[-a-zA-Z0-9_]*=' -for i in "$@" -do - case $i in - --help*) - printf "Script builds nGraph and runs tox tests inside docker container. - Every execution after first run is going to run tox tests again. - To rebuild nGraph and run tests again use --rebuild parameter. - - Following parameters are available: - - --help displays this message - --cleanup removes docker container and files created during script execution - --rebuild rebuilds nGraph and runs tox tests - " - exit 0 - ;; - --cleanup*) - cleanup - exit 0 - ;; - --rebuild*) - cleanup_ngraph - BUILD="TRUE" - ;; - esac -done - -set -x - -run diff --git a/ngraph/.ci/onnx/onnxruntime/Jenkinsfile b/ngraph/.ci/onnx/onnxruntime/Jenkinsfile deleted file mode 100644 index 1ff065f52ce..00000000000 --- a/ngraph/.ci/onnx/onnxruntime/Jenkinsfile +++ /dev/null @@ -1,133 +0,0 @@ -// ****************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ****************************************************************************** - -try{ if(LABEL.trim() == "") {throw new Exception();} }catch(Exception e){LABEL="onnx && ci"}; echo "${LABEL}" - -NGRPAH_REPOSITORY = "https://github.com/NervanaSystems/ngraph.git" -NGRAPH_COMMIT_HASH = "${ghprbActualCommit}" // particular nGraph PR commit hash - -ONNX_REPOSITORY = "https://github.com/NervanaSystems/onnxruntime.git" -ONNX_RUNTIME_BRANCH = "release" - -def main(){ - timeout(activity: true, time: 15) { - try{ - stage("CloneRepos"){ - CloneRepos() - } - stage("Apply Patch"){ - ApplyPatch() - } - stage("Onnx Models"){ - BuildAndTest() - } - } - catch(e) { - // Set result to ABORTED if exception contains exit code of a process interrupted by SIGTERM - if ("$e".contains("143")) { - currentBuild.result = "ABORTED" - } else { - currentBuild.result = "FAILURE" - } - } - stage("Clean"){ - Clean() - } - } -} - - -def CloneRepos() { - dir("ngraph"){ - checkout([ - $class: 'GitSCM', - branches: [[name: "${NGRAPH_COMMIT_HASH}"]], - doGenerateSubmoduleConfigurations: false, - extensions: [[ - $class: 'SubmoduleOption', - disableSubmodules: false, - parentCredentials: true, - recursiveSubmodules: true, - reference: '', - trackingSubmodules: false, - timeout: 15 - ]], - submoduleCfg: [], - userRemoteConfigs: [[ - refspec: '+refs/pull/*:refs/remotes/origin/pr/*', - url: "${NGRPAH_REPOSITORY}" - ]] - ]) - } - dir("onnxruntime") { - checkout([ - $class: 'GitSCM', - branches: [[name: "${ONNX_RUNTIME_BRANCH}"]], - doGenerateSubmoduleConfigurations: false, - extensions: [[ - $class: 'SubmoduleOption', - disableSubmodules: false, - parentCredentials: true, - recursiveSubmodules: true, - reference: '', - trackingSubmodules: false, - timeout: 15 - ]], - submoduleCfg: [], - userRemoteConfigs: [[ - url: "${ONNX_REPOSITORY}" - ]] - ]) - } -} - -def ApplyPatch(){ - dir("onnxruntime"){ - echo "Update cmake/external/ngraph.cmake with ${NGRAPH_COMMIT_HASH}" - sh """ - sed -i 's/set(ngraph_TAG ".*")/set(ngraph_TAG "${NGRAPH_COMMIT_HASH}")/g' cmake/external/ngraph.cmake - grep -q "${NGRAPH_COMMIT_HASH}" cmake/external/ngraph.cmake - """ - echo "Add proxy to tools/ci_build/github/linux/docker/Dockerfile.ubuntu" - sh """ - sed -i 's|{HTTP_PROXY}|${env.http_proxy}|g' ../ngraph/.ci/onnx/onnxruntime/proxy.patch - sed -i 's|{SOCKS_PROXY}|${env.socks_proxy}|g' ../ngraph/.ci/onnx/onnxruntime/proxy.patch - grep -q "${env.http_proxy}" ../ngraph/.ci/onnx/onnxruntime/proxy.patch - git apply ../ngraph/.ci/onnx/onnxruntime/proxy.patch - """ - } -} - -def BuildAndTest(){ - dir("onnxruntime"){ - sh "mkdir -p `pwd`/build/models && chmod 777 `pwd`/build/models" - sh """ - //!/bin/bash - ./tools/ci_build/github/linux/run_dockerbuild.sh \ - -o ubuntu16.04 \ - -d ngraph \ - -r `pwd`/build -x '--use_ngraph --use_full_protobuf --test_data_url https://onnxruntimetestdata.blob.core.windows.net/models/20190327.zip --test_data_checksum 45166d81c021c8aae212b53c92101792' - """ - } -} - -def Clean(){ - deleteDir() -} - -node(LABEL) { - main() -} diff --git a/ngraph/.ci/onnx/onnxruntime/proxy.patch 
b/ngraph/.ci/onnx/onnxruntime/proxy.patch deleted file mode 100644 index 41a5971f6cc..00000000000 --- a/ngraph/.ci/onnx/onnxruntime/proxy.patch +++ /dev/null @@ -1,23 +0,0 @@ -diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu -index bdff95e1..cd9c0008 100644 ---- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu -+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu -@@ -3,6 +3,18 @@ FROM ubuntu:${OS_VERSION} - - ARG PYTHON_VERSION=3.5 - -+ENV http_proxy={HTTP_PROXY} -+ENV socks_proxy={SOCKS_PROXY} -+ENV https_proxy={HTTP_PROXY} -+ENV ftp_proxy={HTTP_PROXY} -+ENV rsync_proxy={HTTP_PROXY} -+ENV no_proxy=intel.com,.intel.com,localhost -+ENV HTTP_PROXY={HTTP_PROXY} -+ENV HTTPS_PROXY={HTTP_PROXY} -+ENV FTP_PROXY={HTTP_PROXY} -+ENV SOCKS_PROXY={SOCKS_PROXY} -+ENV NO_PROXY=intel.com,.intel.com,localhost -+ - ADD scripts /tmp/scripts - RUN /tmp/scripts/install_ubuntu.sh -p ${PYTHON_VERSION} && /tmp/scripts/install_deps.sh && rm -rf /tmp/scripts - diff --git a/ngraph/.ci/travis/centos/Dockerfile b/ngraph/.ci/travis/centos/Dockerfile deleted file mode 100644 index 1dd7865deac..00000000000 --- a/ngraph/.ci/travis/centos/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM centos:7 - -RUN yum -y update && \ - yum -y --enablerepo=extras install epel-release && \ - yum -y install \ - gcc gcc-c++ \ - cmake3 make \ - git \ - curl unzip \ - autoconf automake autogen libtool \ - wget patch diffutils zlib-devel ncurses-devel \ - python python-devel python-setuptools \ - doxygen graphviz \ - which \ - 'perl(Data::Dumper)' - -RUN ln -s /usr/bin/cmake3 /usr/bin/cmake - -RUN cmake --version -RUN make --version -RUN gcc --version -RUN c++ --version - -# Install nGraph in /root/ngraph -COPY . /root/ngraph -RUN mkdir /root/ngraph/build -WORKDIR /root/ngraph/build - -RUN cmake .. 
-L -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE - -RUN make -j"$(nproc)" -RUN make install - -CMD cd /root/ngraph/build && make unit-test-check diff --git a/ngraph/.ci/travis/run_test.sh b/ngraph/.ci/travis/run_test.sh deleted file mode 100644 index 770ec38988a..00000000000 --- a/ngraph/.ci/travis/run_test.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e - -echo "--TRAVIS VARIABLES--" -echo "TRAVIS_OS_NAME:" ${TRAVIS_OS_NAME} -echo "TRAVIS_BUILD_DIR:" ${TRAVIS_BUILD_DIR} - -echo "--CUSTOM VARIABLES--" -echo "TASK:" ${TASK} -echo "OS:" ${OS} - -# LINUX TASKS -if [ ${TRAVIS_OS_NAME} == "linux" ]; then - - if [ ${TASK} == "cpp_test" ]; then - docker run -w '/root/ngraph/build' test_ngraph make unit-test-check - fi - - if [ ${TASK} == "python2_test" ]; then - docker run -w '/root/ngraph/python' -e NGRAPH_ONNX_IMPORT_ENABLE=TRUE test_ngraph tox -e py27 - fi - - if [ ${TASK} == "python3_test" ]; then - docker run -w '/root/ngraph/python' -e NGRAPH_ONNX_IMPORT_ENABLE=TRUE test_ngraph tox -e py3 - fi - -fi - -# MacOS TASKS -if [ ${TRAVIS_OS_NAME} == "osx" ]; then - - if [ ${TASK} == "cpp_test" ]; then - cd ${TRAVIS_BUILD_DIR}/build - make unit-test-check - fi - -fi diff --git a/ngraph/.ci/travis/ubuntu/Dockerfile b/ngraph/.ci/travis/ubuntu/Dockerfile deleted file mode 100644 index f807f58f847..00000000000 --- a/ngraph/.ci/travis/ubuntu/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM ubuntu:16.04 - -# nGraph dependencies -RUN apt-get update && apt-get install -y \ - build-essential \ - cmake \ - clang-3.9 \ - clang-format-3.9 \ - git \ - curl \ - zlib1g \ - zlib1g-dev \ - libtinfo-dev \ - unzip \ - autoconf \ - automake \ - libtool && \ - apt-get clean autoclean && apt-get autoremove -y - -# Python dependencies -RUN apt-get update && apt-get install -y \ - python \ - python3 \ - python-dev \ - python3-dev \ - python-pip && \ - apt-get clean autoclean && apt-get autoremove -y - -RUN pip install --upgrade pip setuptools wheel -RUN pip install tox - -# Prepare nGraph -COPY . /root/ngraph -RUN mkdir /root/ngraph/build -WORKDIR /root/ngraph/build -RUN cmake .. -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="~/ngraph_dist" -RUN make -j"$(nproc)" -RUN make install - -# Prepare nGraph Python API -WORKDIR /root/ngraph/python -RUN git clone --recursive https://github.com/pybind/pybind11.git -ENV NGRAPH_CPP_BUILD_PATH /root/ngraph_dist -ENV LD_LIBRARY_PATH /root/ngraph_dist/lib -ENV PYBIND_HEADERS_PATH /root/ngraph/python/pybind11 - -RUN df -k - -# Test nGraph and nGraph Python API -CMD cd /root/ngraph/build && make unit-test-check ; cd /root/ngraph/python && NGRAPH_ONNX_IMPORT_ENABLE=TRUE tox diff --git a/ngraph/contrib/docker/Dockerfile.ngraph b/ngraph/contrib/docker/Dockerfile.ngraph deleted file mode 100644 index 20ab6c3dd13..00000000000 --- a/ngraph/contrib/docker/Dockerfile.ngraph +++ /dev/null @@ -1,44 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ****************************************************************************** - -# Environment to build and unit-test ngraph - -FROM ubuntu:16.04 - -RUN apt-get update && apt-get install -y \ - build-essential cmake \ - clang-3.9 clang-format-3.9 \ - git \ - wget patch diffutils zlib1g-dev libtinfo-dev \ - doxygen graphviz \ - python-sphinx python3-sphinx \ - python-pip - -RUN apt-get clean autoclean && \ - apt-get autoremove -y -RUN pip install --upgrade pip - -# allows for make html build under the doc/source directory as an interim build process -RUN pip install sphinx -RUN pip install breathe - -# need numpy to successfully build docs for python_api -RUN pip install numpy - -# release notes need this markdown extension -# RUN python3 -m pip install m2r - -WORKDIR /home diff --git a/ngraph/contrib/docker/Dockerfile.ngraph.centos74 b/ngraph/contrib/docker/Dockerfile.ngraph.centos74 deleted file mode 100644 index 7e279ec0082..00000000000 --- a/ngraph/contrib/docker/Dockerfile.ngraph.centos74 +++ /dev/null @@ -1,69 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -# Environment to build and unit-test ngraph on centos74 -# with gcc 4.8.5 -# with python 2.7 -# with cmake3 -# LLVM/clang will be built from source - -FROM centos:7 - -# Added install for perl Data::Dumper to avoid a compile error -# Sphinx docs specify python-sphinx package -RUN yum -y update && \ - yum -y --enablerepo=extras install epel-release && \ - yum -y install \ - gcc gcc-c++ \ - cmake3 make \ - git \ - wget patch diffutils zlib-devel ncurses-devel libtinfo-dev \ - python python-devel python-sphinx python-setuptools \ - doxygen graphviz \ - which \ - 'perl(Data::Dumper)' - -RUN ln -s /usr/bin/cmake3 /usr/bin/cmake - -RUN cmake --version -RUN make --version -RUN gcc --version -RUN c++ --version - -RUN easy_install pip -RUN pip install virtualenv==16.7.10 - - -# Install some pip packages - -# need to use sphinx version 1.6 to build docs -# installing with apt-get install python-sphinx installs sphinx version 1.3.6 only -# added install for python-pip above and -# installed sphinx with pip to get the updated version 1.6.5 -# allows for make html build under the doc/source directory as an interim build process -RUN pip install sphinx - -# breathe package required to build documentation -RUN pip install breathe - -# need numpy to successfully build docs for python_api -RUN pip install numpy - -# release notes need this markdown extension -# RUN python3 -m pip install m2r - - -WORKDIR /home diff --git a/ngraph/contrib/docker/Dockerfile.ngraph.centos74_gpu b/ngraph/contrib/docker/Dockerfile.ngraph.centos74_gpu deleted file mode 100644 index 9886a592ea8..00000000000 --- a/ngraph/contrib/docker/Dockerfile.ngraph.centos74_gpu +++ /dev/null @@ -1,48 +0,0 @@ -# Environment to build and unit-test ngraph on centos74 for GPU 
backend -# with gcc 4.8.5 -# with python 2.7 -# with pre-built cmake3 - -FROM nvidia/cuda:8.0-cudnn7-devel-centos7 - -RUN yum -y update && \ - yum -y --enablerepo=extras install epel-release && \ - yum -y install \ - gcc gcc-c++ \ - cmake3 make \ - git \ - wget patch diffutils zlib-devel ncurses-devel libtinfo-dev \ - python python-devel python-setuptools \ - doxygen graphviz \ - which \ - 'perl(Data::Dumper)' - -RUN ln -s /usr/bin/cmake3 /usr/bin/cmake -RUN ln -s /usr/local/cuda/include/cudnn.h /usr/local/cuda/include/cudnn_v7.h - -RUN cmake --version -RUN make --version -RUN gcc --version -RUN c++ --version - -RUN easy_install pip -RUN pip install virtualenv==16.7.10 - -# Install some pip packages - -# need to use sphinx version 1.6 to build docs -# installing with apt-get install python-sphinx installs sphinx version 1.3.6 only -# added install for python-pip above and -# installed sphinx with pip to get the updated version 1.6.5 -# allows for make html build under the doc/source directory as an interim build process -RUN pip install sphinx - -# breathe package required to build documentation -RUN pip install breathe - -# need numpy to successfully build docs for python_api -RUN pip install numpy - -# RUN python3 -m pip install m2r - -WORKDIR /home diff --git a/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604 b/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604 deleted file mode 100644 index ac395763300..00000000000 --- a/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604 +++ /dev/null @@ -1,47 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ****************************************************************************** - -# Environment to build and unit-test ngraph - -FROM ubuntu:16.04 - -RUN apt-get update && apt-get install -y \ - build-essential cmake \ - clang-3.9 clang-format-3.9 \ - git \ - wget patch diffutils zlib1g-dev libtinfo-dev \ - doxygen graphviz \ - python-pip python3-sphinx - -RUN apt-get clean autoclean && \ - apt-get autoremove -y -RUN pip install --upgrade pip - -# need to use sphinx version 1.6 to build docs -# installing with apt-get install python-sphinx installs sphinx version 1.3.6 only -# added install for python-pip above and -# installed sphinx with pip to get the updated version 1.6.5 -# allows for make html build under the doc/source directory as an interim build process -RUN pip install sphinx -RUN pip install breathe - -# need numpy to successfully build docs for python_api -RUN pip install numpy - -# release notes need this markdown extension -# RUN python3 -m pip install m2r - -WORKDIR /home diff --git a/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604_gpu b/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604_gpu deleted file mode 100644 index d3aeb8551d1..00000000000 --- a/ngraph/contrib/docker/Dockerfile.ngraph.ubuntu1604_gpu +++ /dev/null @@ -1,63 +0,0 @@ -# ngraph-neon.cpu dockerfile used to build and test ngraph-neon on gpu platforms - -FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04 - -# try to get around issue with badsig -#https://github.com/NVIDIA/nvidia-docker/issues/619 (with devel image) (based on this issue added this) -RUN rm /etc/apt/sources.list.d/cuda.list - -# removing nvidia-ml.list file to avoid apt-get update error -# "The method driver /usr/lib/apt/methods/https could not be found." -RUN rm /etc/apt/sources.list.d/nvidia-ml.list - -RUN apt-get update && \ - apt-get install -y sudo curl apt-transport-https && \ - apt-get clean autoclean && \ - apt-get autoremove -y -RUN curl http://developer.download.nvidia.com/compute/cuda/repos/GPGKEY | sudo apt-key add - - -# install standard python 2 and 3 environment stuff -RUN apt-get update && \ - apt-get install -y python-dev python-pip software-properties-common && \ - apt-get clean autoclean && \ - apt-get autoremove -y -RUN pip install --upgrade pip -RUN pip install virtualenv==16.7.10 pytest - -RUN apt-get update && \ - apt-get install -y python3 python3-pip python3-dev python3-venv && \ - apt-get clean autoclean && \ - apt-get autoremove -y -RUN pip3 install virtualenv pytest - -#install onnx dependencies to install ngraph -RUN apt-get update && apt-get install -y protobuf-compiler libprotobuf-dev - -RUN apt-get update && apt-get install -y \ - build-essential cmake \ - clang-3.9 clang-format-3.9 \ - git \ - wget patch diffutils zlib1g-dev libtinfo-dev \ - doxygen graphviz && \ - apt-get clean autoclean && \ - apt-get autoremove -y - -# create a symbolic link for gmake command -RUN ln -s /usr/bin/make /usr/bin/gmake - -# need to use sphinx version 1.6 to build docs -# installing with apt-get install python-sphinx installs sphinx version 1.3.6 only -# added install for python-pip above and -# installed sphinx with pip to get the updated version 1.6.5 -# allows for make html build under the doc/source directory as an interim build process -RUN pip install sphinx - -# breathe package required to build documentation -RUN pip install breathe - -# need numpy to successfully build docs for python_api -RUN pip install numpy - -# RUN python3 -m pip install m2r - -WORKDIR /home diff --git a/ngraph/contrib/docker/Makefile 
b/ngraph/contrib/docker/Makefile deleted file mode 100644 index 7383b460f89..00000000000 --- a/ngraph/contrib/docker/Makefile +++ /dev/null @@ -1,327 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -# Basic Makefile for contrib/docker. This can be expanded later as more targets -# are added. - -# Building LLVM from source has been observed to trigger the oom-killer -# on systems with a large number of cores -# running with make -j -# -# Default is to build with -j 22 for parallel cmake/make. -# Override with make PARALLEL="-j " where -# = the number of make processes to run in parallel -# Turn off with make PARALLEL= -PARALLEL=22 - -# DIR is an internal variable that serves as an anchor to this cloned git -# repository. DIR is mounted into the docker container, so that builds -# can occur within the container on this cloned git repository. DIR should -# not be modified - if it is, then the build system will not work. -DIR = $(realpath ../..) - -# DOCKUSER_HOME is the location of the home directory of the fabricated -# "dockuser" user, used only within the docker containers. "dockuser" is -# created (from the passed-in RUN_UID) to map the docker-caller user's UID to a -# first-class user (/etc/passwd entry, member of sudo group, proper home dir) -# /home/dockuser is also used in other scripts, notably run_as_user.sh, so if -# changed it must be done in other areas for the builds to work. 
-DOCKUSER_HOME=/home/dockuser - -# Use /home/dockuser/ngraph-test, because we run as the user (and not root) -# /root/ngraph-test is not used, because /root is not accessible to user -VOLUME = -v "${DIR}:${DOCKUSER_HOME}/ngraph-test" -GIT_COMMIT = $(shell git rev-parse HEAD) -DBUILD_VERSION = ${GIT_COMMIT}_${PYTHON_VERSION} - -# Look for evidence if GPU backend is supported on the platform -NVIDIA_SMI = $(shell which nvidia-smi) - -# Enable additional options to be added on the command line -ifndef CMAKE_OPTIONS_EXTRA - CMAKE_OPTIONS_EXTRA= -endif - -# Allow linking pre-built third-party cache files (future) -ifndef THIRD_PARTY_CACHE_DIR - THIRD_PARTY_CACHE_DIR= -endif - -# OS set to 'ubuntu1604' by default -# can be overridden on the command line with 'make OS=centos74" -ifndef OS - OS="ubuntu1604" -endif - -DBUILD_DIR = ${DIR}/contrib/docker/.build-${DBUILD_VERSION}_${OS} - -# Configuration for specific reference OS in Dockerfiles -ifeq ("$(shell echo ${OS} | grep centos)","centos74") - RUN_AS_USER_SCRIPT=${DOCKUSER_HOME}/ngraph-test/contrib/docker/run_as_centos_user.sh - CPU_DOCKERFILE=Dockerfile.ngraph.centos74 -else - CPU_DOCKERFILE="Dockerfile.ngraph.ubuntu1604" - RUN_AS_USER_SCRIPT=${DOCKUSER_HOME}/ngraph-test/contrib/docker/run_as_ubuntu_user.sh -endif - -# Build GPU backend if NVIDIA_SMI command is found -# Sets CMAKE_OPTIONS_EXTRA to introduce GPU build configuration to cmake -# Configuration for GPU backend in Dockerfiles with "_gpu" suffix -# The nvidia-docker command must be used for any targets that actually utilize GPU devices -ifneq ("$(shell echo ${NVIDIA_SMI} | grep nvidia-smi)","") - DOCKERFILE=${CPU_DOCKERFILE}_gpu - DOCKER_CMD=nvidia-docker - DOCKER_CMD_MESG=GPU appears to be supported on this platform. Building for GPU and CPU backend support. -else - DOCKERFILE=${CPU_DOCKERFILE} - DOCKER_CMD=docker - DOCKER_CMD_MESG=GPU does not appear to be supported on this platform. Building for CPU backend support only. 
-endif - -# For gcc builds, we do NOT regard warnings as errors -# For clang builds, we DO make warnings into errors -CMAKE_OPTIONS_COMMON=-DNGRAPH_BUILD_DOXYGEN_DOCS=ON -DNGRAPH_BUILD_SPHINX_DOCS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo $(CMAKE_OPTIONS_EXTRA) -CMAKE_OPTIONS_GCC=$(CMAKE_OPTIONS_COMMON) -DCMAKE_INSTALL_PREFIX=${DOCKUSER_HOME}/ngraph-test/BUILD-GCC/ngraph_dist -CMAKE_OPTIONS_CLANG=$(MAKE_OPTIONS_COMMON)-DCMAKE_INSTALL_PREFIX=${DOCKUSER_HOME}/ngraph-test/BUILD-CLANG/ngraph_dist -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 -DNGRAPH_WARNINGS_AS_ERRORS=ON - -CALLER_UID := $(shell id -u) -CALLER_GID := $(shell id -g) - -# Default version is python 2, but can be switched to 3 from command -# line -PYTHON_VERSION = 2 - -# Some targets are DEPRECATED and will be removed at a later date: check_cpu build_ngraph_cpu -# These DEPRECATED targets are currently included for Jenkins job compatibility with older dev branches -# Please see comments for individual targets for more details -.PHONY: clean build_docker_image build_gcc check_gcc build_clang check_clang install_gcc install_clang shell check_cpu build_all build_ngraph_cpu - -DOCKER_BUILD=docker build --rm=true - -ifdef http_proxy -DOCKER_BUILD+=--build-arg http_proxy=$(http_proxy) -DOCKER_RUN_ENV+=--env "http_proxy=$(http_proxy)" -endif - -ifdef https_proxy -DOCKER_BUILD+=--build-arg https_proxy=$(https_proxy) -DOCKER_RUN_ENV+=--env "https_proxy=$(https_proxy)" -endif - -all: check_gcc check_clang - -# Docker actions - -# Isolate specific dockerfiles in a .build_* subdirectory -expand_dockerfile_templates: - @echo "OS=${OS}" - @echo "DOCKERFILE=${DOCKERFILE}" - @echo "RUN_AS_USER_SCRIPT=${RUN_AS_USER_SCRIPT}" - cd "${DIR}"/contrib/docker - mkdir "${DBUILD_DIR}" || true - sed -e 's/\(FROM ngraph.*\)/\1:${DBUILD_VERSION}/' ${DOCKERFILE} > "${DBUILD_DIR}/Dockerfile.build_ngraph_${OS}" - -build_docker_image: expand_dockerfile_templates - @echo "OS=${OS}" - @echo ${DBUILD_DIR} - export CONTEXTDIR=${DBUILD_DIR};export DOCKER_TAG=build_ngraph_${OS};./make-dimage.sh - docker tag build_ngraph_${OS}:latest build_ngraph_${OS}:${DBUILD_VERSION} - -build_docker: build_docker_image - -# Build docs -docs: sphinx_doc - -# Docs build does not depend on GPU dependencies -sphinx_doc: build_docker_image - # sphinx html docs build - docker run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-docs.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Build -build_all: build_gcc build_clang - -# Build targets ALWAYS clean build directories (BUILD-GCC, BUILD-CLANG) prior to building -# Always use docker command to build docker images -# nvidia-docker command is not appropriate -build_gcc: build_docker_image - @echo "" - @echo "${DOCKER_CMD_MESG}" - @echo "" - docker run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-GCC \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN='build_gcc' \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Build targets ALWAYS clean build directories (BUILD-GCC, BUILD-CLANG) prior to building -# Always use docker command to build docker images -# 
nvidia-docker command is not appropriate -build_clang: build_docker_image - @echo "" - @echo "${DOCKER_CMD_MESG}" - @echo "" - docker run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-CLANG \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN='build_clang' \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Check (run unit-tests) -check_all: check_gcc check_clang - -# Always use the platform-specific docker command to run unit tests -# ngraph make check target executes unit-test-check and style-check -check_gcc: build_gcc - ${DOCKER_CMD} run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-GCC \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN=check_gcc \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Always use the platform-specific docker command to run unit tests -# ngraph make check target executes unit-test-check and style-check -check_clang: build_clang - ${DOCKER_CMD} run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-CLANG \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN=check_clang \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Always use the platform-specific docker command to run unit tests -unit_test_check_gcc: build_gcc - ${DOCKER_CMD} run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-GCC \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN='unit-test-check_gcc' \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Always use the platform-specific docker command to run unit tests -unit_test_check_clang: build_clang - ${DOCKER_CMD} run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-CLANG \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN='unit-test-check_clang' \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -style_check_clang: build_clang - -# Install - -install_all: install_gcc install_clang - -# install targets do not depend on GPU dependencies -# no unit tests are executed -# build prerequisites include GPU dependencies in the docker image automatically -install_gcc: build_gcc - docker run --rm --tty \ - ${VOLUME} 
\ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-GCC \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN=install_gcc \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# install targets do not depend on GPU dependencies -# no unit tests are executed -# build prerequisites include GPU dependencies in the docker image automatically -install_clang: build_clang - docker run --rm --tty \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env BUILD_SUBDIR=BUILD-CLANG \ - --env CMAKE_OPTIONS_EXTRA="${CMAKE_OPTIONS_EXTRA}" \ - --env PARALLEL=${PARALLEL} \ - --env THIRD_PARTY_CACHE_DIR=${THIRD_PARTY_CACHE_DIR} \ - --env CMD_TO_RUN=install_clang \ - --env RUN_UID="$(shell id -u)" \ - --env RUN_CMD="${DOCKUSER_HOME}/ngraph-test/contrib/docker/build-ngraph-and-test.sh" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Interactive shell - -# Always use the platform-specific docker command for the interactive shell -shell: build_docker_image - # "make shell" runs an interactive shell in the docker image, for debugging - @echo "${DOCKER_CMD_MESG}" - ${DOCKER_CMD} run --rm --tty --interactive \ - ${VOLUME} \ - ${DOCKER_RUN_ENV} \ - --env RUN_UID="$(shell id -u)" \ - "build_ngraph_${OS}:${DBUILD_VERSION}" \ - sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}" - -# Clean - -clean: - rm -f "${DIR}"/contrib/docker/.build-*/Dockerfile.* || echo "keep going if files are not present" - rmdir "${DIR}"/contrib/docker/.build-* || echo "keep going if directory is not present" - rm -fr "${DIR}"/BUILD-GCC - rm -fr "${DIR}"/BUILD-CLANG - rm -fr "${DIR}"/BUILD-DOCS diff --git a/ngraph/contrib/docker/README.md b/ngraph/contrib/docker/README.md deleted file mode 100644 index ac331e45b71..00000000000 --- a/ngraph/contrib/docker/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# Docker Builds for ngraph with a _Reference-OS_ - -## Introduction - -This directory contains a basic build system for creating docker images of the _reference-OS_ on which ngraph builds and unit tests are run. The purpose is to provide reference builds for _Continuous Integration_ used in developing and testing ngraph. - -The `Makefile` provides targets for: - -* Building the _reference-OS_ into a docker image -* Building ngraph and running unit tests in this cloned repo, mounted into the docker image of the _reference-OS_ -* Starting an interactive shell in the _reference-OS_ docker image, with the cloned repo available for manual builds and unit testing - -The _make_ targets are designed to handle all aspects of building the _reference-OS_ docker image, running ngraph builds and unit testing in it, and opening up a session in the docker image for interactive use. You should not need to issue any manual commands (unless you want to). In addition the `Dockerfile.ngraph.*` files provide a description of how each _reference-OS_ environment is built, should you want to build your own server or docker image. - -## Prerequisites - -In order to use the _make_ targets, you will need to do the following: - -* Have *docker* installed on your computer with the docker daemon running. -* For GPU support, also install *nvidia-docker* and start the nvidia-docker daemon. 
-* These scripts assume that you are able to run the `docker` command without using `sudo`. You will need to add your account to the `docker` group so this is possible. -* If your computer (running docker) sits behind a firewall, you will need to have the docker daemon properly configured to use proxies to get through the firewall, so that public docker registries and git repositories can be accessed. -* You should _not_ run `make check_*` targets from a directory in an NFS filesystem, if that NFS filesystem uses _root squash_ (see **Notes** section below). Instead, run `make check_*` targets from a cloned repo in a local filesystem. - -## Make Targets - -The _make_ targets are designed to provide easy commands to run actions using the docker image. All _make_ targets should be issued on the host OS, and _not_ in a docker image. - -GPU support will automatically be included for _make_ targets if the path of the `nvidia-smi` command is returned in response to `which nvidia-smi` on the host OS. - -Most _make_ targets are structured in the form `_`. The `` indicates what you want to do (e.g. build, check, install), while the `` indicates what you want to build with (i.e. gcc or clang). - -* In general, you simply need to run the command **`make check_all`**. This first makes the `build_docker_ngraph` target as a dependency. Then it makes the `build_*` and `check_*` targets, which will build ngraph using _cmake_ and _make_ and then run unit testing. Please keep in mind that `make check_*` targets do not work when your working directory is in an NFS filesystem that uses _root squash_ (see **Notes** section below). - -* Two builds types are supported: building with `gcc` and `clang`. Targets are named `*_gcc` and `*_clang` when they refer to building with a specific compiler, and the `*_all` targets are available to build with both compilers. Output directories are BUILD-GCC and BUILD-CLANG, at the top level. - -* You can also run the command **`make shell`** to start an interactive bash shell inside the docker image. While this is not required for normal builds and unit testing, it allows you to run interactively within the docker image with the cloned repo mounted. Again, `build_docker_ngraph` is made first as a dependency. Please keep in mind that `make shell` does not work when your working directory is in an NFS filesystem that uses _root squash_ (see **Notes** section below). - -* Running the command **`make build_docker_ngraph`** is also available, if you simply want to build the docker image. This target does work properly when your working directory is in an NFS filesystem. - -* Finally, **`make clean`** is available to clean up the BUILD-* and docker build directories. - -Note that all operations performed inside the docker image are run as a regular user, using the `run-as-user.sh` script. This is done to avoid writing root-owned files in mounted filesystems. - -## Examples/Hints - -* To build an Ubuntu 16.04 docker container, compile with gcc 5.4, and run unit tests: - -``` -cd contrib/docker -make check_gcc -``` - -* To build an Ubuntu 16.04 docker container, compile with clang 3.9, and run unit tests: - -``` -cd contrib/docker -make check_clang -``` - -* To build a CentOS 7.4 docker container, compile with gcc 4.8.5, and run unit tests: - -``` -cd contrib/docker -make check_gcc OS=centos74 -``` - -## Helper Scripts - -These helper scripts are included for use in the `Makefile` and automated (Jenkins) jobs. 
**These scripts should _not_ be called directly unless you understand what they do.** - -#### `build-ngraph-docs.sh` - -A helper script to simplify implentation of the make_docs target using docker images. - -#### `build-ngraph-and-test.sh` - -A helper script to simplify implementation of make targets with multiple reference OS environments with different compilers using docker images. - -#### `docker_cleanup.sh` - -A helper script for Jenkins jobs to clean up old exited docker containers and `ngraph_*` docker images. - -#### `make-dimage.sh` - -A helper script to simplify building of docker images for multiple reference OS environments. - -#### `run_as_user.sh` - -A helper script to run as a normal user within the docker container. This is done to avoid writing root-owned files in mounted filesystems. - -#### `run_as_ubuntu_user.sh` - -Same as `run_as_user.sh`, specifically called for _make_ targets with Ubuntu 16.04 docker containers. - -#### `run_as_centos_user.sh` - -A helper script to run as a normal user within a CentOS 7.4 docker container. - -## Notes - -* The top-level `Makefile` in this cloned repo can be used to build and unit-test ngraph _outside_ of docker. This directory is only for building and running unit tests for ngraph in the _reference-OS_ docker image. - -* Due to limitations in how docker mounts work, `make check_*` and `make shell` targets will fail if you try to run them while in a working directory that is in an NFS-mount that has _root squash_ enabled. The cause results from the process in the docker container running as root. When a file or directory is created by root in the mounted directory tree, from within the docker image, the NFS-mount (in the host OS) does not allow a root-created file, leading to a permissions error. This is dependent on whether the host OS performs "root squash" when mounting NFS filesystems. The fix to this is easy: run `make check_*` and `make shell` from a local filesystem. - -* The _make_ targets have been tested with the following docker containers on an Ubuntu 16.04 host OS with docker installed and the docker daemon properly configured. Some adjustments may be needed to run these on other OSes. - -#### Ubuntu 16.04 (default) - -``` -Dockerfile: Dockerfile.ngraph.ubuntu1604_gpu -Reference-OS: Ubuntu 16.04 -GPU Support: Yes -BUILD-GCC: gcc 5.4 -BUILD-CLANG: clang 3.9 -pre-built LLVM -``` -``` -Dockerfile: Dockerfile.ngraph.ubuntu1604 -Reference-OS: Ubuntu 16.04 -GPU Support: No -BUILD-GCC: gcc 5.4 -BUILD-CLANG: clang 3.9 -pre-built LLVM -``` - -#### CentOS 7.4 - -``` -Dockerfile: Dockerfile.ngraph.centos74_gpu -Reference-OS: Centos 7.4.1708 -GPU Support: Yes -BUILD-GCC: gcc 4.8 -BUILD-CLANG: not supported -pre-built cmake3 -LLVM built from source -``` -``` -Dockerfile: Dockerfile.ngraph.centos74 -Reference-OS: Centos 7.4.1708 -GPU Support: No -BUILD-GCC: gcc 4.8 -BUILD-CLANG: not supported -pre-built cmake3 -LLVM built from source -``` diff --git a/ngraph/contrib/docker/build-ngraph-and-test.sh b/ngraph/contrib/docker/build-ngraph-and-test.sh deleted file mode 100644 index 7ed0fc121c3..00000000000 --- a/ngraph/contrib/docker/build-ngraph-and-test.sh +++ /dev/null @@ -1,132 +0,0 @@ -#! 
-#! /bin/bash
-
-set -e
-# set -u  # Cannot use set -u, as activate below relies on unbound variables
-set -o pipefail
-
-# Debugging to verify builds on CentOS 7.4 and Ubuntu 16.04
-if [ -f "/etc/centos-release" ]; then
-    cat /etc/centos-release
-fi
-
-if [ -f "/etc/lsb-release" ]; then
-    cat /etc/lsb-release
-fi
-
-uname -a
-cat /etc/os-release || true
-
-echo ' '
-echo 'Contents of /home:'
-ls -la /home
-echo ' '
-echo 'Contents of /home/dockuser:'
-ls -la /home/dockuser
-echo ' '
-
-if [ -z "${CMAKE_OPTIONS_EXTRA}" ]; then
-    export CMAKE_OPTIONS_EXTRA=''
-fi
-
-# setting for make -j
-if [ -z "${PARALLEL}" ] ; then
-    PARALLEL=22
-fi
-
-# make command to execute
-if [ -z "${CMD_TO_RUN}" ] ; then
-    CMD_TO_RUN='check_gcc'
-fi
-
-# directory name to use for the build
-if [ -z "${BUILD_SUBDIR}" ] ; then
-    BUILD_SUBDIR=BUILD
-fi
-
-# Set up the environment
-export NGRAPH_REPO=/home/dockuser/ngraph-test
-
-if [ -z "${OUTPUT_DIR}" ]; then
-    OUTPUT_DIR="${NGRAPH_REPO}/${BUILD_SUBDIR}"
-fi
-
-# Remove old OUTPUT_DIR directory if present for build_* targets
-if [ "$(echo ${CMD_TO_RUN} | grep build | wc -l)" != "0" ] ; then
-    ( test -d ${OUTPUT_DIR} && rm -fr ${OUTPUT_DIR} && echo "Removed old ${OUTPUT_DIR} directory" ) || echo "Previous ${OUTPUT_DIR} directory not found"
-    # Make OUTPUT_DIR directory as user
-    mkdir -p ${OUTPUT_DIR}
-    chmod ug+rwx ${OUTPUT_DIR}
-fi
-
-GCC_VERSION=`gcc --version | grep gcc | cut -f 2 -d ')' | cut -f 2 -d ' ' | cut -f 1,2 -d '.'`
-
-# Print the environment, for debugging
-echo ' '
-echo 'Environment:'
-export
-echo ' '
-
-cd $NGRAPH_REPO
-
-export CMAKE_OPTIONS_COMMON="-DNGRAPH_BUILD_DOXYGEN_DOCS=ON -DNGRAPH_BUILD_SPHINX_DOCS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo ${CMAKE_OPTIONS_EXTRA}"
-export CMAKE_OPTIONS_GCC="${CMAKE_OPTIONS_COMMON} -DCMAKE_INSTALL_PREFIX=${NGRAPH_REPO}/BUILD-GCC/ngraph_dist"
-export CMAKE_OPTIONS_CLANG="${CMAKE_OPTIONS_COMMON} -DCMAKE_INSTALL_PREFIX=${NGRAPH_REPO}/BUILD-CLANG/ngraph_dist -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 -DNGRAPH_WARNINGS_AS_ERRORS=ON"
-
-echo "CMD_TO_RUN=${CMD_TO_RUN}"
-
-# set up the cmake environment
-if [ -z "${CMAKE_OPTIONS}" ] ; then
-    if [ "$(echo ${CMD_TO_RUN} | grep gcc | wc -l)" != "0" ] ; then
-        export CMAKE_OPTIONS=${CMAKE_OPTIONS_GCC}
-    elif [ "$(echo ${CMD_TO_RUN} | grep clang | wc -l)" != "0" ] ; then
-        export CMAKE_OPTIONS=${CMAKE_OPTIONS_CLANG}
-    else
-        export CMAKE_OPTIONS=${CMAKE_OPTIONS_COMMON}
-    fi
-
-    echo "set CMAKE_OPTIONS=${CMAKE_OPTIONS}"
-fi
-
-# build and test
-export BUILD_DIR="${NGRAPH_REPO}/${BUILD_SUBDIR}"
-export GTEST_OUTPUT="xml:${BUILD_DIR}/unit-test-results.xml"
-mkdir -p ${BUILD_DIR}
-chmod ug+rwx ${BUILD_DIR}
-cd ${BUILD_DIR}
-
-echo "Build and test for ${CMD_TO_RUN} in `pwd` with specific parameters:"
-echo "    NGRAPH_REPO=${NGRAPH_REPO}"
-echo "    CMAKE_OPTIONS=${CMAKE_OPTIONS}"
-echo "    GTEST_OUTPUT=${GTEST_OUTPUT}"
-
-# only run cmake/make steps for build_* make targets
-if [ "$(echo ${CMD_TO_RUN} | grep build | wc -l)" != "0" ] ; then
-    # always run cmake/make steps
-    echo "Running cmake"
-    cmake ${CMAKE_OPTIONS} ..
2>&1 | tee ${OUTPUT_DIR}/cmake_${CMD_TO_RUN}.log - echo "Running make" - env VERBOSE=1 make -j ${PARALLEL} 2>&1 | tee ${OUTPUT_DIR}/make_${CMD_TO_RUN}.log - echo "CMD_TO_RUN=${CMD_TO_RUN} finished - cmake/make steps completed" -else - # strip off _* from CMD_TO_RUN to pass to the ngraph make targets - MAKE_CMD_TO_RUN=`echo ${CMD_TO_RUN} | sed 's/_.*//g'` - COMPILER=`echo ${CMD_TO_RUN} | sed 's/.*_//g'` - - if [ "${MAKE_CMD_TO_RUN}" == "unit-test-check" ]; then - # check style before running unit tests - if [ -f "/usr/bin/clang-3.9" ]; then - echo "Running make style-check" - env VERBOSE=1 make -j style-check 2>&1 | tee ${OUTPUT_DIR}/make_style_check_${CMD_TO_RUN}.log - fi - fi - - echo "Running make ${MAKE_CMD_TO_RUN}" - env VERBOSE=1 make ${MAKE_CMD_TO_RUN} 2>&1 | tee ${OUTPUT_DIR}/make_${CMD_TO_RUN}.log - - if [ "${MAKE_CMD_TO_RUN}" == "install" ] ; then - echo "Building ngraph_dist_${COMPILER}.tgz" - tar czf ngraph_dist_${COMPILER}.tgz ngraph_dist 2>&1 | tee make_tarball_${COMPILER}.log - ls -l ngraph_dist_*.tgz - fi -fi - diff --git a/ngraph/contrib/docker/build-ngraph-docs.sh b/ngraph/contrib/docker/build-ngraph-docs.sh deleted file mode 100644 index 4382362ad2d..00000000000 --- a/ngraph/contrib/docker/build-ngraph-docs.sh +++ /dev/null @@ -1,61 +0,0 @@ -#! /bin/bash - -#set -x -set -e -set -o pipefail - - -# Debugging -if [ -f "/etc/centos-release" ]; then - cat /etc/centos-release -fi - -if [ -f "/etc/lsb-release" ]; then - cat /etc/lsb-release -fi - -uname -a -cat /etc/os-release || true - -echo ' ' -echo 'Contents of /home:' -ls -la /home -echo ' ' -echo 'Contents of /home/dockuser:' -ls -la /home/dockuser -echo ' ' - -if [ -z ${CMD_TO_RUN} ] ; then - CMD_TO_RUN="make html" -fi - -export NGRAPH_REPO=/home/dockuser/ngraph-test - -if [ -z ${BUILD_SUBDIR} ] ; then - BUILD_DIR="${NGRAPH_REPO}/doc/sphinx" -else - BUILD_DIR="${NGRAPH_REPO}/${BUILD_SUBDIR}" -fi - -if [ -z ${OUTPUT_DIR} ]; then - OUTPUT_DIR="${NGRAPH_REPO}/BUILD-DOCS" -fi - -# Print the environment, for debugging -echo ' ' -echo 'Environment:' -export -echo ' ' - -# Remove old OUTPUT_DIR directory if present -( test -d ${OUTPUT_DIR} && rm -fr ${OUTPUT_DIR} && echo "Removed old ${OUTPUT_DIR} directory" ) || echo "Previous ${OUTPUT_DIR} directory not found" -# Make OUTPUT_DIR directory as user -mkdir -p ${OUTPUT_DIR} -chmod ug+rwx ${OUTPUT_DIR} - -# build html docs -cd ${BUILD_DIR} -echo "Building docs in `pwd`:" -env VERBOSE=1 ${CMD_TO_RUN} 2>&1 | tee ${OUTPUT_DIR}/make_docs.log -ls -l build/html/* || true -mv build/ ${OUTPUT_DIR} diff --git a/ngraph/contrib/docker/docker_cleanup.sh b/ngraph/contrib/docker/docker_cleanup.sh deleted file mode 100644 index 866f6fb8b62..00000000000 --- a/ngraph/contrib/docker/docker_cleanup.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ****************************************************************************** - -# list active docker containers -echo "Active docker containers..." -docker ps -a -echo - -# clean up old docker containers -echo "Removing Exited docker containers..." -docker ps -a | grep Exited | cut -f 1 -d ' ' | xargs docker rm -f "${1}" -echo - -#list docker images for ngraph -echo "Docker images for ngraph..." -docker images ngraph_* -echo - -# clean up docker images no longer in use -echo "Removing docker images for ngraph..." -docker images -qa ngraph_* | xargs docker rmi -f "${1}" diff --git a/ngraph/contrib/docker/make-dimage.sh b/ngraph/contrib/docker/make-dimage.sh deleted file mode 100644 index 28b1a0fd388..00000000000 --- a/ngraph/contrib/docker/make-dimage.sh +++ /dev/null @@ -1,85 +0,0 @@ -#! /bin/bash - -### -# -# Create a docker image that includes dependencies for building ngraph -# -# Uses CONTEXTDIR as the docker build context directory -# Default value is '.' -# -# Uses ./Dockerfile.${DOCKER_TAG} -# DOCKER_TAG is set to 'ngraph' if not set -# -# Sets the docker image name as ${DOCKER_IMAGE_NAME} -# DOCKER_IMAGE_NAME is set to the ${DOCKER_TAG} if not set in the environment -# The datestamp tag is automatically appended to the DOCKER_IMAGE_NAME to create the DIMAGE_ID -# The ${DIMAGE_ID} docker image is created on the local server -# The ${DOCKER_IMAGE_NAME}:latest tag is also created by default for reference -# -### - -set -e -#set -u -set -o pipefail - -if [ -z $DOCKER_TAG ]; then - DOCKER_TAG=build_ngraph -fi - -if [ -z $DOCKER_IMAGE_NAME ]; then - DOCKER_IMAGE_NAME=${DOCKER_TAG} -fi - -echo "CONTEXTDIR=${CONTEXTDIR}" - -if [ -z ${CONTEXTDIR} ]; then - CONTEXTDIR='.' # Docker image build context -fi - -echo "CONTEXTDIR=${CONTEXTDIR}" - -if [ -n $DFILE ]; then - DFILE="${CONTEXTDIR}/Dockerfile.${DOCKER_TAG}" -fi - -CONTEXT='.' - -DIMAGE_NAME="${DOCKER_IMAGE_NAME}" -DIMAGE_VERSION=`date -Iseconds | sed -e 's/:/-/g'` - -DIMAGE_ID="${DIMAGE_NAME}:${DIMAGE_VERSION}" - -# If proxy settings are detected in the environment, make sure they are -# included on the docker-build command-line. This mirrors a similar system -# in the Makefile. - -if [ ! -z "${http_proxy}" ] ; then - DOCKER_HTTP_PROXY="--build-arg http_proxy=${http_proxy}" -else - DOCKER_HTTP_PROXY=' ' -fi - -if [ ! -z "${https_proxy}" ] ; then - DOCKER_HTTPS_PROXY="--build-arg https_proxy=${https_proxy}" -else - DOCKER_HTTPS_PROXY=' ' -fi - -cd ${CONTEXTDIR} - -echo ' ' -echo "Building docker image ${DIMAGE_ID} from Dockerfile ${DFILE}, context ${CONTEXT}" -echo ' ' - -# build the docker base image -docker build --rm=true \ - ${DOCKER_HTTP_PROXY} ${DOCKER_HTTPS_PROXY} \ - -f="${DFILE}" \ - -t="${DIMAGE_ID}" \ - ${CONTEXT} - -docker tag "${DIMAGE_ID}" "${DIMAGE_NAME}:latest" - -echo ' ' -echo 'Docker image build completed' -echo ' ' diff --git a/ngraph/contrib/docker/run_as_centos_user.sh b/ngraph/contrib/docker/run_as_centos_user.sh deleted file mode 100644 index 72de555e42c..00000000000 --- a/ngraph/contrib/docker/run_as_centos_user.sh +++ /dev/null @@ -1,99 +0,0 @@ -#! /bin/bash - -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -# ****************************************************************************** -# This script is designed to simulate running as a user with a particular UID -# within a docker container. -# -# Normally a docker container runs as root, which can cause problems with file -# ownership when a host directory tree is mounted into the docker container. -# There are other problems with building and running software as root as -# well. Good practice when validating software builds in a docker container -# is to run as a normal user, since many (most?) end users will not be building -# and installing software as root. -# -# This script should be run using "docker run", with RUN_UID (set to the user -# you want to run as) passed into the docker container as an environment -# variable. The script will then add the UID as user "dockuser" to -# /etc/passwd (important for some software, like bazel), add the new dockuser -# to the sudo group (whether or not sudo is installed), and su to a new shell -# as the dockuser (passing in the existing environment, which is important). -# -# If the environment variable RUN_CMD is passed into the docker container, then -# this script will use RUN_CMD as a command to run when su'ing. If RUN_CMD is -# not defined, then /bin/bash will run, which effectively provides an -# interactive shell in the docker container, for debugging. -# ****************************************************************************** - -set -e # Make sure we exit on any command that returns non-zero -set -u # No unset variables - -if [ -z "$RUN_UID" ] ; then - - # >&2 redirects echo output to stderr. - # See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr - ( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' ) - ( >&2 echo ' Running as default user (root, in docker)' ) - ( >&2 echo ' ' ) - - exit 1 - -else - - # The username used in the docker container to map the caller UID to - # - # Note 'dockuser' is used in other scripts, notably Makefile. If you - # choose to change it here, then you need to change it in all other - # scripts, or else the builds will break. - # - DOCK_USER='dockuser' - - # We will be su'ing using a non-login shell or command, and preserving - # the environment. This is done so that env. variables passed in with - # "docker run --env ..." are honored. - # Therefore, we need to reset at least HOME=/root ... - # - # Note also that /home/dockuser is used in other scripts, notably - # Makefile. If you choose to change it here, then you need to change it - # in all other scripts, or else the builds will break. - # - export HOME="/home/${DOCK_USER}" - - # Make sure the home directory is owned by the new user - if [ -d "${HOME}" ] ; then - chown "${RUN_UID}" "${HOME}" - fi - - # Add a user with UID of person running docker (in ${RUN_UID}) - # If $HOME does not yet exist, then it will be created - adduser -c 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}" - passwd -d "${DOCK_USER}" - # Add dockuser to the sudo group. 
Sudo *is* used for installing packages, - # so make sure dockuser can run sudo without requesting a password. - usermod -aG wheel "${DOCK_USER}" - echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - - if [ -z "${RUN_CMD+x}" ] ; then # Launch a shell as dockuser - echo 'Running interactive shell (/bin/bash) as dockuser' - su -m "${DOCK_USER}" -c "/bin/bash" - else # Run command as dockuser - echo "Running command [${RUN_CMD}] as dockuser" - su -m "${DOCK_USER}" -c "${RUN_CMD}" - fi - -fi diff --git a/ngraph/contrib/docker/run_as_ubuntu_user.sh b/ngraph/contrib/docker/run_as_ubuntu_user.sh deleted file mode 100644 index 08813e43336..00000000000 --- a/ngraph/contrib/docker/run_as_ubuntu_user.sh +++ /dev/null @@ -1,98 +0,0 @@ -#! /bin/bash - -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -# ****************************************************************************** -# This script is designed to simulate running as a user with a particular UID -# within a docker container. -# -# Normally a docker container runs as root, which can cause problems with file -# ownership when a host directory tree is mounted into the docker container. -# There are other problems with building and running software as root as -# well. Good practice when validating software builds in a docker container -# is to run as a normal user, since many (most?) end users will not be building -# and installing software as root. -# -# This script should be run using "docker run", with RUN_UID (set to the user -# you want to run as) passed into the docker container as an environment -# variable. The script will then add the UID as user "dockuser" to -# /etc/passwd (important for some software, like bazel), add the new dockuser -# to the sudo group (whether or not sudo is installed), and su to a new shell -# as the dockuser (passing in the existing environment, which is important). -# -# If the environment variable RUN_CMD is passed into the docker container, then -# this script will use RUN_CMD as a command to run when su'ing. If RUN_CMD is -# not defined, then /bin/bash will run, which effectively provides an -# interactive shell in the docker container, for debugging. -# ****************************************************************************** - -set -e # Make sure we exit on any command that returns non-zero -set -u # No unset variables - -if [ -z "$RUN_UID" ] ; then - - # >&2 redirects echo output to stderr. - # See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr - ( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' ) - ( >&2 echo ' Running as default user (root, in docker)' ) - ( >&2 echo ' ' ) - - exit 1 - -else - - # The username used in the docker container to map the caller UID to - # - # Note 'dockuser' is used in other scripts, notably Makefile. 
If you - # choose to change it here, then you need to change it in all other - # scripts, or else the builds will break. - # - DOCK_USER='dockuser' - - # We will be su'ing using a non-login shell or command, and preserving - # the environment. This is done so that env. variables passed in with - # "docker run --env ..." are honored. - # Therefore, we need to reset at least HOME=/root ... - # - # Note also that /home/dockuser is used in other scripts, notably - # Makefile. If you choose to change it here, then you need to change it - # in all other scripts, or else the builds will break. - # - export HOME="/home/${DOCK_USER}" - - # Make sure the home directory is owned by the new user - if [ -d "${HOME}" ] ; then - chown "${RUN_UID}" "${HOME}" - fi - - # Add a user with UID of person running docker (in ${RUN_UID}) - # If $HOME does not yet exist, then it will be created - adduser --disabled-password --gecos 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}" - # Add dockuser to the sudo group - adduser "${DOCK_USER}" sudo - - # If root access is needed in the docker image while running as a normal - # user, uncomment this and add 'sudo' as a package installed in Dockerfile - # echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - - if [ -z "${RUN_CMD+x}" ] ; then # Launch a shell as dockuser - su -m "${DOCK_USER}" -c /bin/bash - else # Run command as dockuser - su -m "${DOCK_USER}" -c "${RUN_CMD}" - fi - -fi diff --git a/ngraph/contrib/docker/run_as_user.sh b/ngraph/contrib/docker/run_as_user.sh deleted file mode 100644 index 08813e43336..00000000000 --- a/ngraph/contrib/docker/run_as_user.sh +++ /dev/null @@ -1,98 +0,0 @@ -#! /bin/bash - -# ****************************************************************************** -# Copyright 2017-2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -# ****************************************************************************** -# This script is designed to simulate running as a user with a particular UID -# within a docker container. -# -# Normally a docker container runs as root, which can cause problems with file -# ownership when a host directory tree is mounted into the docker container. -# There are other problems with building and running software as root as -# well. Good practice when validating software builds in a docker container -# is to run as a normal user, since many (most?) end users will not be building -# and installing software as root. -# -# This script should be run using "docker run", with RUN_UID (set to the user -# you want to run as) passed into the docker container as an environment -# variable. The script will then add the UID as user "dockuser" to -# /etc/passwd (important for some software, like bazel), add the new dockuser -# to the sudo group (whether or not sudo is installed), and su to a new shell -# as the dockuser (passing in the existing environment, which is important). 
-# -# If the environment variable RUN_CMD is passed into the docker container, then -# this script will use RUN_CMD as a command to run when su'ing. If RUN_CMD is -# not defined, then /bin/bash will run, which effectively provides an -# interactive shell in the docker container, for debugging. -# ****************************************************************************** - -set -e # Make sure we exit on any command that returns non-zero -set -u # No unset variables - -if [ -z "$RUN_UID" ] ; then - - # >&2 redirects echo output to stderr. - # See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr - ( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' ) - ( >&2 echo ' Running as default user (root, in docker)' ) - ( >&2 echo ' ' ) - - exit 1 - -else - - # The username used in the docker container to map the caller UID to - # - # Note 'dockuser' is used in other scripts, notably Makefile. If you - # choose to change it here, then you need to change it in all other - # scripts, or else the builds will break. - # - DOCK_USER='dockuser' - - # We will be su'ing using a non-login shell or command, and preserving - # the environment. This is done so that env. variables passed in with - # "docker run --env ..." are honored. - # Therefore, we need to reset at least HOME=/root ... - # - # Note also that /home/dockuser is used in other scripts, notably - # Makefile. If you choose to change it here, then you need to change it - # in all other scripts, or else the builds will break. - # - export HOME="/home/${DOCK_USER}" - - # Make sure the home directory is owned by the new user - if [ -d "${HOME}" ] ; then - chown "${RUN_UID}" "${HOME}" - fi - - # Add a user with UID of person running docker (in ${RUN_UID}) - # If $HOME does not yet exist, then it will be created - adduser --disabled-password --gecos 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}" - # Add dockuser to the sudo group - adduser "${DOCK_USER}" sudo - - # If root access is needed in the docker image while running as a normal - # user, uncomment this and add 'sudo' as a package installed in Dockerfile - # echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - - if [ -z "${RUN_CMD+x}" ] ; then # Launch a shell as dockuser - su -m "${DOCK_USER}" -c /bin/bash - else # Run command as dockuser - su -m "${DOCK_USER}" -c "${RUN_CMD}" - fi - -fi
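As a closing illustration of the UID-mapping flow the `run_as_*_user.sh` scripts implement, a host-side invocation would typically look like the sketch below. The image name and host repo path are placeholders assumed for the example; in practice the `Makefile` constructs this command.

```
# Interactive debugging shell inside the container, running as the caller's UID.
# RUN_UID tells run_as_user.sh which UID to add as "dockuser"; with RUN_CMD left
# unset, the script su's to an interactive /bin/bash as that user.
docker run --rm -it \
    --env RUN_UID="$(id -u)" \
    --volume "${HOME}/ngraph:/home/dockuser/ngraph-test" \
    ngraph_build_env:latest \
    bash /home/dockuser/ngraph-test/contrib/docker/run_as_user.sh
```

Supplying `--env RUN_CMD='...'` instead runs that command non-interactively as the mapped user, which is how the build, test, and documentation helper scripts are driven.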