diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 57fca7f44cb..082482fcb74 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -4,8 +4,6 @@ Read before posting: - Checkout FAQ: https://community.grafana.com/c/howto/faq - Checkout How to troubleshoot metric query issues: https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50 -Please prefix your title with [Bug] or [Feature request]. - Please include this information: - What Grafana version are you using? - What datasource are you using? diff --git a/.gitignore b/.gitignore index ddabf2b680c..d957993f487 100644 --- a/.gitignore +++ b/.gitignore @@ -38,12 +38,14 @@ public/css/*.min.css conf/custom.ini fig.yml docker-compose.yml +docker-compose.yaml profile.cov /grafana .notouch /pkg/cmd/grafana-cli/grafana-cli /pkg/cmd/grafana-server/grafana-server /pkg/cmd/grafana-server/debug +debug.test /examples/*/dist /packaging/**/*.rpm /packaging/**/*.deb diff --git a/CHANGELOG.md b/CHANGELOG.md index c0833f60e37..ec9a9293b23 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,19 @@ ## New Features * **Data Source Proxy**: Add support for whitelisting specified cookies that will be passed through to the data source when proxying data source requests [#5457](https://github.com/grafana/grafana/issues/5457), thanks [@robingustafsson](https://github.com/robingustafsson) +* **Postgres/MySQL**: Add __timeGroup macro for MySQL [#9596](https://github.com/grafana/grafana/pull/9596), thanks [@svenklemm](https://github.com/svenklemm) +* **Text**: Text panels are now edited in the Ace editor. [#9698](https://github.com/grafana/grafana/pull/9698), thx [@mtanda](https://github.com/mtanda) +* **Teams**: Add Microsoft Teams notifier [#8523](https://github.com/grafana/grafana/issues/8523), thx [@anthu](https://github.com/anthu) +* **Datasources**: It's now possible to configure datasources with config files [#1789](https://github.com/grafana/grafana/issues/1789) +* **Graphite**: Query editor updated to support new query-by-tag features [#9230](https://github.com/grafana/grafana/issues/9230) +* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store [#9671](https://github.com/grafana/grafana/issues/9671) +## Minor +* **Alert panel**: Adds placeholder text when no alerts are within the time range [#9624](https://github.com/grafana/grafana/issues/9624), thx [@straend](https://github.com/straend) +* **MySQL**: Enable MaxOpenCon and MaxIdleCon according to how the connection string is configured. 
[#9784](https://github.com/grafana/grafana/issues/9784), thx [@dfredell](https://github.com/dfredell) +* **Cloudwatch**: Fixes broken query inspector for CloudWatch [#9661](https://github.com/grafana/grafana/issues/9661), thx [@mtanda](https://github.com/mtanda) +* **Dashboard**: Make it possible to star dashboards from search and the dashboard list panel [#1871](https://github.com/grafana/grafana/issues/1871) +* **Annotations**: Posting annotations now returns the id of the annotation [#9798](https://github.com/grafana/grafana/issues/9798) ## Tech * **RabbitMq**: Remove support for publishing events to RabbitMQ [#9645](https://github.com/grafana/grafana/issues/9645) @@ -21,6 +33,31 @@ * **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu) * **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm) +# 4.6.3 (unreleased) + +## Fixes +* **Gzip**: Fixes bug with gravatar images when gzip was enabled [#5952](https://github.com/grafana/grafana/issues/5952) + +# 4.6.2 (2017-11-16) + +## Important +* **Prometheus**: Fixes bug with new Prometheus alerts in Grafana. Make sure to download this version if you're using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777) + +## Fixes +* **Color picker**: Bug after using textbox input field to change/paste color string [#9769](https://github.com/grafana/grafana/issues/9769) +* **Cloudwatch**: Fix for CloudWatch templating query `ec2_instance_attribute` [#9667](https://github.com/grafana/grafana/issues/9667), thanks [@mtanda](https://github.com/mtanda) +* **Heatmap**: Fixed tooltip for "time series buckets" mode [#9332](https://github.com/grafana/grafana/issues/9332) +* **InfluxDB**: Fixed query editor issue when using `>` or `<` operators in WHERE clause [#9871](https://github.com/grafana/grafana/issues/9871) + + +# 4.6.1 (2017-11-01) + +* **Singlestat**: Lost thresholds when using save dashboard as [#9681](https://github.com/grafana/grafana/issues/9681) +* **Graph**: Fix for series override color picker [#9715](https://github.com/grafana/grafana/issues/9715) +* **Go**: Build using golang 1.9.2 [#9713](https://github.com/grafana/grafana/issues/9713) +* **Plugins**: Fixed problem with loading plugin js files behind auth proxy [#9509](https://github.com/grafana/grafana/issues/9509) +* **Graphite**: Annotation tooltip should render empty string when undefined [#9707](https://github.com/grafana/grafana/issues/9707) + # 4.6.0 (2017-10-26) ## Fixes diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000000..277ed149d13 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,109 @@ +FROM phusion/baseimage:0.9.22 +MAINTAINER Denys Zhdanov + +RUN apt-get -y update \ + && apt-get -y upgrade \ + && apt-get -y install vim \ + nginx \ + python-dev \ + python-flup \ + python-pip \ + python-ldap \ + expect \ + git \ + memcached \ + sqlite3 \ + libffi-dev \ + libcairo2 \ + libcairo2-dev \ + python-cairo \ + python-rrdtool \ + pkg-config \ + nodejs \ + && rm -rf /var/lib/apt/lists/* + +# choose a timezone at build-time +# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build` +ARG CONTAINER_TIMEZONE +ENV DEBIAN_FRONTEND noninteractive + +RUN if [ ! 
-z "${CONTAINER_TIMEZONE}" ]; \ + then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \ + dpkg-reconfigure -f noninteractive tzdata; \ + fi + +# fix python dependencies (LTS Django and newer memcached/txAMQP) +RUN pip install --upgrade pip && \ + pip install django==1.8.18 \ + python-memcached==1.53 \ + txAMQP==0.6.2 + +ARG version=1.0.2 +ARG whisper_version=${version} +ARG carbon_version=${version} +ARG graphite_version=${version} + +ARG statsd_version=v0.7.2 + +# install whisper +RUN git clone -b ${whisper_version} --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper +WORKDIR /usr/local/src/whisper +RUN python ./setup.py install + +# install carbon +RUN git clone -b ${carbon_version} --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon +WORKDIR /usr/local/src/carbon +RUN pip install -r requirements.txt \ + && python ./setup.py install + +# install graphite +RUN git clone -b ${graphite_version} --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web +WORKDIR /usr/local/src/graphite-web +RUN pip install -r requirements.txt \ + && python ./setup.py install +ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/ +ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py +# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py +WORKDIR /opt/graphite/webapp +RUN mkdir -p /var/log/graphite/ \ + && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings + +# install statsd +RUN git clone -b ${statsd_version} https://github.com/etsy/statsd.git /opt/statsd +ADD conf/opt/statsd/config.js /opt/statsd/config.js + +# config nginx +RUN rm /etc/nginx/sites-enabled/default +ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf +ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf + +# init django admin +ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp +ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh +RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp + +# logging support +RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx +ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd + +# daemons +ADD conf/etc/service/carbon/run /etc/service/carbon/run +ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run +ADD conf/etc/service/graphite/run /etc/service/graphite/run +ADD conf/etc/service/statsd/run /etc/service/statsd/run +ADD conf/etc/service/nginx/run /etc/service/nginx/run + +# default conf setup +ADD conf /etc/graphite-statsd/conf +ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh + +# cleanup +RUN apt-get clean\ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# defaults +EXPOSE 80 2003-2004 2023-2024 8125/udp 8126 +VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"] +WORKDIR / +ENV HOME /root +CMD ["/sbin/my_init"] diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000000..c64b0d85c42 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2013-2016 Nathan Hopkins + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including 
+without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ROADMAP.md b/ROADMAP.md index 3ce0c33f088..4273d8df6a9 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,29 +1,36 @@ -# Roadmap (2017-08-29) +# Roadmap (2017-10-31) This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change. But it will give you an idea of our current vision and plan. ### Short term (1-4 months) - - Release Grafana v4.5 with fixes and minor enhancements - Release Grafana v5 - User groups - Dashboard folders - - Dashboard permissions (on folders as well), permissions on groups or users + - Dashboard & folder permissions (assigned to users or groups) - New Dashboard layout engine - New sidemenu & nav UX - Elasticsearch alerting + - React migration foundation (core components) + - Graphite 1.1 Tags Support -### Long term +### Long term (4 - 8 months) - Backend plugins to support more Auth options, Alerting data sources & notifications -- Universal time series transformations for any data source (meta queries) -- Reporting -- Web socket & live data streams -- Migrate to Angular2 or react +- Alerting improvements (silence, per series tracking, etc) +- Dashboard as configuration and other automation / provisioning improvements +- Progress on React migration +- Change visualization (panel type) on the fly. +- Multi stat panel (vertical version of singlestat with bars/graph mode with big number etc) +- Repeat panel by query results +### In a distant future far far away + +- Meta queries +- Integrated light weight TSDB +- Web socket & live data sources ### Outside contributions We know this is being worked on right now by contributors (and we hope to merge it when it's ready). 
-- Clustering for alert engine (load distribution) diff --git a/appveyor.yml b/appveyor.yml index 19de1d3a793..5d67edca9d9 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana environment: nodejs_version: "6" GOPATH: c:\gopath - GOVERSION: 1.9.1 + GOVERSION: 1.9.2 install: - rmdir c:\go /s /q diff --git a/circle.yml b/circle.yml index 8380bc2a2ff..4eb600bfde3 100644 --- a/circle.yml +++ b/circle.yml @@ -9,7 +9,7 @@ machine: GOPATH: "/home/ubuntu/.go_workspace" ORG_PATH: "github.com/grafana" REPO_PATH: "${ORG_PATH}/grafana" - GODIST: "go1.9.1.linux-amd64.tar.gz" + GODIST: "go1.9.2.linux-amd64.tar.gz" post: - mkdir -p ~/download - mkdir -p ~/docker diff --git a/codecov.yml b/codecov.yml index 3d764c1a5b1..82a86e0232b 100644 --- a/codecov.yml +++ b/codecov.yml @@ -7,5 +7,7 @@ coverage: project: yes patch: yes changes: no - -comment: false + +comment: + layout: "diff" + behavior: "once" diff --git a/conf/datasources/datasources.yaml b/conf/datasources/datasources.yaml new file mode 100644 index 00000000000..d8ddc9c6bed --- /dev/null +++ b/conf/datasources/datasources.yaml @@ -0,0 +1,48 @@ +# list of datasources that should be deleted from the database +delete_datasources: + # - name: Graphite + # org_id: 1 + +# list of datasources to insert/update depending on +# what's available in the database +datasources: +# # name of the datasource. Required +# - name: Graphite +# # datasource type. Required +# type: graphite +# # access mode. direct or proxy. Required +# access: proxy +# # org id. will default to org_id 1 if not specified +# org_id: 1 +# # url +# url: http://localhost:8080 +# # database password, if used +# password: +# # database user, if used +# user: +# # database name, if used +# database: +# # enable/disable basic auth +# basic_auth: +# # basic auth username +# basic_auth_user: +# # basic auth password +# basic_auth_password: +# # enable/disable with credentials headers +# with_credentials: +# # mark as default datasource. Max one per org +# is_default: +# # fields that will be converted to json and stored in json_data +# json_data: +# graphiteVersion: "1.1" +# tlsAuth: true +# tlsAuthWithCACert: true +# # json object of data that will be encrypted. +# secure_json_data: +# tlsCACert: "..." +# tlsClientCert: "..." +# tlsClientKey: "..." +# version: 1 +# # allow users to edit datasources from the UI. +# editable: false + diff --git a/conf/defaults.ini b/conf/defaults.ini index 404a7950bf3..2c3064ecf22 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -12,17 +12,17 @@ instance_name = ${HOSTNAME} #################################### Paths ############################### [paths] # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) -# data = data -# + # Directory where grafana can store logs -# logs = data/log -# + # Directory where grafana will automatically scan and look for plugins -# plugins = data/plugins +# Config files containing datasources that will be configured at startup +datasources = conf/datasources + #################################### Server ############################## [server] # Protocol (http, https, socket) @@ -82,6 +82,9 @@ max_idle_conn = 2 # Max conn setting default is 0 (mean not set) max_open_conn = +# Set to true to log the sql calls and execution times. +log_queries = + # For "postgres", use either "disable", "require" or "verify-full" # For "mysql", use either "true", "false", or "skip-verify". 
ssl_mode = disable @@ -171,6 +174,7 @@ disable_gravatar = false # data source proxy whitelist (ip_or_domain:port separated by spaces) data_source_proxy_whitelist = +#################################### Snapshots ########################### [snapshots] # snapshot sharing options external_enabled = true @@ -183,7 +187,13 @@ snapshot_remove_expired = true # remove snapshots after 90 days snapshot_TTL_days = 90 -#################################### Users #################################### +#################################### Dashboards ################## + +[dashboards] +# Number of dashboard versions to keep (per dashboard). Default: 20, Minimum: 1 +versions_to_keep = 20 + +#################################### Users ############################### [users] # disable user signup / registration allow_sign_up = false @@ -429,7 +439,7 @@ enabled = true execute_alerts = true #################################### Internal Grafana Metrics ############ -# Metrics available at HTTP API Url /api/metrics +# Metrics available at HTTP API Url /metrics [metrics] enabled = true interval_seconds = 10 diff --git a/conf/sample.ini b/conf/sample.ini index c7d2d1d2695..85e74451fef 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -12,18 +12,17 @@ #################################### Paths #################################### [paths] # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) -# ;data = /var/lib/grafana -# + # Directory where grafana can store logs -# ;logs = /var/log/grafana -# + # Directory where grafana will automatically scan and look for plugins -# ;plugins = /var/lib/grafana/plugins -# +# Config files containing datasources that will be configured at startup +;datasources = conf/datasources + #################################### Server #################################### [server] # Protocol (http, https, socket) @@ -91,6 +90,8 @@ # Max conn setting default is 0 (mean not set) ;max_open_conn = +# Set to true to log the sql calls and execution times. +log_queries = #################################### Session #################################### [session] @@ -161,6 +162,7 @@ # data source proxy whitelist (ip_or_domain:port separated by spaces) ;data_source_proxy_whitelist = +#################################### Snapshots ########################### [snapshots] # snapshot sharing options ;external_enabled = true @@ -173,7 +175,12 @@ # remove snapshots after 90 days ;snapshot_TTL_days = 90 -#################################### Users #################################### +#################################### Dashboards History ################## +[dashboards] +# Number of dashboard versions to keep (per dashboard). 
Default: 20, Minimum: 1 +;versions_to_keep = 20 + +#################################### Users ############################### [users] # disable user signup / registration ;allow_sign_up = true @@ -373,7 +380,7 @@ ;execute_alerts = true #################################### Internal Grafana Metrics ########################## -# Metrics available at HTTP API Url /api/metrics +# Metrics available at HTTP API Url /metrics [metrics] # Disable / Enable internal metrics ;enabled = true diff --git a/docker/blocks/collectd/docker-compose.yaml b/docker/blocks/collectd/docker-compose.yaml new file mode 100644 index 00000000000..c95827f7928 --- /dev/null +++ b/docker/blocks/collectd/docker-compose.yaml @@ -0,0 +1,11 @@ + collectd: + build: blocks/collectd + environment: + HOST_NAME: myserver + GRAPHITE_HOST: graphite + GRAPHITE_PORT: 2003 + GRAPHITE_PREFIX: collectd. + REPORT_BY_CPU: 'false' + COLLECT_INTERVAL: 10 + links: + - graphite diff --git a/docker/blocks/collectd/fig b/docker/blocks/collectd/fig deleted file mode 100644 index 99f45a66d12..00000000000 --- a/docker/blocks/collectd/fig +++ /dev/null @@ -1,11 +0,0 @@ -collectd: - build: blocks/collectd - environment: - HOST_NAME: myserver - GRAPHITE_HOST: graphite - GRAPHITE_PORT: 2003 - GRAPHITE_PREFIX: collectd. - REPORT_BY_CPU: 'false' - COLLECT_INTERVAL: 10 - links: - - graphite diff --git a/docker/blocks/elastic/docker-compose.yaml b/docker/blocks/elastic/docker-compose.yaml new file mode 100644 index 00000000000..193b8f252f6 --- /dev/null +++ b/docker/blocks/elastic/docker-compose.yaml @@ -0,0 +1,8 @@ + elasticsearch: + image: elasticsearch:2.4.1 + command: elasticsearch -Des.network.host=0.0.0.0 + ports: + - "9200:9200" + - "9300:9300" + volumes: + - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml diff --git a/docker/blocks/elastic/fig b/docker/blocks/elastic/fig deleted file mode 100644 index fa79a9af59c..00000000000 --- a/docker/blocks/elastic/fig +++ /dev/null @@ -1,8 +0,0 @@ -elasticsearch: - image: elasticsearch:2.4.1 - command: elasticsearch -Des.network.host=0.0.0.0 - ports: - - "9200:9200" - - "9300:9300" - volumes: - - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml diff --git a/docker/blocks/elastic1/docker-compose.yaml b/docker/blocks/elastic1/docker-compose.yaml new file mode 100644 index 00000000000..518ae76e6ee --- /dev/null +++ b/docker/blocks/elastic1/docker-compose.yaml @@ -0,0 +1,8 @@ + elasticsearch1: + image: elasticsearch:1.7.6 + command: elasticsearch -Des.network.host=0.0.0.0 + ports: + - "11200:9200" + - "11300:9300" + volumes: + - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml diff --git a/docker/blocks/elastic1/fig b/docker/blocks/elastic1/fig deleted file mode 100644 index c33e51f16a2..00000000000 --- a/docker/blocks/elastic1/fig +++ /dev/null @@ -1,8 +0,0 @@ -elasticsearch1: - image: elasticsearch:1.7.6 - command: elasticsearch -Des.network.host=0.0.0.0 - ports: - - "11200:9200" - - "11300:9300" - volumes: - - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml diff --git a/docker/blocks/elastic5/docker-compose.yaml b/docker/blocks/elastic5/docker-compose.yaml new file mode 100644 index 00000000000..5b12be9ada4 --- /dev/null +++ b/docker/blocks/elastic5/docker-compose.yaml @@ -0,0 +1,8 @@ +# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine + + elasticsearch5: + image: elasticsearch:5 + command: elasticsearch + ports: + - "10200:9200" + - 
"10300:9300" diff --git a/docker/blocks/elastic5/fig b/docker/blocks/elastic5/fig deleted file mode 100644 index 6e5cd89ab3d..00000000000 --- a/docker/blocks/elastic5/fig +++ /dev/null @@ -1,8 +0,0 @@ -# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine - -elasticsearch5: - image: elasticsearch:5 - command: elasticsearch - ports: - - "10200:9200" - - "10300:9300" diff --git a/docker/blocks/graphite/docker-compose.yaml b/docker/blocks/graphite/docker-compose.yaml new file mode 100644 index 00000000000..2bd0dc322cc --- /dev/null +++ b/docker/blocks/graphite/docker-compose.yaml @@ -0,0 +1,16 @@ + graphite: + build: blocks/graphite + ports: + - "8080:80" + - "2003:2003" + volumes: + - /etc/localtime:/etc/localtime:ro + - /etc/timezone:/etc/timezone:ro + + fake-graphite-data: + image: grafana/fake-data-gen + network_mode: bridge + environment: + FD_DATASOURCE: graphite + FD_PORT: 2003 + diff --git a/docker/blocks/graphite/fig b/docker/blocks/graphite/fig deleted file mode 100644 index b7e030e388e..00000000000 --- a/docker/blocks/graphite/fig +++ /dev/null @@ -1,16 +0,0 @@ -graphite: - build: blocks/graphite - ports: - - "8080:80" - - "2003:2003" - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - -fake-graphite-data: - image: grafana/fake-data-gen - net: bridge - environment: - FD_DATASOURCE: graphite - FD_PORT: 2003 - diff --git a/docker/blocks/graphite1/Dockerfile b/docker/blocks/graphite1/Dockerfile index a3ab7c4f4af..7bf8b15fa84 100644 --- a/docker/blocks/graphite1/Dockerfile +++ b/docker/blocks/graphite1/Dockerfile @@ -1,9 +1,10 @@ FROM phusion/baseimage:0.9.22 MAINTAINER Denys Zhdanov + RUN apt-get -y update \ && apt-get -y upgrade \ - && apt-get -y --force-yes install vim \ + && apt-get -y install vim \ nginx \ python-dev \ python-flup \ @@ -22,38 +23,67 @@ RUN apt-get -y update \ nodejs \ && rm -rf /var/lib/apt/lists/* +# choose a timezone at build-time +# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build` +ARG CONTAINER_TIMEZONE +ENV DEBIAN_FRONTEND noninteractive + +RUN if [ ! 
-z "${CONTAINER_TIMEZONE}" ]; \ + then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \ + dpkg-reconfigure -f noninteractive tzdata; \ + fi + # fix python dependencies (LTS Django and newer memcached/txAMQP) -RUN pip install django==1.8.18 \ +RUN pip install --upgrade pip && \ + pip install django==1.8.18 \ python-memcached==1.53 \ - txAMQP==0.6.2 \ - && pip install --upgrade pip + txAMQP==0.6.2 + +ARG version=1.0.2 +ARG whisper_version=${version} +ARG carbon_version=${version} +ARG graphite_version=${version} + +RUN echo "Building Version: $version" + +ARG whisper_repo=https://github.com/graphite-project/whisper.git +ARG carbon_repo=https://github.com/graphite-project/carbon.git +ARG graphite_repo=https://github.com/graphite-project/graphite-web.git + +ARG statsd_version=v0.8.0 + +ARG statsd_repo=https://github.com/etsy/statsd.git # install whisper -RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper +RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper WORKDIR /usr/local/src/whisper RUN python ./setup.py install # install carbon -RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon +RUN git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon WORKDIR /usr/local/src/carbon RUN pip install -r requirements.txt \ && python ./setup.py install # install graphite -RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web +RUN git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web WORKDIR /usr/local/src/graphite-web RUN pip install -r requirements.txt \ && python ./setup.py install + +# install statsd +RUN git clone -b ${statsd_version} ${statsd_repo} /opt/statsd + +# config graphite ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/ ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py -ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py +# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py WORKDIR /opt/graphite/webapp RUN mkdir -p /var/log/graphite/ \ && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings -# install statsd -RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd -ADD conf/opt/statsd/config.js /opt/statsd/config.js +# config statsd +ADD conf/opt/statsd/config.js /opt/statsd/ # config nginx RUN rm /etc/nginx/sites-enabled/default @@ -63,8 +93,7 @@ ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/g # init django admin ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh -RUN chmod +x /usr/local/bin/manage.sh \ - && /usr/local/bin/django_admin_init.exp +RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp # logging support RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx @@ -86,8 +115,10 @@ RUN apt-get clean\ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # defaults -EXPOSE 80 2003-2004 2023-2024 8125/udp 8126 +EXPOSE 80 2003-2004 2023-2024 8125 8125/udp 8126 VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"] WORKDIR / ENV HOME /root +ENV STATSD_INTERFACE udp + CMD ["/sbin/my_init"] diff --git 
a/docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh b/docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh index edd642a9c1b..0937f78cd6a 100755 --- a/docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh +++ b/docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh @@ -12,7 +12,7 @@ graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit) graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit) graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit) if [[ -z $graphite_dir_contents ]]; then - git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web + # git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web cd /usr/local/src/graphite-web && python ./setup.py install fi if [[ -z $graphite_storage_dir_contents ]]; then diff --git a/docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf b/docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf index eaf6a938f27..3e10dcec9cf 100644 --- a/docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf +++ b/docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf @@ -8,18 +8,18 @@ # Defaults to ../ # GRAPHITE_CONF_DIR - Configuration directory (where this file lives). # Defaults to $GRAPHITE_ROOT/conf/ -# GRAPHITE_STORAGE_DIR - Storage directory for whipser/rrd/log/pid files. +# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files. # Defaults to $GRAPHITE_ROOT/storage/ # # To change other directory paths, add settings to this file. The following # configuration variables are available with these default values: # # STORAGE_DIR = $GRAPHITE_STORAGE_DIR -# LOCAL_DATA_DIR = STORAGE_DIR/whisper/ -# WHITELISTS_DIR = STORAGE_DIR/lists/ -# CONF_DIR = STORAGE_DIR/conf/ -# LOG_DIR = STORAGE_DIR/log/ -# PID_DIR = STORAGE_DIR/ +# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/ +# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/ +# CONF_DIR = %(STORAGE_DIR)s/conf/ +# LOG_DIR = %(STORAGE_DIR)s/log/ +# PID_DIR = %(STORAGE_DIR)s/ # # For FHS style directory structures, use: # @@ -30,20 +30,30 @@ # #LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ -# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate +# Specify the database library used to store metric data on disk. Each database +# may have configurable options to change the behaviour of how it writes to +# persistent storage. +# +# whisper - Fixed-size database, similar in design and purpose to RRD. This is +# the default storage backend for carbon and the most rigorously tested. +# +# ceres - Experimental alternative database that supports storing data in sparse +# files of arbitrary fixed-size resolutions. +DATABASE = whisper + +# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no +# longer exists (i.e. it is removed or renamed) ENABLE_LOGROTATION = True # Specify the user to drop privileges to -# If this is blank carbon runs as the user that invokes it +# If this is blank carbon-cache runs as the user that invokes it # This user must have write access to the local data directory USER = -# -# NOTE: The above settings must be set under [relay] and [aggregator] -# to take effect for those daemons as well # Limit the size of the cache to avoid swapping or becoming CPU bound. # Sorts and serving cache queries gets more expensive as the cache grows. # Use the value "inf" (infinity) for an unlimited cache size. 
+# value should be an integer number of metric datapoints. MAX_CACHE_SIZE = inf # Limits the number of whisper update_many() calls per second, which effectively @@ -60,14 +70,30 @@ MAX_UPDATES_PER_SECOND = 500 # MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000 # Softly limits the number of whisper files that get created each minute. -# Setting this value low (like at 50) is a good way to ensure your graphite +# Setting this value low (e.g. 50) is a good way to ensure that your carbon # system will not be adversely impacted when a bunch of new metrics are -# sent to it. The trade off is that it will take much longer for those metrics' -# database files to all get created and thus longer until the data becomes usable. -# Setting this value high (like "inf" for infinity) will cause graphite to create -# the files quickly but at the risk of slowing I/O down considerably for a while. +# sent to it. The trade off is that any metrics received in excess of this +# value will be silently dropped, and the whisper file will not be created +# until such point as a subsequent metric is received and fits within the +# defined rate limit. Setting this value high (like "inf" for infinity) will +# cause carbon to create the files quickly but at the risk of increased I/O. MAX_CREATES_PER_MINUTE = 50 +# Set the minimum timestamp resolution supported by this instance. This allows +# internal optimisations by overwriting points with equal truncated timestamps +# in order to limit the number of updates to the database. It defaults to one +# second. +MIN_TIMESTAMP_RESOLUTION = 1 + +# Set the minimum lag in seconds for a point to be written to the database +# in order to optimize batching. This means that each point will wait at least +# the duration of this lag before being written. Setting this to 0 disable the feature. +# This currently only works when using the timesorted write strategy. +# MIN_TIMESTAMP_LAG = 0 + +# Set the interface and port for the line (plain text) listener. Setting the +# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to +# disable this listener if it is not required. LINE_RECEIVER_INTERFACE = 0.0.0.0 LINE_RECEIVER_PORT = 2003 @@ -78,11 +104,23 @@ ENABLE_UDP_LISTENER = False UDP_RECEIVER_INTERFACE = 0.0.0.0 UDP_RECEIVER_PORT = 2003 +# Set the interface and port for the pickle listener. Setting the interface to +# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this +# listener if it is not required. PICKLE_RECEIVER_INTERFACE = 0.0.0.0 PICKLE_RECEIVER_PORT = 2004 -# Set to false to disable logging of successful connections -LOG_LISTENER_CONNECTIONS = True +# Set the interface and port for the protobuf listener. Setting the interface to +# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this +# listener if it is not required. +# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0 +# PROTOBUF_RECEIVER_PORT = 2005 + +# Limit the number of open connections the receiver can handle as any time. +# Default is no limit. Setting up a limit for sites handling high volume +# traffic may be recommended to avoid running out of TCP memory or having +# thousands of TCP connections reduce the throughput of the service. +#MAX_RECEIVER_CONNECTIONS = inf # Per security concerns outlined in Bug #817247 the pickle receiver # will use a more secure and slightly less efficient unpickler. @@ -98,13 +136,19 @@ CACHE_QUERY_PORT = 7002 # data until the cache size falls below 95% MAX_CACHE_SIZE. 
USE_FLOW_CONTROL = True -# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and -# degrade performance if logging on the same volume as the whisper data is stored. -LOG_UPDATES = False -LOG_CACHE_HITS = False -LOG_CACHE_QUEUE_SORTS = True +# If enabled this setting is used to timeout metric client connection if no +# metrics have been sent in specified time in seconds +#METRIC_CLIENT_IDLE_TIMEOUT = None -# The thread that writes metrics to disk can use on of the following strategies +# By default, carbon-cache will log every whisper update and cache hit. +# This can be excessive and degrade performance if logging on the same +# volume as the whisper data is stored. +LOG_UPDATES = False +LOG_CREATES = False +LOG_CACHE_HITS = False +LOG_CACHE_QUEUE_SORTS = False + +# The thread that writes metrics to disk can use one of the following strategies # determining the order in which metrics are removed from cache and flushed to # disk. The default option preserves the same behavior as has been historically # available in version 0.9.10. @@ -114,6 +158,12 @@ LOG_CACHE_QUEUE_SORTS = True # moment of the list's creation. Metrics will then be flushed from the cache to # disk in that order. # +# timesorted - All metrics in the list will be looked at and sorted according +# to the timestamp of there datapoints. The metric that were the least recently +# written will be written first. This is an hybrid strategy between max and +# sorted which is particularly adapted to sets of metrics with non-uniform +# resolutions. +# # max - The writer thread will always pop and flush the metric from cache # that has the most datapoints. This will give a strong flush preference to # frequently updated metrics and will also reduce random file-io. Infrequently @@ -152,12 +202,61 @@ WHISPER_FALLOCATE_CREATE = True # Enabling this option will cause Whisper to lock each Whisper file it writes # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when -# multiple carbon-cache daemons are writing to the same files +# multiple carbon-cache daemons are writing to the same files. # WHISPER_LOCK_WRITES = False +# On systems which has a large number of metrics, an amount of Whisper write(2)'s +# pageback sometimes cause disk thrashing due to memory shortage, so that abnormal +# disk reads occur. Enabling this option makes it possible to decrease useless +# page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option. +# WHISPER_FADVISE_RANDOM = False + +# By default all nodes stored in Ceres are cached in memory to improve the +# throughput of reads and writes to underlying slices. Turning this off will +# greatly reduce memory consumption for databases with millions of metrics, at +# the cost of a steep increase in disk i/o, approximately an extra two os.stat +# calls for every read and write. Reasons to do this are if the underlying +# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM). +# Valid values are: +# all - all nodes are cached +# none - node caching is disabled +# CERES_NODE_CACHING_BEHAVIOR = all + +# Ceres nodes can have many slices and caching the right ones can improve +# performance dramatically. Note that there are many trade-offs to tinkering +# with this, and unless you are a ceres developer you *really* should not +# mess with this. 
Valid values are: +# latest - only the most recent slice is cached +# all - all slices are cached +# none - slice caching is disabled +# CERES_SLICE_CACHING_BEHAVIOR = latest + +# If a Ceres node accumulates too many slices, performance can suffer. +# This can be caused by intermittently reported data. To mitigate +# slice fragmentation there is a tolerance for how much space can be +# wasted within a slice file to avoid creating a new one. That tolerance +# level is determined by MAX_SLICE_GAP, which is the number of consecutive +# null datapoints allowed in a slice file. +# If you set this very low, you will waste less of the *tiny* bit disk space +# that this feature wastes, and you will be prone to performance problems +# caused by slice fragmentation, which can be pretty severe. +# If you set this really high, you will waste a bit more disk space (each +# null datapoint wastes 8 bytes, but keep in mind your filesystem's block +# size). If you suffer slice fragmentation issues, you should increase this or +# run the ceres-maintenance defrag plugin more often. However you should not +# set it to be huge because then if a large but allowed gap occurs it has to +# get filled in, which means instead of a simple 8-byte write to a new file we +# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice. +# CERES_MAX_SLICE_GAP = 80 + +# Enabling this option will cause Ceres to lock each Ceres file it writes to +# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when +# multiple carbon-cache daemons are writing to the same files. +# CERES_LOCK_WRITES = False + # Set this to True to enable whitelisting and blacklisting of metrics in -# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or -# empty, all metrics will pass through +# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is +# missing or empty, all metrics will pass through # USE_WHITELIST = False # By default, carbon itself will log statistics (such as a count, @@ -203,16 +302,25 @@ WHISPER_FALLOCATE_CREATE = True # Example: store everything # BIND_PATTERNS = # +# URL of graphite-web instance, this is used to add incoming series to the tag database +GRAPHITE_URL = http://127.0.0.1:80 + +# Tag update interval, this specifies how frequently updates to existing series will trigger +# an update to the tag index, the default setting is once every 100 updates +# TAG_UPDATE_INTERVAL = 100 + # To configure special settings for the carbon-cache instance 'b', uncomment this: #[cache:b] #LINE_RECEIVER_PORT = 2103 #PICKLE_RECEIVER_PORT = 2104 #CACHE_QUERY_PORT = 7102 # and any other settings you want to customize, defaults are inherited -# from [carbon] section. +# from the [cache] section. # You can then specify the --instance=b option to manage this instance - - +# +# In order to turn off logging of successful connections for the line +# receiver, set this to False +# LOG_LISTENER_CONN_SUCCESS = True [relay] LINE_RECEIVER_INTERFACE = 0.0.0.0 @@ -220,9 +328,6 @@ LINE_RECEIVER_PORT = 2013 PICKLE_RECEIVER_INTERFACE = 0.0.0.0 PICKLE_RECEIVER_PORT = 2014 -# Set to false to disable logging of successful connections -LOG_LISTENER_CONNECTIONS = True - # Carbon-relay has several options for metric routing controlled by RELAY_METHOD # # Use relay-rules.conf to route metrics to destinations based on pattern rules @@ -237,12 +342,24 @@ LOG_LISTENER_CONNECTIONS = True # instance. 
# Enable this for carbon-relays that send to a group of carbon-aggregators #RELAY_METHOD = aggregated-consistent-hashing +# +# You can also use fast-hashing and fast-aggregated-hashing which are in O(1) +# and will always redirect the metrics to the same destination but do not try +# to minimize rebalancing when the list of destinations is changing. RELAY_METHOD = rules # If you use consistent-hashing you can add redundancy by replicating every # datapoint to more than one machine. REPLICATION_FACTOR = 1 +# For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas +# across distributed hosts. With this setting disabled, it's possible that replicas +# may be sent to different caches on the same host. This has been the default +# behavior since introduction of 'consistent-hashing' relay method. +# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing +# your metrics across the cluster nodes using a tool like Carbonate. +#DIVERSE_REPLICAS = True + # This is a list of carbon daemons we will send any relayed or # generated metrics to. The default provided would send to a single # carbon-cache instance on the default port. However if you @@ -261,20 +378,71 @@ REPLICATION_FACTOR = 1 # must be defined in this list DESTINATIONS = 127.0.0.1:2004 -# This defines the maximum "message size" between carbon daemons. -# You shouldn't need to tune this unless you really know what you're doing. -MAX_DATAPOINTS_PER_MESSAGE = 500 +# This define the protocol to use to contact the destination. It can be +# set to one of "line", "pickle", "udp" and "protobuf". This list can be +# extended with CarbonClientFactory plugins and defaults to "pickle". +# DESTINATION_PROTOCOL = pickle + +# When using consistent hashing it sometime makes sense to make +# the ring dynamic when you don't want to loose points when a +# single destination is down. Replication is an answer to that +# but it can be quite expensive. +# DYNAMIC_ROUTER = False + +# Controls the number of connection attempts before marking a +# destination as down. We usually do one connection attempt per +# second. +# DYNAMIC_ROUTER_MAX_RETRIES = 5 + +# This is the maximum number of datapoints that can be queued up +# for a single destination. Once this limit is hit, we will +# stop accepting new data if USE_FLOW_CONTROL is True, otherwise +# we will drop any subsequently received datapoints. MAX_QUEUE_SIZE = 10000 +# This defines the maximum "message size" between carbon daemons. If +# your queue is large, setting this to a lower number will cause the +# relay to forward smaller discrete chunks of stats, which may prevent +# overloading on the receiving side after a disconnect. +MAX_DATAPOINTS_PER_MESSAGE = 500 + +# Limit the number of open connections the receiver can handle as any time. +# Default is no limit. Setting up a limit for sites handling high volume +# traffic may be recommended to avoid running out of TCP memory or having +# thousands of TCP connections reduce the throughput of the service. +#MAX_RECEIVER_CONNECTIONS = inf + +# Specify the user to drop privileges to +# If this is blank carbon-relay runs as the user that invokes it +# USER = + +# This is the percentage that the queue must be empty before it will accept +# more messages. For a larger site, if the queue is very large it makes sense +# to tune this to allow for incoming stats. 
So if you have an average +# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense +# to allow stats to start flowing when you've cleared the queue to 95% since +# you should have space to accommodate the next minute's worth of stats +# even before the relay incrementally clears more of the queue +QUEUE_LOW_WATERMARK_PCT = 0.8 + +# To allow for batch efficiency from the pickle protocol and to benefit from +# other batching advantages, all writes are deferred by putting them into a queue, +# and then the queue is flushed and sent a small fraction of a second later. +TIME_TO_DEFER_SENDING = 0.0001 + # Set this to False to drop datapoints when any send queue (sending datapoints # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the # default) then sockets over which metrics are received will temporarily stop accepting -# data until the send queues fall below 80% MAX_QUEUE_SIZE. +# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE. USE_FLOW_CONTROL = True +# If enabled this setting is used to timeout metric client connection if no +# metrics have been sent in specified time in seconds +#METRIC_CLIENT_IDLE_TIMEOUT = None + # Set this to True to enable whitelisting and blacklisting of metrics in -# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or -# empty, all metrics will pass through +# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is +# missing or empty, all metrics will pass through # USE_WHITELIST = False # By default, carbon itself will log statistics (such as a count, @@ -282,7 +450,40 @@ USE_FLOW_CONTROL = True # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation # CARBON_METRIC_PREFIX = carbon # CARBON_METRIC_INTERVAL = 60 +# +# In order to turn off logging of successful connections for the line +# receiver, set this to False +# LOG_LISTENER_CONN_SUCCESS = True +# If you're connecting from the relay to a destination that's over the +# internet or similarly iffy connection, a backlog can develop because +# of internet weather conditions, e.g. acks getting lost or similar issues. +# To deal with that, you can enable USE_RATIO_RESET which will let you +# re-set the connection to an individual destination. Defaults to being off. +USE_RATIO_RESET=False + +# When there is a small number of stats flowing, it's not desirable to +# perform any actions based on percentages - it's just too "twitchy". +MIN_RESET_STAT_FLOW=1000 + +# When the ratio of stats being sent in a reporting interval is far +# enough from 1.0, we will disconnect the socket and reconnecto to +# clear out queued stats. The default ratio of 0.9 indicates that 10% +# of stats aren't being delivered within one CARBON_METRIC_INTERVAL +# (default of 60 seconds), which can lead to a queue backup. Under +# some circumstances re-setting the connection can fix this, so +# set this according to your tolerance, and look in the logs for +# "resetConnectionForQualityReasons" to observe whether this is kicking +# in when your sent queue is building up. +MIN_RESET_RATIO=0.9 + +# The minimum time between resets. When a connection is re-set, we +# need to wait before another reset is performed. +# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed +# before stats for the new connection will be available. Setting this +# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of +# reset connections for no good reason. 
+MIN_RESET_INTERVAL=121 [aggregator] LINE_RECEIVER_INTERFACE = 0.0.0.0 @@ -291,14 +492,17 @@ LINE_RECEIVER_PORT = 2023 PICKLE_RECEIVER_INTERFACE = 0.0.0.0 PICKLE_RECEIVER_PORT = 2024 -# Set to false to disable logging of successful connections -LOG_LISTENER_CONNECTIONS = True - # If set true, metric received will be forwarded to DESTINATIONS in addition to # the output of the aggregation rules. If set false the carbon-aggregator will # only ever send the output of aggregation. FORWARD_ALL = True +# Filenames of the configuration files to use for this instance of aggregator. +# Filenames are relative to CONF_DIR. +# +# AGGREGATION_RULES = aggregation-rules.conf +# REWRITE_RULES = rewrite-rules.conf + # This is a list of carbon daemons we will send any relayed or # generated metrics to. The default provided would send to a single # carbon-cache instance on the default port. However if you @@ -330,6 +534,10 @@ MAX_QUEUE_SIZE = 10000 # data until the send queues fall below 80% MAX_QUEUE_SIZE. USE_FLOW_CONTROL = True +# If enabled this setting is used to timeout metric client connection if no +# metrics have been sent in specified time in seconds +#METRIC_CLIENT_IDLE_TIMEOUT = None + # This defines the maximum "message size" between carbon daemons. # You shouldn't need to tune this unless you really know what you're doing. MAX_DATAPOINTS_PER_MESSAGE = 500 @@ -339,6 +547,12 @@ MAX_DATAPOINTS_PER_MESSAGE = 500 # the past MAX_AGGREGATION_INTERVALS * intervalSize seconds. MAX_AGGREGATION_INTERVALS = 5 +# Limit the number of open connections the receiver can handle as any time. +# Default is no limit. Setting up a limit for sites handling high volume +# traffic may be recommended to avoid running out of TCP memory or having +# thousands of TCP connections reduce the throughput of the service. +#MAX_RECEIVER_CONNECTIONS = inf + # By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back # aggregated data points once every rule.frequency seconds, on a per-rule basis. # Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points @@ -348,8 +562,8 @@ MAX_AGGREGATION_INTERVALS = 5 # WRITE_BACK_FREQUENCY = 0 # Set this to True to enable whitelisting and blacklisting of metrics in -# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or -# empty, all metrics will pass through +# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is +# missing or empty, all metrics will pass through # USE_WHITELIST = False # By default, carbon itself will log statistics (such as a count, @@ -357,3 +571,24 @@ MAX_AGGREGATION_INTERVALS = 5 # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation # CARBON_METRIC_PREFIX = carbon # CARBON_METRIC_INTERVAL = 60 + +# In order to turn off logging of successful connections for the line +# receiver, set this to False +# LOG_LISTENER_CONN_SUCCESS = True + +# In order to turn off logging of metrics with no corresponding +# aggregation rules receiver, set this to False +# LOG_AGGREGATOR_MISSES = False + +# Specify the user to drop privileges to +# If this is blank carbon-aggregator runs as the user that invokes it +# USER = + +# Part of the code, and particularly aggregator rules, need +# to cache metric names. To avoid leaking too much memory you +# can tweak the size of this cache. The default allow for 1M +# different metrics per rule (~200MiB). +# CACHE_METRIC_NAMES_MAX=1000000 + +# You can optionally set a ttl to this cache. 
+# CACHE_METRIC_NAMES_TTL=600 diff --git a/docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf b/docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf index ddadd3667bc..80a384285af 100644 --- a/docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf +++ b/docker/blocks/graphite1/conf/opt/graphite/conf/storage-aggregation.conf @@ -40,4 +40,3 @@ aggregationMethod = sum pattern = .* xFilesFactor = 0.3 aggregationMethod = average - diff --git a/docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf b/docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf index 4623058d3d8..d5300a3b22d 100644 --- a/docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf +++ b/docker/blocks/graphite1/conf/opt/graphite/conf/storage-schemas.conf @@ -1,4 +1,23 @@ # Schema definitions for Whisper files. Entries are scanned in order, +# and first match wins. This file is scanned for changes every 60 seconds. +# +# Definition Syntax: +# +# [name] +# pattern = regex +# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ... +# +# Remember: To support accurate aggregation from higher to lower resolution +# archives, the precision of a longer retention archive must be +# cleanly divisible by precision of next lower retention archive. +# +# Valid: 60s:7d,300s:30d (300/60 = 5) +# Invalid: 180s:7d,300s:30d (300/180 = 3.333) +# + +# Carbon's internal metrics. This entry should match what is specified in +# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings + [carbon] pattern = ^carbon\..* retentions = 1m:31d,10m:1y,1h:5y diff --git a/docker/blocks/graphite1/conf/usr/local/bin/manage.sh b/docker/blocks/graphite1/conf/usr/local/bin/manage.sh old mode 100644 new mode 100755 index a94fbcd3615..8915907a3ac --- a/docker/blocks/graphite1/conf/usr/local/bin/manage.sh +++ b/docker/blocks/graphite1/conf/usr/local/bin/manage.sh @@ -1,3 +1,3 @@ #!/bin/bash PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings -PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings \ No newline at end of file +# PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings \ No newline at end of file diff --git a/docker/blocks/graphite1/docker-compose.yaml b/docker/blocks/graphite1/docker-compose.yaml new file mode 100644 index 00000000000..cd10593f423 --- /dev/null +++ b/docker/blocks/graphite1/docker-compose.yaml @@ -0,0 +1,21 @@ + graphite: + build: + context: blocks/graphite1 + args: + version: master + ports: + - "8080:80" + - "2003:2003" + - "8125:8125/udp" + - "8126:8126" + volumes: + - /etc/localtime:/etc/localtime:ro + - /etc/timezone:/etc/timezone:ro + + fake-graphite-data: + image: grafana/fake-data-gen + network_mode: bridge + environment: + FD_DATASOURCE: graphite + FD_PORT: 2003 + diff --git a/docker/blocks/graphite1/fig b/docker/blocks/graphite1/fig deleted file mode 100644 index 5337376ff5c..00000000000 --- a/docker/blocks/graphite1/fig +++ /dev/null @@ -1,16 +0,0 @@ -graphite: - build: blocks/graphite1 - ports: - - "8080:80" - - "2003:2003" - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - -fake-graphite-data: - image: grafana/fake-data-gen - net: bridge - environment: - FD_DATASOURCE: graphite - FD_PORT: 2003 - diff --git a/docker/blocks/graphite1/files/carbon.conf b/docker/blocks/graphite1/files/carbon.conf deleted file mode 100644 index 50762b3fff5..00000000000 --- 
a/docker/blocks/graphite1/files/carbon.conf +++ /dev/null @@ -1,76 +0,0 @@ -[cache] -LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ - -# Specify the user to drop privileges to -# If this is blank carbon runs as the user that invokes it -# This user must have write access to the local data directory -USER = - -# Limit the size of the cache to avoid swapping or becoming CPU bound. -# Sorts and serving cache queries gets more expensive as the cache grows. -# Use the value "inf" (infinity) for an unlimited cache size. -MAX_CACHE_SIZE = inf - -# Limits the number of whisper update_many() calls per second, which effectively -# means the number of write requests sent to the disk. This is intended to -# prevent over-utilizing the disk and thus starving the rest of the system. -# When the rate of required updates exceeds this, then carbon's caching will -# take effect and increase the overall throughput accordingly. -MAX_UPDATES_PER_SECOND = 1000 - -# Softly limits the number of whisper files that get created each minute. -# Setting this value low (like at 50) is a good way to ensure your graphite -# system will not be adversely impacted when a bunch of new metrics are -# sent to it. The trade off is that it will take much longer for those metrics' -# database files to all get created and thus longer until the data becomes usable. -# Setting this value high (like "inf" for infinity) will cause graphite to create -# the files quickly but at the risk of slowing I/O down considerably for a while. -MAX_CREATES_PER_MINUTE = inf - -LINE_RECEIVER_INTERFACE = 0.0.0.0 -LINE_RECEIVER_PORT = 2003 - -PICKLE_RECEIVER_INTERFACE = 0.0.0.0 -PICKLE_RECEIVER_PORT = 2004 - -CACHE_QUERY_INTERFACE = 0.0.0.0 -CACHE_QUERY_PORT = 7002 - -LOG_UPDATES = False - -# Enable AMQP if you want to receve metrics using an amqp broker -# ENABLE_AMQP = False - -# Verbose means a line will be logged for every metric received -# useful for testing -# AMQP_VERBOSE = False - -# AMQP_HOST = localhost -# AMQP_PORT = 5672 -# AMQP_VHOST = / -# AMQP_USER = guest -# AMQP_PASSWORD = guest -# AMQP_EXCHANGE = graphite - -# Patterns for all of the metrics this machine will store. Read more at -# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings -# -# Example: store all sales, linux servers, and utilization metrics -# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization -# -# Example: store everything -# BIND_PATTERNS = # - -# NOTE: you cannot run both a cache and a relay on the same server -# with the default configuration, you have to specify a distinict -# interfaces and ports for the listeners. 
- -[relay] -LINE_RECEIVER_INTERFACE = 0.0.0.0 -LINE_RECEIVER_PORT = 2003 - -PICKLE_RECEIVER_INTERFACE = 0.0.0.0 -PICKLE_RECEIVER_PORT = 2004 - -CACHE_SERVERS = server1, server2, server3 -MAX_QUEUE_SIZE = 10000 diff --git a/docker/blocks/graphite1/files/events_views.py b/docker/blocks/graphite1/files/events_views.py deleted file mode 100644 index 6c12987af3c..00000000000 --- a/docker/blocks/graphite1/files/events_views.py +++ /dev/null @@ -1,102 +0,0 @@ -import datetime -import time - -from django.utils.timezone import get_current_timezone -from django.core.urlresolvers import get_script_prefix -from django.http import HttpResponse -from django.shortcuts import render_to_response, get_object_or_404 -from pytz import timezone - -from graphite.util import json -from graphite.events import models -from graphite.render.attime import parseATTime - - -def to_timestamp(dt): - return time.mktime(dt.timetuple()) - - -class EventEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, datetime.datetime): - return to_timestamp(obj) - return json.JSONEncoder.default(self, obj) - - -def view_events(request): - if request.method == "GET": - context = { 'events' : fetch(request), - 'slash' : get_script_prefix() - } - return render_to_response("events.html", context) - else: - return post_event(request) - -def detail(request, event_id): - e = get_object_or_404(models.Event, pk=event_id) - context = { 'event' : e, - 'slash' : get_script_prefix() - } - return render_to_response("event.html", context) - - -def post_event(request): - if request.method == 'POST': - event = json.loads(request.body) - assert isinstance(event, dict) - - values = {} - values["what"] = event["what"] - values["tags"] = event.get("tags", None) - values["when"] = datetime.datetime.fromtimestamp( - event.get("when", time.time())) - if "data" in event: - values["data"] = event["data"] - - e = models.Event(**values) - e.save() - - return HttpResponse(status=200) - else: - return HttpResponse(status=405) - -def get_data(request): - if 'jsonp' in request.REQUEST: - response = HttpResponse( - "%s(%s)" % (request.REQUEST.get('jsonp'), - json.dumps(fetch(request), cls=EventEncoder)), - mimetype='text/javascript') - else: - response = HttpResponse( - json.dumps(fetch(request), cls=EventEncoder), - mimetype="application/json") - return response - -def fetch(request): - #XXX we need to move to USE_TZ=True to get rid of naive-time conversions - def make_naive(dt): - if 'tz' in request.GET: - tz = timezone(request.GET['tz']) - else: - tz = get_current_timezone() - local_dt = dt.astimezone(tz) - if hasattr(local_dt, 'normalize'): - local_dt = local_dt.normalize() - return local_dt.replace(tzinfo=None) - - if request.GET.get("from", None) is not None: - time_from = make_naive(parseATTime(request.GET["from"])) - else: - time_from = datetime.datetime.fromtimestamp(0) - - if request.GET.get("until", None) is not None: - time_until = make_naive(parseATTime(request.GET["until"])) - else: - time_until = datetime.datetime.now() - - tags = request.GET.get("tags", None) - if tags is not None: - tags = request.GET.get("tags").split(" ") - - return [x.as_dict() for x in - models.Event.find_events(time_from, time_until, tags=tags)] diff --git a/docker/blocks/graphite1/files/initial_data.json b/docker/blocks/graphite1/files/initial_data.json deleted file mode 100644 index b3ac9b1ebb0..00000000000 --- a/docker/blocks/graphite1/files/initial_data.json +++ /dev/null @@ -1,20 +0,0 @@ -[ - { - "pk": 1, - "model": "auth.user", - "fields": { - 
"username": "admin", - "first_name": "", - "last_name": "", - "is_active": true, - "is_superuser": true, - "is_staff": true, - "last_login": "2011-09-20 17:02:14", - "groups": [], - "user_permissions": [], - "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236", - "email": "root@example.com", - "date_joined": "2011-09-20 17:02:14" - } - } -] diff --git a/docker/blocks/graphite1/files/local_settings.py b/docker/blocks/graphite1/files/local_settings.py deleted file mode 100644 index 177d674e9dd..00000000000 --- a/docker/blocks/graphite1/files/local_settings.py +++ /dev/null @@ -1,42 +0,0 @@ -# Edit this file to override the default graphite settings, do not edit settings.py - -# Turn on debugging and restart apache if you ever see an "Internal Server Error" page -#DEBUG = True - -# Set your local timezone (django will try to figure this out automatically) -TIME_ZONE = 'UTC' - -# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely -#MEMCACHE_HOSTS = ['127.0.0.1:11211'] - -# Sometimes you need to do a lot of rendering work but cannot share your storage mount -#REMOTE_RENDERING = True -#RENDERING_HOSTS = ['fastserver01','fastserver02'] -#LOG_RENDERING_PERFORMANCE = True -#LOG_CACHE_PERFORMANCE = True - -# If you've got more than one backend server they should all be listed here -#CLUSTER_SERVERS = [] - -# Override this if you need to provide documentation specific to your graphite deployment -#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite" - -# Enable email-related features -#SMTP_SERVER = "mail.mycompany.com" - -# LDAP / ActiveDirectory authentication setup -#USE_LDAP_AUTH = True -#LDAP_SERVER = "ldap.mycompany.com" -#LDAP_PORT = 389 -#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com" -#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com" -#LDAP_BASE_PASS = "readonly_account_password" -#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)" - -# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!) 
-#DATABASE_ENGINE = 'mysql' # or 'postgres' -#DATABASE_NAME = 'graphite' -#DATABASE_USER = 'graphite' -#DATABASE_PASSWORD = 'graphite-is-awesome' -#DATABASE_HOST = 'mysql.mycompany.com' -#DATABASE_PORT = '3306' diff --git a/docker/blocks/graphite1/files/my_htpasswd b/docker/blocks/graphite1/files/my_htpasswd deleted file mode 100644 index 52a72d01b4c..00000000000 --- a/docker/blocks/graphite1/files/my_htpasswd +++ /dev/null @@ -1 +0,0 @@ -grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU// diff --git a/docker/blocks/graphite1/files/nginx.conf b/docker/blocks/graphite1/files/nginx.conf deleted file mode 100644 index 5b5c562ce9d..00000000000 --- a/docker/blocks/graphite1/files/nginx.conf +++ /dev/null @@ -1,70 +0,0 @@ -daemon off; -user www-data; -worker_processes 1; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - server_tokens off; - - server_names_hash_bucket_size 32; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; - - gzip on; - gzip_disable "msie6"; - - server { - listen 80 default_server; - server_name _; - - open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m; - - location / { - proxy_pass http://127.0.0.1:8000; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Server $host; - proxy_set_header X-Forwarded-Host $host; - proxy_set_header Host $host; - - client_max_body_size 10m; - client_body_buffer_size 128k; - - proxy_connect_timeout 90; - proxy_send_timeout 90; - proxy_read_timeout 90; - - proxy_buffer_size 4k; - proxy_buffers 4 32k; - proxy_busy_buffers_size 64k; - proxy_temp_file_write_size 64k; - } - - add_header Access-Control-Allow-Origin "*"; - add_header Access-Control-Allow-Methods "GET, OPTIONS"; - add_header Access-Control-Allow-Headers "origin, authorization, accept"; - - location /content { - alias /opt/graphite/webapp/content; - - } - - location /media { - alias /usr/share/pyshared/django/contrib/admin/media; - } - } -} diff --git a/docker/blocks/graphite1/files/statsd_config.js b/docker/blocks/graphite1/files/statsd_config.js deleted file mode 100644 index a03003dcd79..00000000000 --- a/docker/blocks/graphite1/files/statsd_config.js +++ /dev/null @@ -1,8 +0,0 @@ -{ - graphitePort: 2003, - graphiteHost: "127.0.0.1", - port: 8125, - mgmt_port: 8126, - backends: ['./backends/graphite'], - debug: true -} diff --git a/docker/blocks/graphite1/files/storage-aggregation.conf b/docker/blocks/graphite1/files/storage-aggregation.conf deleted file mode 100644 index 5c4bc19fde1..00000000000 --- a/docker/blocks/graphite1/files/storage-aggregation.conf +++ /dev/null @@ -1,19 +0,0 @@ -[min] -pattern = \.min$ -xFilesFactor = 0.1 -aggregationMethod = min - -[max] -pattern = \.max$ -xFilesFactor = 0.1 -aggregationMethod = max - -[sum] -pattern = \.count$ -xFilesFactor = 0 -aggregationMethod = sum - -[default_average] -pattern = .* -xFilesFactor = 0.5 -aggregationMethod = average diff --git a/docker/blocks/graphite1/files/storage-schemas.conf b/docker/blocks/graphite1/files/storage-schemas.conf deleted file mode 100644 index 7e28d3a6649..00000000000 --- a/docker/blocks/graphite1/files/storage-schemas.conf +++ /dev/null @@ -1,16 +0,0 @@ -[carbon] -pattern = ^carbon\..* -retentions = 1m:31d,10m:1y,1h:5y - -[highres] 
-pattern = ^highres.* -retentions = 1s:1d,1m:7d - -[statsd] -pattern = ^statsd.* -retentions = 1m:7d,10m:1y - -[default] -pattern = .* -retentions = 10s:1d,1m:7d,10m:1y - diff --git a/docker/blocks/graphite1/files/supervisord.conf b/docker/blocks/graphite1/files/supervisord.conf deleted file mode 100644 index c9812bb16dc..00000000000 --- a/docker/blocks/graphite1/files/supervisord.conf +++ /dev/null @@ -1,26 +0,0 @@ -[supervisord] -nodaemon = true -environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf' - -[program:nginx] -command = /usr/sbin/nginx -stdout_logfile = /var/log/supervisor/%(program_name)s.log -stderr_logfile = /var/log/supervisor/%(program_name)s.log -autorestart = true - -[program:carbon-cache] -;user = www-data -command = /opt/graphite/bin/carbon-cache.py --debug start -stdout_logfile = /var/log/supervisor/%(program_name)s.log -stderr_logfile = /var/log/supervisor/%(program_name)s.log -autorestart = true - -[program:graphite-webapp] -;user = www-data -directory = /opt/graphite/webapp -environment = PYTHONPATH='/opt/graphite/webapp' -command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py -stdout_logfile = /var/log/supervisor/%(program_name)s.log -stderr_logfile = /var/log/supervisor/%(program_name)s.log -autorestart = true - diff --git a/docker/blocks/influxdb/docker-compose.yaml b/docker/blocks/influxdb/docker-compose.yaml new file mode 100644 index 00000000000..3434f5d09b9 --- /dev/null +++ b/docker/blocks/influxdb/docker-compose.yaml @@ -0,0 +1,17 @@ + influxdb: + image: influxdb:latest + container_name: influxdb + ports: + - "2004:2004" + - "8083:8083" + - "8086:8086" + volumes: + - ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf + + fake-influxdb-data: + image: grafana/fake-data-gen + network_mode: bridge + environment: + FD_DATASOURCE: influxdb + FD_PORT: 8086 + diff --git a/docker/blocks/influxdb/fig b/docker/blocks/influxdb/fig deleted file mode 100644 index 8821c010a98..00000000000 --- a/docker/blocks/influxdb/fig +++ /dev/null @@ -1,17 +0,0 @@ -influxdb: - image: influxdb:latest - container_name: influxdb - ports: - - "2004:2004" - - "8083:8083" - - "8086:8086" - volumes: - - ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf - -fake-influxdb-data: - image: grafana/fake-data-gen - net: bridge - environment: - FD_DATASOURCE: influxdb - FD_PORT: 8086 - diff --git a/docker/blocks/jaeger/docker-compose.yaml b/docker/blocks/jaeger/docker-compose.yaml new file mode 100644 index 00000000000..2b57c863425 --- /dev/null +++ b/docker/blocks/jaeger/docker-compose.yaml @@ -0,0 +1,6 @@ + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "127.0.0.1:6831:6831/udp" + - "16686:16686" + diff --git a/docker/blocks/jaeger/fig b/docker/blocks/jaeger/fig deleted file mode 100644 index ab9e2ec599b..00000000000 --- a/docker/blocks/jaeger/fig +++ /dev/null @@ -1,6 +0,0 @@ -jaeger: - image: jaegertracing/all-in-one:latest - ports: - - "localhost:6831:6831/udp" - - "16686:16686" - diff --git a/docker/blocks/memcached/docker-compose.yaml b/docker/blocks/memcached/docker-compose.yaml new file mode 100644 index 00000000000..b3201da0f95 --- /dev/null +++ b/docker/blocks/memcached/docker-compose.yaml @@ -0,0 +1,5 @@ + memcached: + image: memcached:latest + ports: + - "11211:11211" + diff --git a/docker/blocks/memcached/fig b/docker/blocks/memcached/fig deleted file mode 100644 index a0da9df2bc2..00000000000 --- a/docker/blocks/memcached/fig +++ /dev/null @@ -1,5 +0,0 @@ -memcached: - image: 
memcached:latest - ports: - - "11211:11211" - diff --git a/docker/blocks/mysql/docker-compose.yaml b/docker/blocks/mysql/docker-compose.yaml new file mode 100644 index 00000000000..6eee158ac43 --- /dev/null +++ b/docker/blocks/mysql/docker-compose.yaml @@ -0,0 +1,14 @@ + mysql: + image: mysql:latest + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: grafana + MYSQL_USER: grafana + MYSQL_PASSWORD: password + ports: + - "3306:3306" + volumes: + - /etc/localtime:/etc/localtime:ro + - /etc/timezone:/etc/timezone:ro + command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all] + diff --git a/docker/blocks/mysql/fig b/docker/blocks/mysql/fig deleted file mode 100644 index 24cb47b61a7..00000000000 --- a/docker/blocks/mysql/fig +++ /dev/null @@ -1,14 +0,0 @@ -mysql: - image: mysql:latest - environment: - MYSQL_ROOT_PASSWORD: rootpass - MYSQL_DATABASE: grafana - MYSQL_USER: grafana - MYSQL_PASSWORD: password - ports: - - "3306:3306" - volumes: - - /etc/localtime:/etc/localtime:ro - - /etc/timezone:/etc/timezone:ro - command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all] - diff --git a/docker/blocks/mysql_opendata/docker-compose.yaml b/docker/blocks/mysql_opendata/docker-compose.yaml new file mode 100644 index 00000000000..594eeed284a --- /dev/null +++ b/docker/blocks/mysql_opendata/docker-compose.yaml @@ -0,0 +1,9 @@ + mysql_opendata: + build: blocks/mysql_opendata + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: testdata + MYSQL_USER: grafana + MYSQL_PASSWORD: password + ports: + - "3307:3306" diff --git a/docker/blocks/mysql_opendata/fig b/docker/blocks/mysql_opendata/fig deleted file mode 100644 index a374fbd0931..00000000000 --- a/docker/blocks/mysql_opendata/fig +++ /dev/null @@ -1,9 +0,0 @@ -mysql_opendata: - build: blocks/mysql_opendata - environment: - MYSQL_ROOT_PASSWORD: rootpass - MYSQL_DATABASE: testdata - MYSQL_USER: grafana - MYSQL_PASSWORD: password - ports: - - "3307:3306" diff --git a/docker/blocks/mysql_tests/docker-compose.yaml b/docker/blocks/mysql_tests/docker-compose.yaml new file mode 100644 index 00000000000..646cc7ee369 --- /dev/null +++ b/docker/blocks/mysql_tests/docker-compose.yaml @@ -0,0 +1,9 @@ + mysqltests: + image: mysql:latest + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: grafana_tests + MYSQL_USER: grafana + MYSQL_PASSWORD: password + ports: + - "3306:3306" diff --git a/docker/blocks/mysql_tests/fig b/docker/blocks/mysql_tests/fig deleted file mode 100644 index 880c955d218..00000000000 --- a/docker/blocks/mysql_tests/fig +++ /dev/null @@ -1,9 +0,0 @@ -mysqltests: - image: mysql:latest - environment: - MYSQL_ROOT_PASSWORD: rootpass - MYSQL_DATABASE: grafana_tests - MYSQL_USER: grafana - MYSQL_PASSWORD: password - ports: - - "3306:3306" diff --git a/docker/blocks/openldap/Dockerfile b/docker/blocks/openldap/Dockerfile index d16987cb3ab..d073e274356 100644 --- a/docker/blocks/openldap/Dockerfile +++ b/docker/blocks/openldap/Dockerfile @@ -1,6 +1,6 @@ FROM debian:jessie -MAINTAINER Christian Luginbühl +LABEL maintainer="Christian Luginbühl " ENV OPENLDAP_VERSION 2.4.40 diff --git a/docker/blocks/openldap/docker-compose.yaml b/docker/blocks/openldap/docker-compose.yaml new file mode 100644 index 00000000000..be06524a57d --- /dev/null +++ b/docker/blocks/openldap/docker-compose.yaml @@ -0,0 +1,10 @@ + openldap: + build: blocks/openldap + environment: + SLAPD_PASSWORD: grafana + SLAPD_DOMAIN: grafana.org 
+ SLAPD_ADDITIONAL_MODULES: memberof + ports: + - "389:389" + + diff --git a/docker/blocks/openldap/fig b/docker/blocks/openldap/fig deleted file mode 100644 index b9528f2d4d7..00000000000 --- a/docker/blocks/openldap/fig +++ /dev/null @@ -1,10 +0,0 @@ -openldap: - build: blocks/openldap - environment: - SLAPD_PASSWORD: grafana - SLAPD_DOMAIN: grafana.org - SLAPD_ADDITIONAL_MODULES: memberof - ports: - - "389:389" - - diff --git a/docker/blocks/opentsdb/docker-compose.yaml b/docker/blocks/opentsdb/docker-compose.yaml new file mode 100644 index 00000000000..ee064bb107d --- /dev/null +++ b/docker/blocks/opentsdb/docker-compose.yaml @@ -0,0 +1,11 @@ + opentsdb: + image: opower/opentsdb:latest + ports: + - "4242:4242" + + fake-opentsdb-data: + image: grafana/fake-data-gen + network_mode: bridge + environment: + FD_DATASOURCE: opentsdb + diff --git a/docker/blocks/opentsdb/fig b/docker/blocks/opentsdb/fig deleted file mode 100644 index c346475e9a3..00000000000 --- a/docker/blocks/opentsdb/fig +++ /dev/null @@ -1,11 +0,0 @@ -opentsdb: - image: opower/opentsdb:latest - ports: - - "4242:4242" - -fake-opentsdb-data: - image: grafana/fake-data-gen - net: bridge - environment: - FD_DATASOURCE: opentsdb - diff --git a/docker/blocks/postgres/docker-compose.yaml b/docker/blocks/postgres/docker-compose.yaml new file mode 100644 index 00000000000..eced00aafeb --- /dev/null +++ b/docker/blocks/postgres/docker-compose.yaml @@ -0,0 +1,9 @@ + postgrestest: + image: postgres:latest + environment: + POSTGRES_USER: grafana + POSTGRES_PASSWORD: password + POSTGRES_DATABASE: grafana + ports: + - "5432:5432" + command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql diff --git a/docker/blocks/postgres/fig b/docker/blocks/postgres/fig deleted file mode 100644 index 9f39a0ffb1d..00000000000 --- a/docker/blocks/postgres/fig +++ /dev/null @@ -1,9 +0,0 @@ -postgrestest: - image: postgres:9.4.14 - environment: - POSTGRES_USER: grafana - POSTGRES_PASSWORD: password - POSTGRES_DATABASE: grafana - ports: - - "5432:5432" - command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql diff --git a/docker/blocks/postgres_tests/docker-compose.yaml b/docker/blocks/postgres_tests/docker-compose.yaml new file mode 100644 index 00000000000..3d9a82c034c --- /dev/null +++ b/docker/blocks/postgres_tests/docker-compose.yaml @@ -0,0 +1,7 @@ + postgrestest: + image: postgres:latest + environment: + POSTGRES_USER: grafanatest + POSTGRES_PASSWORD: grafanatest + ports: + - "5432:5432" diff --git a/docker/blocks/postgres_tests/fig b/docker/blocks/postgres_tests/fig deleted file mode 100644 index 049afe185c8..00000000000 --- a/docker/blocks/postgres_tests/fig +++ /dev/null @@ -1,7 +0,0 @@ -postgrestest: - image: postgres:latest - environment: - POSTGRES_USER: grafanatest - POSTGRES_PASSWORD: grafanatest - ports: - - "5432:5432" diff --git a/docker/blocks/prometheus/docker-compose.yaml b/docker/blocks/prometheus/docker-compose.yaml new file mode 100644 index 00000000000..ccb1238a179 --- /dev/null +++ b/docker/blocks/prometheus/docker-compose.yaml @@ -0,0 +1,25 @@ + prometheus: + build: blocks/prometheus + network_mode: host + ports: + - "9090:9090" + + node_exporter: + image: prom/node-exporter + network_mode: host + ports: + - "9100:9100" + + fake-prometheus-data: + image: grafana/fake-data-gen + network_mode: host + ports: + - "9091:9091" + environment: + FD_DATASOURCE: prom + + alertmanager: + image: 
quay.io/prometheus/alertmanager + network_mode: host + ports: + - "9093:9093" diff --git a/docker/blocks/prometheus/fig b/docker/blocks/prometheus/fig deleted file mode 100644 index 7d9bea68046..00000000000 --- a/docker/blocks/prometheus/fig +++ /dev/null @@ -1,25 +0,0 @@ -prometheus: - build: blocks/prometheus - net: host - ports: - - "9090:9090" - -node_exporter: - image: prom/node-exporter - net: host - ports: - - "9100:9100" - -fake-prometheus-data: - image: grafana/fake-data-gen - net: host - ports: - - "9091:9091" - environment: - FD_DATASOURCE: prom - -alertmanager: - image: quay.io/prometheus/alertmanager - net: host - ports: - - "9093:9093" diff --git a/docker/blocks/smtp/Dockerfile b/docker/blocks/smtp/Dockerfile index c1a3adba7c8..9326e077ed9 100644 --- a/docker/blocks/smtp/Dockerfile +++ b/docker/blocks/smtp/Dockerfile @@ -1,5 +1,5 @@ FROM centos:centos7 -MAINTAINER Przemyslaw Ozgo +LABEL maintainer="Przemyslaw Ozgo " RUN \ yum update -y && \ diff --git a/docker/blocks/smtp/docker-compose.yaml b/docker/blocks/smtp/docker-compose.yaml new file mode 100644 index 00000000000..85d598b6167 --- /dev/null +++ b/docker/blocks/smtp/docker-compose.yaml @@ -0,0 +1,4 @@ + snmpd: + image: namshi/smtp + ports: + - "25:25" diff --git a/docker/blocks/smtp/fig b/docker/blocks/smtp/fig deleted file mode 100644 index 3aa25e01311..00000000000 --- a/docker/blocks/smtp/fig +++ /dev/null @@ -1,4 +0,0 @@ -snmpd: - image: namshi/smtp - ports: - - "25:25" diff --git a/docker/compose_header.yml b/docker/compose_header.yml new file mode 100644 index 00000000000..e7bf4f38b02 --- /dev/null +++ b/docker/compose_header.yml @@ -0,0 +1,2 @@ +version: "2" +services: diff --git a/docker/create_docker_compose.sh b/docker/create_docker_compose.sh index 8588c1c474a..9d28ede8e7e 100755 --- a/docker/create_docker_compose.sh +++ b/docker/create_docker_compose.sh @@ -7,8 +7,9 @@ template_dir=templates grafana_config_file=conf.tmp grafana_config=config -fig_file=docker-compose.yml -fig_config=fig +compose_header_file=compose_header.yml +fig_file=docker-compose.yaml +fig_config=docker-compose.yaml if [ "$#" == 0 ]; then blocks=`ls $blocks_dir` @@ -23,13 +24,16 @@ if [ "$#" == 0 ]; then exit 0 fi -for file in $gogs_config_file $fig_file; do +for file in $grafana_config_file $fig_file; do if [ -e $file ]; then echo "Deleting $file" rm $file fi done +echo "Adding Compose header to $fig_file" +cat $compose_header_file >> $fig_file + for dir in $@; do current_dir=$blocks_dir/$dir if [ ! 
-d "$current_dir" ]; then @@ -45,7 +49,7 @@ for dir in $@; do if [ -e $current_dir/$fig_config ]; then echo "Adding $current_dir/$fig_config to $fig_file" - cat $current_dir/fig >> $fig_file + cat $current_dir/$fig_config >> $fig_file echo "" >> $fig_file fi done diff --git a/docs/sources/administration/provisioning.md b/docs/sources/administration/provisioning.md new file mode 100644 index 00000000000..70d9d7a81f3 --- /dev/null +++ b/docs/sources/administration/provisioning.md @@ -0,0 +1,166 @@ ++++ +title = "Provisioning" +description = "" +keywords = ["grafana", "provisioning"] +type = "docs" +[menu.docs] +parent = "admin" +weight = 8 ++++ + +# Provisioning Grafana + +## Config file + +Checkout the [configuration](/installation/configuration) page for more information about what you can configure in `grafana.ini` + +### Config file locations + +- Default configuration from `$WORKING_DIR/conf/defaults.ini` +- Custom configuration from `$WORKING_DIR/conf/custom.ini` +- The custom configuration file path can be overridden using the `--config` parameter + +> **Note.** If you have installed Grafana using the `deb` or `rpm` +> packages, then your configuration file is located at +> `/etc/grafana/grafana.ini`. This path is specified in the Grafana +> init.d script using `--config` file parameter. + +### Using environment variables + +All options in the configuration file (listed below) can be overridden +using environment variables using the syntax: + +```bash +GF__ +``` + +Where the section name is the text within the brackets. Everything +should be upper case, `.` should be replaced by `_`. For example, given these configuration settings: + +```bash +# default section +instance_name = ${HOSTNAME} + +[security] +admin_user = admin + +[auth.google] +client_secret = 0ldS3cretKey +``` + +Then you can override them using: + +```bash +export GF_DEFAULT_INSTANCE_NAME=my-instance +export GF_SECURITY_ADMIN_USER=true +export GF_AUTH_GOOGLE_CLIENT_SECRET=newS3cretKey +``` + +
+ +## Configuration management tools + +Currently we do not provide any scripts/manifests for configuring Grafana. Rather than spending time learning and creating scripts/manifests for each tool, we think our time is better spent making Grafana easier to provision. Therefore, we rely heavily on the expertise of the community. + +Tool | Project +-----|------------ +Puppet | [https://forge.puppet.com/puppet/grafana](https://forge.puppet.com/puppet/grafana) +Ansible | [https://github.com/picotrading/ansible-grafana](https://github.com/picotrading/ansible-grafana) +Chef | [https://github.com/JonathanTron/chef-grafana](https://github.com/JonathanTron/chef-grafana) +Saltstack | [https://github.com/salt-formulas/salt-formula-grafana](https://github.com/salt-formulas/salt-formula-grafana) + +## Datasources + +> This feature is available from v4.7 + +It's possible to manage datasources in Grafana by adding one or more yaml config files in the [`conf/datasources`](/installation/configuration/#datasources) directory. Each config file can contain a list of `datasources` that will be added or updated during startup. If the datasource already exists, Grafana will update it to match the configuration file. The config file can also contain a list of datasources that should be deleted. That list is called `delete_datasources`. Grafana will delete datasources listed in `delete_datasources` before inserting/updating those in the `datasources` list. + +### Running multiple Grafana instances +If you are running multiple instances of Grafana you might run into problems if they have different versions of the datasource.yaml configuration file. The best way to solve this problem is to add a version number to each datasource in the configuration and increase it when you update the config. Grafana will only update datasources with the same or lower version number than specified in the config. That way old configs cannot overwrite newer configs if they restart at the same time. + +### Example datasource config file +```yaml +# list of datasources that should be deleted from the database +delete_datasources: + - name: Graphite + org_id: 1 + +# list of datasources to insert/update depending on +# what's available in the database +datasources: + # name of the datasource. Required +- name: Graphite + # datasource type. Required + type: graphite + # access mode. direct or proxy. Required + access: proxy + # org id. will default to org_id 1 if not specified + org_id: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basic_auth: + # basic auth username + basic_auth_user: + # basic auth password + basic_auth_password: + # enable/disable with credentials headers + with_credentials: + # mark as default datasource. Max one per org + is_default: + # fields that will be converted to json and stored in json_data + json_data: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secure_json_data: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +#### Json data + +Since not all datasources have the same configuration settings, we only have the most common ones as fields. The rest should be stored as a json blob in the `json_data` field. Here are the most common settings that the core datasources use.
+ +| Name | Type | Datasource | Description | +| ----| ---- | ---- | --- | +| tlsAuth | boolean | *All* | Enable TLS authentication using client cert configured in secure json data | +| tlsAuthWithCACert | boolean | *All* | Enable TLS authentication using CA cert | +| graphiteVersion | string | Graphite | Graphite version | +| timeInterval | string | Elastic, Influxdb & Prometheus | Lowest interval/step value that should be used for this data source | +| esVersion | string | Elastic | Elasticsearch version | +| timeField | string | Elastic | Which field should be used as timestamp | +| interval | string | Elastic | Index date time format | +| authType | string | Cloudwatch | Auth provider. keys/credentials/arn | +| assumeRoleArn | string | Cloudwatch | ARN of Assume Role | +| defaultRegion | string | Cloudwatch | AWS region | +| customMetricsNamespaces | string | Cloudwatch | Namespaces of Custom Metrics | +| tsdbVersion | string | OpenTsdb | Version | +| tsdbResolution | string | OpenTsdb | Resolution | +| sslmode | string | Postgres | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' | + +For example, a `json_data` value combining Cloudwatch and Elasticsearch settings could look like: {"authType":"keys","defaultRegion":"us-west-2","timeField":"@timestamp"} + +#### Secure Json data + +Secure json data is a map of settings that will be encrypted with [secret key](/installation/configuration/#secret-key) from the Grafana config. The purpose of this is only to hide content from the users of the application. This should be used for storing TLS certificates and passwords that Grafana will append to requests on the server side. All these settings are optional. + +| Name | Type | Datasource | Description | +| ----| ---- | ---- | --- | +| tlsCACert | string | *All* | CA cert for outgoing requests | +| tlsClientCert | string | *All* | TLS Client cert for outgoing requests | +| tlsClientKey | string | *All* | TLS Client key for outgoing requests | +| password | string | Postgres | password | +| user | string | Postgres | user | diff --git a/docs/sources/features/datasources/postgres.md b/docs/sources/features/datasources/postgres.md index 154f822d9d0..2b111d51f0e 100644 --- a/docs/sources/features/datasources/postgres.md +++ b/docs/sources/features/datasources/postgres.md @@ -45,10 +45,10 @@ Macro example | Description ------------ | ------------- *$__time(dateColumn)* | Will be replaced by an expression to rename the column to `time`. For example, *dateColumn as time* *$__timeSec(dateColumn)* | Will be replaced by an expression to rename the column to `time` and converting the value to unix timestamp. For example, *extract(epoch from dateColumn) as time* -*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > to_timestamp(1494410783) AND dateColumn < to_timestamp(1494497183)* +*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *extract(epoch from dateColumn) BETWEEN 1494410783 AND 1494497183* *$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *to_timestamp(1494410783)* *$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *to_timestamp(1494497183)* -*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *(extract(epoch from "dateColumn")/extract(epoch from '5m'::interval))::int*extract(epoch from '5m'::interval)* +*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause.
For example, *(extract(epoch from "dateColumn")/300)::bigint*300* *$__unixEpochFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name with times represented as unix timestamp. For example, *dateColumn > 1494410783 AND dateColumn < 1494497183* *$__unixEpochFrom()* | Will be replaced by the start of the currently active time selection as unix timestamp. For example, *1494410783* *$__unixEpochTo()* | Will be replaced by the end of the currently active time selection as unix timestamp. For example, *1494497183* @@ -186,7 +186,7 @@ ORDER BY atimestamp ASC ## Annotations -[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view. +[Annotations]({{< relref "reference/annotations.md" >}}) allow you to overlay rich event information on top of graphs. You add annotation queries via the Dashboard menu / Annotations view. An example query: diff --git a/docs/sources/features/datasources/prometheus.md b/docs/sources/features/datasources/prometheus.md index dceb2254e41..15247ba5ebd 100644 --- a/docs/sources/features/datasources/prometheus.md +++ b/docs/sources/features/datasources/prometheus.md @@ -34,6 +34,7 @@ Name | Description *Basic Auth* | Enable basic authentication to the Prometheus data source. *User* | Name of your Prometheus user *Password* | Database user's password +*Scrape interval* | This will be used as a lower limit for the Prometheus step query parameter. Default value is 15s. ## Query editor @@ -95,3 +96,7 @@ Prometheus supports two ways to query annotations. - A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime)) The step option is useful to limit the number of events returned from your query. + +## Getting Grafana metrics into Prometheus + +Since 4.6.0 Grafana exposes metrics for Prometheus on the `/metrics` endpoint. We also bundle a dashboard within Grafana so you can get started viewing your metrics faster. You can import the bundled dashboard by going to the data source edit page and clicking the dashboard tab. There you can find a dashboard for Grafana and one for Prometheus. Import and start viewing all the metrics! diff --git a/docs/sources/features/datasources/testdata.md b/docs/sources/features/datasources/testdata.md index 491e5b60fbb..d99f9cabe08 100644 --- a/docs/sources/features/datasources/testdata.md +++ b/docs/sources/features/datasources/testdata.md @@ -17,7 +17,7 @@ This make is much easier to verify functionally since the data can be shared ver ## Enable -`Grafana TestData` is not enabled by default. To enable it you have to go to `/plugins/testdata/edit` and click the enable button to enable. +`Grafana TestData` is not enabled by default. To enable it, first navigate to the Plugins section, found in your Grafana main menu. Click the Apps tab in the Plugins section and select the Grafana TestData App. (Or navigate to http://your_grafana_instance/plugins/testdata/edit to go directly there). Finally, click the enable button. ## Create mock data. diff --git a/docs/sources/http_api/annotations.md b/docs/sources/http_api/annotations.md index 7aab127cb0c..19c2a5c386c 100644 --- a/docs/sources/http_api/annotations.md +++ b/docs/sources/http_api/annotations.md @@ -89,7 +89,7 @@ Content-Type: application/json ## Create Annotation -Creates an annotation in the Grafana database.
The `dashboardId` and `panelId` fields are optional. If they are not specified then a global annotation is created and can be queried in any dashboard that adds the Grafana annotations data source. +Creates an annotation in the Grafana database. The `dashboardId` and `panelId` fields are optional. If they are not specified then a global annotation is created and can be queried in any dashboard that adds the Grafana annotations data source. When creating a region annotation the response will include both `id` and `endId`; otherwise only `id` is returned. `POST /api/annotations` @@ -117,7 +117,11 @@ Content-Type: application/json HTTP/1.1 200 Content-Type: application/json -{"message":"Annotation added"} +{ + "message":"Annotation added", + "id": 1, + "endId": 2 +} ``` ## Create Annotation in Graphite format @@ -148,7 +152,10 @@ Content-Type: application/json HTTP/1.1 200 Content-Type: application/json -{"message":"Graphite annotation added"} +{ + "message":"Graphite annotation added", + "id": 1 +} ``` ## Update Annotation diff --git a/docs/sources/http_api/dashboard.md b/docs/sources/http_api/dashboard.md index 300e5613db4..0538754bd96 100644 --- a/docs/sources/http_api/dashboard.md +++ b/docs/sources/http_api/dashboard.md @@ -258,7 +258,7 @@ Query parameters: **Example Request**: ```http -GET /api/search?query=MyDashboard&starred=true&tag=prod HTTP/1.1 +GET /api/search?query=Production%20Overview&starred=true&tag=prod HTTP/1.1 Accept: application/json Content-Type: application/json Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk @@ -276,8 +276,8 @@ Content-Type: application/json "title":"Production Overview", "uri":"db/production-overview", "type":"dash-db", - "tags":[], - "isStarred":false + "tags":["prod"], + "isStarred":true } ] -``` \ No newline at end of file +``` diff --git a/docs/sources/index.md b/docs/sources/index.md index 9226c842abc..7a431e29692 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -46,8 +46,8 @@ those options. - [Graphite]({{< relref "features/datasources/graphite.md" >}}) - [Elasticsearch]({{< relref "features/datasources/elasticsearch.md" >}}) - [InfluxDB]({{< relref "features/datasources/influxdb.md" >}}) -- [Prometheus]({{< relref "features/datasources/influxdb.md" >}}) -- [OpenTSDB]({{< relref "features/datasources/prometheus.md" >}}) +- [Prometheus]({{< relref "features/datasources/prometheus.md" >}}) +- [OpenTSDB]({{< relref "features/datasources/opentsdb.md" >}}) - [MySQL]({{< relref "features/datasources/mysql.md" >}}) - [Postgres]({{< relref "features/datasources/postgres.md" >}}) - [Cloudwatch]({{< relref "features/datasources/cloudwatch.md" >}}) diff --git a/docs/sources/installation/configuration.md b/docs/sources/installation/configuration.md index 627a76a963e..c044b3d73dc 100644 --- a/docs/sources/installation/configuration.md +++ b/docs/sources/installation/configuration.md @@ -87,6 +87,14 @@ command line in the init.d script or the systemd service file. It can be overridden in the configuration file or in the default environment variable file. +### plugins + +Directory where Grafana will automatically scan and look for plugins + +### datasources + +Config files containing datasources that will be configured at startup + ## [server] ### http_addr @@ -224,6 +232,9 @@ The maximum number of connections in the idle connection pool. ### max_open_conn The maximum number of open connections to the database. +### log_queries +Set to `true` to log SQL calls and execution times. +
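As a rough sketch of how the new `log_queries` option sits alongside the other `[database]` settings in `conf/custom.ini` (the values below are arbitrary examples, not defaults introduced by this change):

```bash
# Example [database] fragment with SQL query logging enabled.
[database]
max_open_conn = 300   # maximum number of open connections to the database
log_queries = true    # log SQL calls and execution times
```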
## [security] @@ -551,7 +562,7 @@ session provider you have configured. - **file:** session file path, e.g. `data/sessions` - **mysql:** go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name` -- **postgres:** ex: user=a password=b host=localhost port=5432 dbname=c sslmode=require +- **postgres:** ex: user=a password=b host=localhost port=5432 dbname=c sslmode=verify-full - **memcache:** ex: 127.0.0.1:11211 - **redis:** ex: `addr=127.0.0.1:6379,pool_size=100,prefix=grafana` @@ -580,7 +591,7 @@ CREATE TABLE session ( ); ``` -Postgres valid `sslmode` are `disable`, `require` (default), `verify-ca`, and `verify-full`. +Postgres valid `sslmode` are `disable`, `require`, `verify-ca`, and `verify-full` (default). ### cookie_name @@ -613,6 +624,12 @@ Analytics ID here. By default this feature is disabled.
+## [dashboards] + +### versions_to_keep (introduced in v5.0) + +Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1. + ## [dashboards.json] If you have a system that automatically builds dashboards as json files you can enable this feature to have the @@ -673,7 +690,7 @@ Ex `filters = sqlstore:debug` ## [metrics] ### enabled -Enable metrics reporting. defaults true. Available via HTTP API `/api/metrics`. +Enable metrics reporting. defaults true. Available via HTTP API `/metrics`. ### interval_seconds diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md index 7ecb6d14b0c..d832ea7a8ed 100644 --- a/docs/sources/installation/debian.md +++ b/docs/sources/installation/debian.md @@ -15,7 +15,7 @@ weight = 1 Description | Download ------------ | ------------- -Stable for Debian-based Linux | [grafana_4.6.0_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.0_amd64.deb) +Stable for Debian-based Linux | [grafana_4.6.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.2_amd64.deb) @@ -26,9 +26,9 @@ installation. ```bash -wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.0_amd64.deb +wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.6.2_amd64.deb sudo apt-get install -y adduser libfontconfig -sudo dpkg -i grafana_4.6.0_amd64.deb +sudo dpkg -i grafana_4.6.2_amd64.deb ``` @@ -27,7 +27,7 @@ installation. You can install Grafana using Yum directly. ```bash -$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0-1.x86_64.rpm +$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2-1.x86_64.rpm ``` Or install manually using `rpm`. @@ -35,15 +35,15 @@ Or install manually using `rpm`. #### On CentOS / Fedora / Redhat: ```bash -$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0-1.x86_64.rpm +$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2-1.x86_64.rpm $ sudo yum install initscripts fontconfig -$ sudo rpm -Uvh grafana-4.6.0-1.x86_64.rpm +$ sudo rpm -Uvh grafana-4.6.2-1.x86_64.rpm ``` #### On OpenSuse: ```bash -$ sudo rpm -i --nodeps grafana-4.6.0-1.x86_64.rpm +$ sudo rpm -i --nodeps grafana-4.6.2-1.x86_64.rpm ``` ## Install via YUM Repository diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index 181d26d694c..9cfd689fb43 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -13,7 +13,7 @@ weight = 3 Description | Download ------------ | ------------- -Latest stable package for Windows | [grafana.4.6.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.0.windows-x64.zip) +Latest stable package for Windows | [grafana.4.6.2.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.6.2.windows-x64.zip) Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing installation. diff --git a/docs/sources/project/building_from_source.md b/docs/sources/project/building_from_source.md index e4ccedb7299..dba04164d00 100644 --- a/docs/sources/project/building_from_source.md +++ b/docs/sources/project/building_from_source.md @@ -13,9 +13,10 @@ dev environment. 
Grafana ships with its own required backend server; also comple ## Dependencies -- [Go 1.9.1](https://golang.org/dl/) -- [NodeJS LTS](https://nodejs.org/download/) +- [Go 1.9.2](https://golang.org/dl/) - [Git](https://git-scm.com/downloads) +- [NodeJS LTS](https://nodejs.org/download/) +- node-gyp is the Node.js native addon build tool and it requires extra dependencies: python 2.7, make and GCC. These are already installed for most Linux distros and MacOS. See the Building On Windows section or the [node-gyp installation instructions](https://github.com/nodejs/node-gyp#installation) for more details. ## Get Code Create a directory for the project and set your path accordingly (or use the [default Go workspace directory](https://golang.org/doc/code.html#GOPATH)). Then download and install Grafana into your $GOPATH directory: @@ -40,8 +41,8 @@ go run build.go build # (or 'go build ./pkg/cmd/grafana-server') ``` #### Building on Windows -The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on windows you need -to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download). + +The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on windows you need to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download). [node-gyp](https://github.com/nodejs/node-gyp#installation) is the Node.js native addon build tool and it requires extra dependencies to be installed on Windows. In a command prompt which is run as administrator, run: diff --git a/docs/sources/tutorials/authproxy.md b/docs/sources/tutorials/authproxy.md index d4d2b9926fc..8003be20644 100644 --- a/docs/sources/tutorials/authproxy.md +++ b/docs/sources/tutorials/authproxy.md @@ -25,12 +25,16 @@ enabled = true header_name = X-WEBAUTH-USER header_property = username auto_sign_up = true +ldap_sync_ttl = 60 +whitelist = ``` * **enabled**: this is to toggle the feature on or off * **header_name**: this is the HTTP header name that passes the username or email address of the authenticated user to Grafana. Grafana will trust what ever username is contained in this header and automatically log the user in. * **header_property**: this tells Grafana whether the value in the header_name is a username or an email address. (In Grafana you can log in using your account username or account email) * **auto_sign_up**: If set to true, Grafana will automatically create user accounts in the Grafana DB if one does not exist. If set to false, users who do not exist in the GrafanaDB won’t be able to log in, even though their username and password are valid. +* **ldap_sync_ttl**: When both auth.proxy and auth.ldap are enabled, user's organisation and role are synchronised from ldap after the http proxy authentication. You can force ldap re-synchronisation after `ldap_sync_ttl` minutes. +* **whitelist**: Comma separated list of trusted authentication proxies IP. With a fresh install of Grafana, using the above configuration for the authProxy feature, we can send a simple API call to list all users. The only user that will be present is the default “Admin” user that is added the first time Grafana starts up. As you can see all we need to do to authenticate the request is to provide the “X-WEBAUTH-USER” header. 
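For example, a request along these lines is enough (a sketch assuming a default local install on port 3000 and the built-in `admin` user; the trusted header is the only credential supplied):

```bash
# List users through the auth proxy by presenting the X-WEBAUTH-USER header.
curl -H "X-WEBAUTH-USER: admin" http://localhost:3000/api/users
```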
diff --git a/jest.config.js b/jest.config.js index cbf4928f926..ead97e39dad 100644 --- a/jest.config.js +++ b/jest.config.js @@ -1,10 +1,15 @@ module.exports = { verbose: false, + "globals": { + "ts-jest": { + "tsConfigFile": "tsconfig.json" + } + }, "transform": { "^.+\\.tsx?$": "/node_modules/ts-jest/preprocessor.js" }, - "moduleDirectories": ["/node_modules", "/public"], + "moduleDirectories": ["node_modules", "public"], "roots": [ "/public" ], diff --git a/package.json b/package.json index fa6788a5c62..8488f7b1b0b 100644 --- a/package.json +++ b/package.json @@ -95,12 +95,12 @@ "zone.js": "^0.7.2" }, "scripts": { - "dev": "node ./node_modules/.bin/webpack --progress --colors --config scripts/webpack/webpack.dev.js", - "watch": "node ./node_modules/.bin/webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js", - "build": "node ./node_modules/.bin/grunt build", - "test": "node ./node_modules/.bin/grunt test", - "test:coverage": "node ./node_modules/.bin/grunt test --coverage=true", - "lint": "node ./node_modules/.bin/tslint -c tslint.json --project tsconfig.json --type-check", + "dev": "webpack --progress --colors --config scripts/webpack/webpack.dev.js", + "watch": "webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js", + "build": "grunt build", + "test": "grunt test", + "test:coverage": "grunt test --coverage=true", + "lint": "tslint -c tslint.json --project tsconfig.json --type-check", "karma": "node ./node_modules/grunt-cli/bin/grunt karma:dev", "jest": "node ./node_modules/jest-cli/bin/jest.js --notify --watch", "precommit": "node ./node_modules/grunt-cli/bin/grunt precommit" diff --git a/packaging/publish/publish_both.sh b/packaging/publish/publish_both.sh index 0a76851f6fa..6c4f5a5c29a 100755 --- a/packaging/publish/publish_both.sh +++ b/packaging/publish/publish_both.sh @@ -1,5 +1,5 @@ #! 
/usr/bin/env bash -version=4.5.2 +version=4.6.2 wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb diff --git a/pkg/api/annotations.go b/pkg/api/annotations.go index e6454e9cf86..32a0a3035d3 100644 --- a/pkg/api/annotations.go +++ b/pkg/api/annotations.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/middleware" "github.com/grafana/grafana/pkg/services/annotations" + "github.com/grafana/grafana/pkg/util" ) func GetAnnotations(c *middleware.Context) Response { @@ -75,9 +76,11 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response return ApiError(500, "Failed to save annotation", err) } + startID := item.Id + // handle regions if cmd.IsRegion { - item.RegionId = item.Id + item.RegionId = startID if item.Data == nil { item.Data = simplejson.New() @@ -93,9 +96,18 @@ func PostAnnotation(c *middleware.Context, cmd dtos.PostAnnotationsCmd) Response if err := repo.Save(&item); err != nil { return ApiError(500, "Failed save annotation for region end time", err) } + + return Json(200, util.DynMap{ + "message": "Annotation added", + "id": startID, + "endId": item.Id, + }) } - return ApiSuccess("Annotation added") + return Json(200, util.DynMap{ + "message": "Annotation added", + "id": startID, + }) } func formatGraphiteAnnotation(what string, data string) string { @@ -154,7 +166,10 @@ func PostGraphiteAnnotation(c *middleware.Context, cmd dtos.PostGraphiteAnnotati return ApiError(500, "Failed to save Graphite annotation", err) } - return ApiSuccess("Graphite annotation added") + return Json(200, util.DynMap{ + "message": "Graphite annotation added", + "id": item.Id, + }) } func UpdateAnnotation(c *middleware.Context, cmd dtos.UpdateAnnotationsCmd) Response { diff --git a/pkg/api/api.go b/pkg/api/api.go index 957ee1e23e6..b707dc17e21 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -212,10 +212,10 @@ func (hs *HttpServer) registerRoutes() { // Data sources apiRoute.Group("/datasources", func(datasourceRoute RouteRegister) { datasourceRoute.Get("/", wrap(GetDataSources)) - datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource) + datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), wrap(AddDataSource)) datasourceRoute.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource)) - datasourceRoute.Delete("/:id", DeleteDataSourceById) - datasourceRoute.Delete("/name/:name", DeleteDataSourceByName) + datasourceRoute.Delete("/:id", wrap(DeleteDataSourceById)) + datasourceRoute.Delete("/name/:name", wrap(DeleteDataSourceByName)) datasourceRoute.Get("/:id", wrap(GetDataSourceById)) datasourceRoute.Get("/name/:name", wrap(GetDataSourceByName)) }, reqOrgAdmin) @@ -340,8 +340,8 @@ func (hs *HttpServer) registerRoutes() { r.Any("/api/gnet/*", reqSignedIn, ProxyGnetRequest) // Gravatar service. 
- avt := avatar.CacheServer() - r.Get("/avatar/:hash", avt.ServeHTTP) + avatarCacheServer := avatar.NewCacheServer() + r.Get("/avatar/:hash", avatarCacheServer.Handler) // Websocket r.Any("/ws", hs.streamManager.Serve) diff --git a/pkg/api/avatar/avatar.go b/pkg/api/avatar/avatar.go index 80280fd3cc9..fdf93d06b5d 100644 --- a/pkg/api/avatar/avatar.go +++ b/pkg/api/avatar/avatar.go @@ -24,6 +24,7 @@ import ( "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/setting" + "gopkg.in/macaron.v1" ) var gravatarSource string @@ -89,12 +90,12 @@ func (this *Avatar) Update() (err error) { return err } -type service struct { +type CacheServer struct { notFound *Avatar cache map[string]*Avatar } -func (this *service) mustInt(r *http.Request, defaultValue int, keys ...string) (v int) { +func (this *CacheServer) mustInt(r *http.Request, defaultValue int, keys ...string) (v int) { for _, k := range keys { if _, err := fmt.Sscanf(r.FormValue(k), "%d", &v); err == nil { defaultValue = v @@ -103,8 +104,8 @@ func (this *service) mustInt(r *http.Request, defaultValue int, keys ...string) return defaultValue } -func (this *service) ServeHTTP(w http.ResponseWriter, r *http.Request) { - urlPath := r.URL.Path +func (this *CacheServer) Handler(ctx *macaron.Context) { + urlPath := ctx.Req.URL.Path hash := urlPath[strings.LastIndex(urlPath, "/")+1:] var avatar *Avatar @@ -126,20 +127,24 @@ func (this *service) ServeHTTP(w http.ResponseWriter, r *http.Request) { this.cache[hash] = avatar } - w.Header().Set("Content-Type", "image/jpeg") - w.Header().Set("Content-Length", strconv.Itoa(len(avatar.data.Bytes()))) - w.Header().Set("Cache-Control", "private, max-age=3600") + ctx.Resp.Header().Add("Content-Type", "image/jpeg") - if err := avatar.Encode(w); err != nil { + if !setting.EnableGzip { + ctx.Resp.Header().Add("Content-Length", strconv.Itoa(len(avatar.data.Bytes()))) + } + + ctx.Resp.Header().Add("Cache-Control", "private, max-age=3600") + + if err := avatar.Encode(ctx.Resp); err != nil { log.Warn("avatar encode error: %v", err) - w.WriteHeader(500) + ctx.WriteHeader(500) } } -func CacheServer() http.Handler { +func NewCacheServer() *CacheServer { UpdateGravatarSource() - return &service{ + return &CacheServer{ notFound: newNotFound(), cache: make(map[string]*Avatar), } diff --git a/pkg/api/datasources.go b/pkg/api/datasources.go index 747ef8f25e6..b5c5f9cb834 100644 --- a/pkg/api/datasources.go +++ b/pkg/api/datasources.go @@ -33,6 +33,7 @@ func GetDataSources(c *middleware.Context) Response { BasicAuth: ds.BasicAuth, IsDefault: ds.IsDefault, JsonData: ds.JsonData, + ReadOnly: ds.ReadOnly, } if plugin, exists := plugins.DataSources[ds.Type]; exists { @@ -68,59 +69,70 @@ func GetDataSourceById(c *middleware.Context) Response { return Json(200, &dtos) } -func DeleteDataSourceById(c *middleware.Context) { +func DeleteDataSourceById(c *middleware.Context) Response { id := c.ParamsInt64(":id") if id <= 0 { - c.JsonApiErr(400, "Missing valid datasource id", nil) - return + return ApiError(400, "Missing valid datasource id", nil) + } + + ds, err := getRawDataSourceById(id, c.OrgId) + if err != nil { + return ApiError(400, "Failed to delete datasource", nil) + } + + if ds.ReadOnly { + return ApiError(403, "Cannot delete read-only data source", nil) } cmd := &m.DeleteDataSourceByIdCommand{Id: id, OrgId: c.OrgId} - err := bus.Dispatch(cmd) + err = bus.Dispatch(cmd) if err != nil { - c.JsonApiErr(500, "Failed to delete datasource", err) - return + return ApiError(500, "Failed to delete datasource", err) } - 
c.JsonOK("Data source deleted") + return ApiSuccess("Data source deleted") } -func DeleteDataSourceByName(c *middleware.Context) { +func DeleteDataSourceByName(c *middleware.Context) Response { name := c.Params(":name") if name == "" { - c.JsonApiErr(400, "Missing valid datasource name", nil) - return + return ApiError(400, "Missing valid datasource name", nil) + } + + getCmd := &m.GetDataSourceByNameQuery{Name: name, OrgId: c.OrgId} + if err := bus.Dispatch(getCmd); err != nil { + return ApiError(500, "Failed to delete datasource", err) + } + + if getCmd.Result.ReadOnly { + return ApiError(403, "Cannot delete read-only data source", nil) } cmd := &m.DeleteDataSourceByNameCommand{Name: name, OrgId: c.OrgId} - err := bus.Dispatch(cmd) if err != nil { - c.JsonApiErr(500, "Failed to delete datasource", err) - return + return ApiError(500, "Failed to delete datasource", err) } - c.JsonOK("Data source deleted") + return ApiSuccess("Data source deleted") } -func AddDataSource(c *middleware.Context, cmd m.AddDataSourceCommand) { +func AddDataSource(c *middleware.Context, cmd m.AddDataSourceCommand) Response { cmd.OrgId = c.OrgId if err := bus.Dispatch(&cmd); err != nil { if err == m.ErrDataSourceNameExists { - c.JsonApiErr(409, err.Error(), err) - return + return ApiError(409, err.Error(), err) } - c.JsonApiErr(500, "Failed to add datasource", err) - return + return ApiError(500, "Failed to add datasource", err) } ds := convertModelToDtos(cmd.Result) - c.JSON(200, util.DynMap{ + return Json(200, util.DynMap{ "message": "Datasource added", "id": cmd.Result.Id, "name": cmd.Result.Name, @@ -160,11 +172,14 @@ func fillWithSecureJsonData(cmd *m.UpdateDataSourceCommand) error { } ds, err := getRawDataSourceById(cmd.Id, cmd.OrgId) - if err != nil { return err } + if ds.ReadOnly { + return m.ErrDatasourceIsReadOnly + } + secureJsonData := ds.SecureJsonData.Decrypt() for k, v := range secureJsonData { @@ -201,6 +216,7 @@ func GetDataSourceByName(c *middleware.Context) Response { } dtos := convertModelToDtos(query.Result) + dtos.ReadOnly = true return Json(200, &dtos) } @@ -242,6 +258,7 @@ func convertModelToDtos(ds *m.DataSource) dtos.DataSource { JsonData: ds.JsonData, SecureJsonFields: map[string]bool{}, Version: ds.Version, + ReadOnly: ds.ReadOnly, } for k, v := range ds.SecureJsonData { diff --git a/pkg/api/dtos/datasource.go b/pkg/api/dtos/datasource.go index 7cb36e61ab4..f760486c561 100644 --- a/pkg/api/dtos/datasource.go +++ b/pkg/api/dtos/datasource.go @@ -26,6 +26,7 @@ type DataSource struct { JsonData *simplejson.Json `json:"jsonData,omitempty"` SecureJsonFields map[string]bool `json:"secureJsonFields"` Version int `json:"version"` + ReadOnly bool `json:"readOnly"` } type DataSourceListItemDTO struct { @@ -42,6 +43,7 @@ type DataSourceListItemDTO struct { BasicAuth bool `json:"basicAuth"` IsDefault bool `json:"isDefault"` JsonData *simplejson.Json `json:"jsonData,omitempty"` + ReadOnly bool `json:"readOnly"` } type DataSourceList []DataSourceListItemDTO diff --git a/pkg/api/http_server.go b/pkg/api/http_server.go index 037d2ae98e5..eadaa117e86 100644 --- a/pkg/api/http_server.go +++ b/pkg/api/http_server.go @@ -146,12 +146,13 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron { m := macaron.New() m.Use(middleware.Logger()) - m.Use(middleware.Recovery()) if setting.EnableGzip { m.Use(middleware.Gziper()) } + m.Use(middleware.Recovery()) + for _, route := range plugins.StaticRoutes { pluginRoute := path.Join("/public/plugins/", route.PluginId) hs.log.Debug("Plugins: Adding route", "route", 
pluginRoute, "dir", route.Directory) diff --git a/pkg/api/route_register.go b/pkg/api/route_register.go index daa6f35e52c..76ebb633ca1 100644 --- a/pkg/api/route_register.go +++ b/pkg/api/route_register.go @@ -81,8 +81,6 @@ func (rr *routeRegister) Register(router Router) *macaron.Router { } func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) { - //inject tracing - h := make([]macaron.Handler, 0) for _, fn := range rr.namedMiddleware { h = append(h, fn(pattern)) diff --git a/pkg/cmd/grafana-server/main.go b/pkg/cmd/grafana-server/main.go index fa63f05efba..183e4b047cd 100644 --- a/pkg/cmd/grafana-server/main.go +++ b/pkg/cmd/grafana-server/main.go @@ -16,7 +16,6 @@ import ( "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/models" - "github.com/grafana/grafana/pkg/services/sqlstore" "github.com/grafana/grafana/pkg/setting" _ "github.com/grafana/grafana/pkg/services/alerting/conditions" @@ -88,11 +87,6 @@ func main() { server.Start() } -func initSql() { - sqlstore.NewEngine() - sqlstore.EnsureAdminUser() -} - func listenToSystemSignals(server models.GrafanaServer) { signalChan := make(chan os.Signal, 1) ignoreChan := make(chan os.Signal, 1) diff --git a/pkg/cmd/grafana-server/server.go b/pkg/cmd/grafana-server/server.go index 4bbabbe3273..1d3ac092734 100644 --- a/pkg/cmd/grafana-server/server.go +++ b/pkg/cmd/grafana-server/server.go @@ -9,6 +9,9 @@ import ( "strconv" "time" + "github.com/grafana/grafana/pkg/cmd/grafana-cli/logger" + "github.com/grafana/grafana/pkg/services/provisioning" + "golang.org/x/sync/errgroup" "github.com/grafana/grafana/pkg/api" @@ -21,7 +24,9 @@ import ( "github.com/grafana/grafana/pkg/services/cleanup" "github.com/grafana/grafana/pkg/services/notifications" "github.com/grafana/grafana/pkg/services/search" + "github.com/grafana/grafana/pkg/services/sqlstore" "github.com/grafana/grafana/pkg/setting" + "github.com/grafana/grafana/pkg/social" "github.com/grafana/grafana/pkg/tracing" ) @@ -54,12 +59,19 @@ func (g *GrafanaServerImpl) Start() { g.writePIDFile() initSql() + metrics.Init(setting.Cfg) search.Init() login.Init() social.NewOAuthService() plugins.Init() + if err := provisioning.StartUp(setting.DatasourcesPath); err != nil { + logger.Error("Failed to provision Grafana from config", "error", err) + g.Shutdown(1, "Startup failed") + return + } + closer, err := tracing.Init(setting.Cfg) if err != nil { g.log.Error("Tracing settings is not valid", "error", err) @@ -87,6 +99,11 @@ func (g *GrafanaServerImpl) Start() { g.startHttpServer() } +func initSql() { + sqlstore.NewEngine() + sqlstore.EnsureAdminUser() +} + func (g *GrafanaServerImpl) initLogging() { err := setting.NewConfigContext(&setting.CommandLineArgs{ Config: *configFile, diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 4b155ae3208..4d7de98f2ea 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -225,7 +225,7 @@ func init() { M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "api_dataproxy_request_all_milliseconds", - Help: "summary for dashboard search duration", + Help: "summary for dataproxy request duration", Namespace: exporterName, }) diff --git a/pkg/middleware/middleware_test.go b/pkg/middleware/middleware_test.go index a279743aed3..0d9e0e5b973 100644 --- a/pkg/middleware/middleware_test.go +++ b/pkg/middleware/middleware_test.go @@ -363,6 +363,7 @@ type scenarioContext struct { respJson map[string]interface{} handlerFunc handlerFunc defaultHandler macaron.Handler + url string 
req *http.Request } diff --git a/pkg/middleware/recovery.go b/pkg/middleware/recovery.go index b63bc623549..0c9dc4670e2 100644 --- a/pkg/middleware/recovery.go +++ b/pkg/middleware/recovery.go @@ -123,23 +123,22 @@ func Recovery() macaron.Handler { c.Data["ErrorMsg"] = string(stack) } - c.HTML(500, "500") + ctx, ok := c.Data["ctx"].(*Context) - // // Lookup the current responsewriter - // val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil))) - // res := val.Interface().(http.ResponseWriter) - // - // // respond with panic message while in development mode - // var body []byte - // if setting.Env == setting.DEV { - // res.Header().Set("Content-Type", "text/html") - // body = []byte(fmt.Sprintf(panicHtml, err, err, stack)) - // } - // - // res.WriteHeader(http.StatusInternalServerError) - // if nil != body { - // res.Write(body) - // } + if ok && ctx.IsApiRequest() { + resp := make(map[string]interface{}) + resp["message"] = "Internal Server Error - Check the Grafana server logs for the detailed error message." + + if c.Data["ErrorMsg"] != nil { + resp["error"] = fmt.Sprintf("%v - %v", c.Data["Title"], c.Data["ErrorMsg"]) + } else { + resp["error"] = c.Data["Title"] + } + + c.JSON(500, resp) + } else { + c.HTML(500, "500") + } } }() diff --git a/pkg/middleware/recovery_test.go b/pkg/middleware/recovery_test.go new file mode 100644 index 00000000000..299186945ee --- /dev/null +++ b/pkg/middleware/recovery_test.go @@ -0,0 +1,79 @@ +package middleware + +import ( + "path/filepath" + "testing" + + "github.com/go-macaron/session" + "github.com/grafana/grafana/pkg/bus" + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +func TestRecoveryMiddleware(t *testing.T) { + Convey("Given an api route that panics", t, func() { + apiUrl := "/api/whatever" + recoveryScenario("recovery middleware should return json", apiUrl, func(sc *scenarioContext) { + sc.handlerFunc = PanicHandler + sc.fakeReq("GET", apiUrl).exec() + sc.req.Header.Add("content-type", "application/json") + + So(sc.resp.Code, ShouldEqual, 500) + So(sc.respJson["message"], ShouldStartWith, "Internal Server Error - Check the Grafana server logs for the detailed error message.") + So(sc.respJson["error"], ShouldStartWith, "Server Error") + }) + }) + + Convey("Given a non-api route that panics", t, func() { + apiUrl := "/whatever" + recoveryScenario("recovery middleware should return html", apiUrl, func(sc *scenarioContext) { + sc.handlerFunc = PanicHandler + sc.fakeReq("GET", apiUrl).exec() + + So(sc.resp.Code, ShouldEqual, 500) + So(sc.resp.Header().Get("content-type"), ShouldEqual, "text/html; charset=UTF-8") + So(sc.resp.Body.String(), ShouldContainSubstring, "Grafana - Error") + }) + }) +} + +func PanicHandler(c *Context) { + panic("Handler has panicked") +} + +func recoveryScenario(desc string, url string, fn scenarioFunc) { + Convey(desc, func() { + defer bus.ClearBusHandlers() + + sc := &scenarioContext{ + url: url, + } + viewsPath, _ := filepath.Abs("../../public/views") + + sc.m = macaron.New() + sc.m.Use(Recovery()) + + sc.m.Use(macaron.Renderer(macaron.RenderOptions{ + Directory: viewsPath, + Delims: macaron.Delims{Left: "[[", Right: "]]"}, + })) + + sc.m.Use(GetContextHandler()) + // mock out gc goroutine + startSessionGC = func() {} + sc.m.Use(Sessioner(&session.Options{})) + sc.m.Use(OrgRedirect()) + sc.m.Use(AddDefaultResponseHeaders()) + + sc.defaultHandler = func(c *Context) { + sc.context = c + if sc.handlerFunc != nil { + sc.handlerFunc(sc.context) + } + } + + sc.m.Get(url, 
sc.defaultHandler) + + fn(sc) + }) +} diff --git a/pkg/models/dashboard_version.go b/pkg/models/dashboard_version.go index 06b5797e57c..4acb4282a58 100644 --- a/pkg/models/dashboard_version.go +++ b/pkg/models/dashboard_version.go @@ -69,3 +69,10 @@ type GetDashboardVersionsQuery struct { Result []*DashboardVersionDTO } + +// +// Commands +// + +type DeleteExpiredVersionsCommand struct { +} diff --git a/pkg/models/datasource.go b/pkg/models/datasource.go index 7fc8f935124..9c1cb6fe9e2 100644 --- a/pkg/models/datasource.go +++ b/pkg/models/datasource.go @@ -27,6 +27,7 @@ var ( ErrDataSourceNotFound = errors.New("Data source not found") ErrDataSourceNameExists = errors.New("Data source with same name already exists") ErrDataSourceUpdatingOldVersion = errors.New("Trying to update old version of datasource") + ErrDatasourceIsReadOnly = errors.New("Data source is readonly. Can only be updated from configuration.") ) type DsAccess string @@ -50,6 +51,7 @@ type DataSource struct { IsDefault bool JsonData *simplejson.Json SecureJsonData securejsondata.SecureJsonData + ReadOnly bool Created time.Time Updated time.Time @@ -109,6 +111,7 @@ type AddDataSourceCommand struct { IsDefault bool `json:"isDefault"` JsonData *simplejson.Json `json:"jsonData"` SecureJsonData map[string]string `json:"secureJsonData"` + ReadOnly bool `json:"readOnly"` OrgId int64 `json:"-"` @@ -132,6 +135,7 @@ type UpdateDataSourceCommand struct { JsonData *simplejson.Json `json:"jsonData"` SecureJsonData map[string]string `json:"secureJsonData"` Version int `json:"version"` + ReadOnly bool `json:"readOnly"` OrgId int64 `json:"-"` Id int64 `json:"-"` @@ -142,11 +146,15 @@ type UpdateDataSourceCommand struct { type DeleteDataSourceByIdCommand struct { Id int64 OrgId int64 + + DeletedDatasourcesCount int64 } type DeleteDataSourceByNameCommand struct { Name string OrgId int64 + + DeletedDatasourcesCount int64 } // --------------------- @@ -157,6 +165,10 @@ type GetDataSourcesQuery struct { Result []*DataSource } +type GetAllDataSourcesQuery struct { + Result []*DataSource +} + type GetDataSourceByIdQuery struct { Id int64 OrgId int64 diff --git a/pkg/services/alerting/notifiers/teams.go b/pkg/services/alerting/notifiers/teams.go new file mode 100644 index 00000000000..200a8594428 --- /dev/null +++ b/pkg/services/alerting/notifiers/teams.go @@ -0,0 +1,122 @@ +package notifiers + +import ( + "encoding/json" + + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/log" + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/services/alerting" +) + +func init() { + alerting.RegisterNotifier(&alerting.NotifierPlugin{ + Type: "teams", + Name: "Microsoft Teams", + Description: "Sends notifications using Incomming Webhook connector to Microsoft Teams", + Factory: NewTeamsNotifier, + OptionsTemplate: ` +

+      <h3 class="page-heading">Teams settings</h3>
+      <div class="gf-form max-width-30">
+        <span class="gf-form-label width-6">Url</span>
+        <input type="text" required class="gf-form-input max-width-30" ng-model="ctrl.model.settings.url" placeholder="Teams incoming webhook url"></input>
+      </div>
+ `, + }) + +} + +func NewTeamsNotifier(model *m.AlertNotification) (alerting.Notifier, error) { + url := model.Settings.Get("url").MustString() + if url == "" { + return nil, alerting.ValidationError{Reason: "Could not find url property in settings"} + } + + return &TeamsNotifier{ + NotifierBase: NewNotifierBase(model.Id, model.IsDefault, model.Name, model.Type, model.Settings), + Url: url, + log: log.New("alerting.notifier.teams"), + }, nil +} + +type TeamsNotifier struct { + NotifierBase + Url string + Recipient string + Mention string + log log.Logger +} + +func (this *TeamsNotifier) Notify(evalContext *alerting.EvalContext) error { + this.log.Info("Executing teams notification", "ruleId", evalContext.Rule.Id, "notification", this.Name) + + ruleUrl, err := evalContext.GetRuleUrl() + if err != nil { + this.log.Error("Failed get rule link", "error", err) + return err + } + + fields := make([]map[string]interface{}, 0) + fieldLimitCount := 4 + for index, evt := range evalContext.EvalMatches { + fields = append(fields, map[string]interface{}{ + "name": evt.Metric, + "value": evt.Value, + }) + if index > fieldLimitCount { + break + } + } + + if evalContext.Error != nil { + fields = append(fields, map[string]interface{}{ + "name": "Error message", + "value": evalContext.Error.Error(), + }) + } + + message := this.Mention + if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok. + message += " " + evalContext.Rule.Message + } + + body := map[string]interface{}{ + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "summary": message, + "title": evalContext.GetNotificationTitle(), + "themeColor": evalContext.GetStateModel().Color, + "sections": []map[string]interface{}{ + { + "title": "Details", + "facts": fields, + "images": []map[string]interface{}{ + { + "image": evalContext.ImagePublicUrl, + }, + }, + "text": message, + "potentialAction": []map[string]interface{}{ + { + "@context": "http://schema.org", + "@type": "ViewAction", + "name": "View Rule", + "target": []string{ + ruleUrl, + }, + }, + }, + }, + }, + } + + data, _ := json.Marshal(&body) + cmd := &m.SendWebhookSync{Url: this.Url, Body: string(data)} + + if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil { + this.log.Error("Failed to send teams notification", "error", err, "webhook", this.Name) + return err + } + + return nil +} diff --git a/pkg/services/alerting/notifiers/teams_test.go b/pkg/services/alerting/notifiers/teams_test.go new file mode 100644 index 00000000000..a9647350736 --- /dev/null +++ b/pkg/services/alerting/notifiers/teams_test.go @@ -0,0 +1,75 @@ +package notifiers + +import ( + "testing" + + "github.com/grafana/grafana/pkg/components/simplejson" + m "github.com/grafana/grafana/pkg/models" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestTeamsNotifier(t *testing.T) { + Convey("Teams notifier tests", t, func() { + + Convey("Parsing alert notification from settings", func() { + Convey("empty settings should return error", func() { + json := `{ }` + + settingsJSON, _ := simplejson.NewJson([]byte(json)) + model := &m.AlertNotification{ + Name: "ops", + Type: "teams", + Settings: settingsJSON, + } + + _, err := NewTeamsNotifier(model) + So(err, ShouldNotBeNil) + }) + + Convey("from settings", func() { + json := ` + { + "url": "http://google.com" + }` + + settingsJSON, _ := simplejson.NewJson([]byte(json)) + model := &m.AlertNotification{ + Name: "ops", + Type: "teams", + Settings: settingsJSON, + } + + not, err := NewTeamsNotifier(model) + teamsNotifier := not.(*TeamsNotifier) + + So(err, ShouldBeNil) + So(teamsNotifier.Name, ShouldEqual, "ops") + So(teamsNotifier.Type, ShouldEqual, "teams") + So(teamsNotifier.Url, ShouldEqual, "http://google.com") + }) + + Convey("from settings with Recipient and Mention", func() { + json := ` + { + "url": "http://google.com" + }` + + settingsJSON, _ := simplejson.NewJson([]byte(json)) + model := &m.AlertNotification{ + Name: "ops", + Type: "teams", + Settings: settingsJSON, + } + + not, err := NewTeamsNotifier(model) + teamsNotifier := not.(*TeamsNotifier) + + So(err, ShouldBeNil) + So(teamsNotifier.Name, ShouldEqual, "ops") + So(teamsNotifier.Type, ShouldEqual, "teams") + So(teamsNotifier.Url, ShouldEqual, "http://google.com") + }) + + }) + }) +} diff --git a/pkg/services/cleanup/cleanup.go b/pkg/services/cleanup/cleanup.go index ffaa75de9cc..6e5e7684100 100644 --- a/pkg/services/cleanup/cleanup.go +++ b/pkg/services/cleanup/cleanup.go @@ -39,12 +39,13 @@ func (service *CleanUpService) Run(ctx context.Context) error { func (service *CleanUpService) start(ctx context.Context) error { service.cleanUpTmpFiles() - ticker := time.NewTicker(time.Hour * 1) + ticker := time.NewTicker(time.Minute * 10) for { select { case <-ticker.C: service.cleanUpTmpFiles() service.deleteExpiredSnapshots() + service.deleteExpiredDashboardVersions() case <-ctx.Done(): return ctx.Err() } @@ -83,3 +84,7 @@ func (service *CleanUpService) cleanUpTmpFiles() { func (service *CleanUpService) deleteExpiredSnapshots() { bus.Dispatch(&m.DeleteExpiredSnapshotsCommand{}) } + +func (service *CleanUpService) deleteExpiredDashboardVersions() { + bus.Dispatch(&m.DeleteExpiredVersionsCommand{}) +} diff --git a/pkg/services/provisioning/datasources/datasources.go b/pkg/services/provisioning/datasources/datasources.go new file mode 100644 index 00000000000..325dbbbd757 --- /dev/null +++ b/pkg/services/provisioning/datasources/datasources.go @@ -0,0 +1,148 @@ +package datasources + +import ( + "errors" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/grafana/grafana/pkg/bus" + + "github.com/grafana/grafana/pkg/log" + + "github.com/grafana/grafana/pkg/models" + yaml "gopkg.in/yaml.v2" +) + +var ( + ErrInvalidConfigToManyDefault = errors.New("datasource.yaml config is invalid. 
Only one datasource can be marked as default") +) + +func Provision(configDirectory string) error { + dc := newDatasourceProvisioner(log.New("provisioning.datasources")) + return dc.applyChanges(configDirectory) +} + +type DatasourceProvisioner struct { + log log.Logger + cfgProvider configReader +} + +func newDatasourceProvisioner(log log.Logger) DatasourceProvisioner { + return DatasourceProvisioner{ + log: log, + cfgProvider: configReader{}, + } +} + +func (dc *DatasourceProvisioner) apply(cfg *DatasourcesAsConfig) error { + if err := dc.deleteDatasources(cfg.DeleteDatasources); err != nil { + return err + } + + for _, ds := range cfg.Datasources { + cmd := &models.GetDataSourceByNameQuery{OrgId: ds.OrgId, Name: ds.Name} + err := bus.Dispatch(cmd) + if err != nil && err != models.ErrDataSourceNotFound { + return err + } + + if err == models.ErrDataSourceNotFound { + dc.log.Info("inserting datasource from configuration ", "name", ds.Name) + insertCmd := createInsertCommand(ds) + if err := bus.Dispatch(insertCmd); err != nil { + return err + } + } else { + dc.log.Debug("updating datasource from configuration", "name", ds.Name) + updateCmd := createUpdateCommand(ds, cmd.Result.Id) + if err := bus.Dispatch(updateCmd); err != nil { + return err + } + } + } + + return nil +} + +func (dc *DatasourceProvisioner) applyChanges(configPath string) error { + configs, err := dc.cfgProvider.readConfig(configPath) + if err != nil { + return err + } + + for _, cfg := range configs { + if err := dc.apply(cfg); err != nil { + return err + } + } + + return nil +} + +func (dc *DatasourceProvisioner) deleteDatasources(dsToDelete []*DeleteDatasourceConfig) error { + for _, ds := range dsToDelete { + cmd := &models.DeleteDataSourceByNameCommand{OrgId: ds.OrgId, Name: ds.Name} + if err := bus.Dispatch(cmd); err != nil { + return err + } + + if cmd.DeletedDatasourcesCount > 0 { + dc.log.Info("deleted datasource based on configuration", "name", ds.Name) + } + } + + return nil +} + +type configReader struct{} + +func (configReader) readConfig(path string) ([]*DatasourcesAsConfig, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var datasources []*DatasourcesAsConfig + for _, file := range files { + if strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") { + filename, _ := filepath.Abs(filepath.Join(path, file.Name())) + yamlFile, err := ioutil.ReadFile(filename) + + if err != nil { + return nil, err + } + var datasource *DatasourcesAsConfig + err = yaml.Unmarshal(yamlFile, &datasource) + if err != nil { + return nil, err + } + + datasources = append(datasources, datasource) + } + } + + defaultCount := 0 + for _, cfg := range datasources { + for _, ds := range cfg.Datasources { + if ds.OrgId == 0 { + ds.OrgId = 1 + } + + if ds.IsDefault { + defaultCount++ + if defaultCount > 1 { + return nil, ErrInvalidConfigToManyDefault + } + } + } + + for _, ds := range cfg.DeleteDatasources { + if ds.OrgId == 0 { + ds.OrgId = 1 + } + } + } + + return datasources, nil +} diff --git a/pkg/services/provisioning/datasources/datasources_test.go b/pkg/services/provisioning/datasources/datasources_test.go new file mode 100644 index 00000000000..f3252c28d9d --- /dev/null +++ b/pkg/services/provisioning/datasources/datasources_test.go @@ -0,0 +1,202 @@ +package datasources + +import ( + "testing" + + "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/models" + + . 
"github.com/smartystreets/goconvey/convey" +) + +var ( + logger log.Logger = log.New("fake.logger") + oneDatasourcesConfig string = "" + twoDatasourcesConfig string = "./test-configs/two-datasources" + twoDatasourcesConfigPurgeOthers string = "./test-configs/insert-two-delete-two" + doubleDatasourcesConfig string = "./test-configs/double-default" + allProperties string = "./test-configs/all-properties" + brokenYaml string = "./test-configs/broken-yaml" + + fakeRepo *fakeRepository +) + +func TestDatasourceAsConfig(t *testing.T) { + Convey("Testing datasource as configuration", t, func() { + fakeRepo = &fakeRepository{} + bus.ClearBusHandlers() + bus.AddHandler("test", mockDelete) + bus.AddHandler("test", mockInsert) + bus.AddHandler("test", mockUpdate) + bus.AddHandler("test", mockGet) + bus.AddHandler("test", mockGetAll) + + Convey("One configured datasource", func() { + Convey("no datasource in database", func() { + dc := newDatasourceProvisioner(logger) + err := dc.applyChanges(twoDatasourcesConfig) + if err != nil { + t.Fatalf("applyChanges return an error %v", err) + } + + So(len(fakeRepo.deleted), ShouldEqual, 0) + So(len(fakeRepo.inserted), ShouldEqual, 2) + So(len(fakeRepo.updated), ShouldEqual, 0) + }) + + Convey("One datasource in database with same name", func() { + fakeRepo.loadAll = []*models.DataSource{ + {Name: "Graphite", OrgId: 1, Id: 1}, + } + + Convey("should update one datasource", func() { + dc := newDatasourceProvisioner(logger) + err := dc.applyChanges(twoDatasourcesConfig) + if err != nil { + t.Fatalf("applyChanges return an error %v", err) + } + + So(len(fakeRepo.deleted), ShouldEqual, 0) + So(len(fakeRepo.inserted), ShouldEqual, 1) + So(len(fakeRepo.updated), ShouldEqual, 1) + }) + }) + + Convey("Two datasources with is_default", func() { + dc := newDatasourceProvisioner(logger) + err := dc.applyChanges(doubleDatasourcesConfig) + Convey("should raise error", func() { + So(err, ShouldEqual, ErrInvalidConfigToManyDefault) + }) + }) + }) + + Convey("Two configured datasource and purge others ", func() { + Convey("two other datasources in database", func() { + fakeRepo.loadAll = []*models.DataSource{ + {Name: "old-graphite", OrgId: 1, Id: 1}, + {Name: "old-graphite2", OrgId: 1, Id: 2}, + } + + Convey("should have two new datasources", func() { + dc := newDatasourceProvisioner(logger) + err := dc.applyChanges(twoDatasourcesConfigPurgeOthers) + if err != nil { + t.Fatalf("applyChanges return an error %v", err) + } + + So(len(fakeRepo.deleted), ShouldEqual, 2) + So(len(fakeRepo.inserted), ShouldEqual, 2) + So(len(fakeRepo.updated), ShouldEqual, 0) + }) + }) + }) + + Convey("Two configured datasource and purge others = false", func() { + Convey("two other datasources in database", func() { + fakeRepo.loadAll = []*models.DataSource{ + {Name: "Graphite", OrgId: 1, Id: 1}, + {Name: "old-graphite2", OrgId: 1, Id: 2}, + } + + Convey("should have two new datasources", func() { + dc := newDatasourceProvisioner(logger) + err := dc.applyChanges(twoDatasourcesConfig) + if err != nil { + t.Fatalf("applyChanges return an error %v", err) + } + + So(len(fakeRepo.deleted), ShouldEqual, 0) + So(len(fakeRepo.inserted), ShouldEqual, 1) + So(len(fakeRepo.updated), ShouldEqual, 1) + }) + }) + }) + + Convey("broken yaml should return error", func() { + _, err := configReader{}.readConfig(brokenYaml) + So(err, ShouldNotBeNil) + }) + + Convey("can read all properties", func() { + cfgProvifer := configReader{} + cfg, err := cfgProvifer.readConfig(allProperties) + if err != nil { + 
t.Fatalf("readConfig return an error %v", err) + } + + So(len(cfg), ShouldEqual, 2) + + dsCfg := cfg[0] + ds := dsCfg.Datasources[0] + + So(ds.Name, ShouldEqual, "name") + So(ds.Type, ShouldEqual, "type") + So(ds.Access, ShouldEqual, models.DS_ACCESS_PROXY) + So(ds.OrgId, ShouldEqual, 2) + So(ds.Url, ShouldEqual, "url") + So(ds.User, ShouldEqual, "user") + So(ds.Password, ShouldEqual, "password") + So(ds.Database, ShouldEqual, "database") + So(ds.BasicAuth, ShouldBeTrue) + So(ds.BasicAuthUser, ShouldEqual, "basic_auth_user") + So(ds.BasicAuthPassword, ShouldEqual, "basic_auth_password") + So(ds.WithCredentials, ShouldBeTrue) + So(ds.IsDefault, ShouldBeTrue) + So(ds.Editable, ShouldBeTrue) + + So(len(ds.JsonData), ShouldBeGreaterThan, 2) + So(ds.JsonData["graphiteVersion"], ShouldEqual, "1.1") + So(ds.JsonData["tlsAuth"], ShouldEqual, true) + So(ds.JsonData["tlsAuthWithCACert"], ShouldEqual, true) + + So(len(ds.SecureJsonData), ShouldBeGreaterThan, 2) + So(ds.SecureJsonData["tlsCACert"], ShouldEqual, "MjNOcW9RdkbUDHZmpco2HCYzVq9dE+i6Yi+gmUJotq5CDA==") + So(ds.SecureJsonData["tlsClientCert"], ShouldEqual, "ckN0dGlyMXN503YNfjTcf9CV+GGQneN+xmAclQ==") + So(ds.SecureJsonData["tlsClientKey"], ShouldEqual, "ZkN4aG1aNkja/gKAB1wlnKFIsy2SRDq4slrM0A==") + + dstwo := cfg[1].Datasources[0] + So(dstwo.Name, ShouldEqual, "name2") + }) + }) +} + +type fakeRepository struct { + inserted []*models.AddDataSourceCommand + deleted []*models.DeleteDataSourceByNameCommand + updated []*models.UpdateDataSourceCommand + + loadAll []*models.DataSource +} + +func mockDelete(cmd *models.DeleteDataSourceByNameCommand) error { + fakeRepo.deleted = append(fakeRepo.deleted, cmd) + return nil +} + +func mockUpdate(cmd *models.UpdateDataSourceCommand) error { + fakeRepo.updated = append(fakeRepo.updated, cmd) + return nil +} + +func mockInsert(cmd *models.AddDataSourceCommand) error { + fakeRepo.inserted = append(fakeRepo.inserted, cmd) + return nil +} + +func mockGetAll(cmd *models.GetAllDataSourcesQuery) error { + cmd.Result = fakeRepo.loadAll + return nil +} + +func mockGet(cmd *models.GetDataSourceByNameQuery) error { + for _, v := range fakeRepo.loadAll { + if cmd.Name == v.Name && cmd.OrgId == v.OrgId { + cmd.Result = v + return nil + } + } + + return models.ErrDataSourceNotFound +} diff --git a/pkg/services/provisioning/datasources/test-configs/all-properties/all-properties.yaml b/pkg/services/provisioning/datasources/test-configs/all-properties/all-properties.yaml new file mode 100644 index 00000000000..af0d3009a4c --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/all-properties/all-properties.yaml @@ -0,0 +1,23 @@ +datasources: + - name: name + type: type + access: proxy + org_id: 2 + url: url + password: password + user: user + database: database + basic_auth: true + basic_auth_user: basic_auth_user + basic_auth_password: basic_auth_password + with_credentials: true + is_default: true + json_data: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + secure_json_data: + tlsCACert: "MjNOcW9RdkbUDHZmpco2HCYzVq9dE+i6Yi+gmUJotq5CDA==" + tlsClientCert: "ckN0dGlyMXN503YNfjTcf9CV+GGQneN+xmAclQ==" + tlsClientKey: "ZkN4aG1aNkja/gKAB1wlnKFIsy2SRDq4slrM0A==" + editable: true diff --git a/pkg/services/provisioning/datasources/test-configs/all-properties/not.yaml.txt b/pkg/services/provisioning/datasources/test-configs/all-properties/not.yaml.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/services/provisioning/datasources/test-configs/all-properties/second.yaml 
b/pkg/services/provisioning/datasources/test-configs/all-properties/second.yaml new file mode 100644 index 00000000000..43c41ee9b3b --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/all-properties/second.yaml @@ -0,0 +1,7 @@ +purge_other_datasources: true +datasources: + - name: name2 + type: type2 + access: proxy + org_id: 2 + url: url2 diff --git a/pkg/services/provisioning/datasources/test-configs/broken-yaml/broken.yaml b/pkg/services/provisioning/datasources/test-configs/broken-yaml/broken.yaml new file mode 100644 index 00000000000..9050f543cef --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/broken-yaml/broken.yaml @@ -0,0 +1,6 @@ +#sfxzgnsxzcvnbzcvn +cvbn +cvbn +c +vbn +cvbncvbn \ No newline at end of file diff --git a/pkg/services/provisioning/datasources/test-configs/double-default/default-1.yaml b/pkg/services/provisioning/datasources/test-configs/double-default/default-1.yaml new file mode 100644 index 00000000000..f202d617fc9 --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/double-default/default-1.yaml @@ -0,0 +1,7 @@ +datasources: + - name: Graphite + type: graphite + access: proxy + url: http://localhost:8080 + is_default: true + diff --git a/pkg/services/provisioning/datasources/test-configs/double-default/default-2.yaml b/pkg/services/provisioning/datasources/test-configs/double-default/default-2.yaml new file mode 100644 index 00000000000..f202d617fc9 --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/double-default/default-2.yaml @@ -0,0 +1,7 @@ +datasources: + - name: Graphite + type: graphite + access: proxy + url: http://localhost:8080 + is_default: true + diff --git a/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/one-datasources.yaml b/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/one-datasources.yaml new file mode 100644 index 00000000000..a1a58dc6621 --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/one-datasources.yaml @@ -0,0 +1,7 @@ +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://localhost:9090 +delete_datasources: + - name: old-graphite diff --git a/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/two-datasources.yml b/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/two-datasources.yml new file mode 100644 index 00000000000..d9d151227c9 --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/insert-two-delete-two/two-datasources.yml @@ -0,0 +1,7 @@ +datasources: + - name: Graphite + type: graphite + access: proxy + url: http://localhost:8080 +delete_datasources: + - name: old-graphite3 diff --git a/pkg/services/provisioning/datasources/test-configs/two-datasources/two-datasources.yaml b/pkg/services/provisioning/datasources/test-configs/two-datasources/two-datasources.yaml new file mode 100644 index 00000000000..d555db3365b --- /dev/null +++ b/pkg/services/provisioning/datasources/test-configs/two-datasources/two-datasources.yaml @@ -0,0 +1,9 @@ +datasources: + - name: Graphite + type: graphite + access: proxy + url: http://localhost:8080 + - name: Prometheus + type: prometheus + access: proxy + url: http://localhost:9090 diff --git a/pkg/services/provisioning/datasources/test-configs/zero-datasources/placeholder-for-git b/pkg/services/provisioning/datasources/test-configs/zero-datasources/placeholder-for-git new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/pkg/services/provisioning/datasources/types.go b/pkg/services/provisioning/datasources/types.go new file mode 100644 index 00000000000..ee2175d6a90 --- /dev/null +++ b/pkg/services/provisioning/datasources/types.go @@ -0,0 +1,92 @@ +package datasources + +import "github.com/grafana/grafana/pkg/models" +import "github.com/grafana/grafana/pkg/components/simplejson" + +type DatasourcesAsConfig struct { + Datasources []*DataSourceFromConfig `json:"datasources" yaml:"datasources"` + DeleteDatasources []*DeleteDatasourceConfig `json:"delete_datasources" yaml:"delete_datasources"` +} + +type DeleteDatasourceConfig struct { + OrgId int64 `json:"org_id" yaml:"org_id"` + Name string `json:"name" yaml:"name"` +} + +type DataSourceFromConfig struct { + OrgId int64 `json:"org_id" yaml:"org_id"` + Version int `json:"version" yaml:"version"` + + Name string `json:"name" yaml:"name"` + Type string `json:"type" yaml:"type"` + Access string `json:"access" yaml:"access"` + Url string `json:"url" yaml:"url"` + Password string `json:"password" yaml:"password"` + User string `json:"user" yaml:"user"` + Database string `json:"database" yaml:"database"` + BasicAuth bool `json:"basic_auth" yaml:"basic_auth"` + BasicAuthUser string `json:"basic_auth_user" yaml:"basic_auth_user"` + BasicAuthPassword string `json:"basic_auth_password" yaml:"basic_auth_password"` + WithCredentials bool `json:"with_credentials" yaml:"with_credentials"` + IsDefault bool `json:"is_default" yaml:"is_default"` + JsonData map[string]interface{} `json:"json_data" yaml:"json_data"` + SecureJsonData map[string]string `json:"secure_json_data" yaml:"secure_json_data"` + Editable bool `json:"editable" yaml:"editable"` +} + +func createInsertCommand(ds *DataSourceFromConfig) *models.AddDataSourceCommand { + jsonData := simplejson.New() + if len(ds.JsonData) > 0 { + for k, v := range ds.JsonData { + jsonData.Set(k, v) + } + } + + return &models.AddDataSourceCommand{ + OrgId: ds.OrgId, + Name: ds.Name, + Type: ds.Type, + Access: models.DsAccess(ds.Access), + Url: ds.Url, + Password: ds.Password, + User: ds.User, + Database: ds.Database, + BasicAuth: ds.BasicAuth, + BasicAuthUser: ds.BasicAuthUser, + BasicAuthPassword: ds.BasicAuthPassword, + WithCredentials: ds.WithCredentials, + IsDefault: ds.IsDefault, + JsonData: jsonData, + SecureJsonData: ds.SecureJsonData, + ReadOnly: !ds.Editable, + } +} + +func createUpdateCommand(ds *DataSourceFromConfig, id int64) *models.UpdateDataSourceCommand { + jsonData := simplejson.New() + if len(ds.JsonData) > 0 { + for k, v := range ds.JsonData { + jsonData.Set(k, v) + } + } + + return &models.UpdateDataSourceCommand{ + Id: id, + OrgId: ds.OrgId, + Name: ds.Name, + Type: ds.Type, + Access: models.DsAccess(ds.Access), + Url: ds.Url, + Password: ds.Password, + User: ds.User, + Database: ds.Database, + BasicAuth: ds.BasicAuth, + BasicAuthUser: ds.BasicAuthUser, + BasicAuthPassword: ds.BasicAuthPassword, + WithCredentials: ds.WithCredentials, + IsDefault: ds.IsDefault, + JsonData: jsonData, + SecureJsonData: ds.SecureJsonData, + ReadOnly: !ds.Editable, + } +} diff --git a/pkg/services/provisioning/provisioning.go b/pkg/services/provisioning/provisioning.go new file mode 100644 index 00000000000..1bea60f03e4 --- /dev/null +++ b/pkg/services/provisioning/provisioning.go @@ -0,0 +1,14 @@ +package provisioning + +import ( + "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/services/provisioning/datasources" +) + +var ( + logger log.Logger = log.New("services.provisioning") +) + +func 
StartUp(datasourcePath string) error { + return datasources.Provision(datasourcePath) +} diff --git a/pkg/services/search/models.go b/pkg/services/search/models.go index a95a5dc3d4a..2065cc3b36a 100644 --- a/pkg/services/search/models.go +++ b/pkg/services/search/models.go @@ -54,15 +54,16 @@ type Query struct { } type FindPersistedDashboardsQuery struct { - Title string - OrgId int64 - SignedInUser *models.SignedInUser - IsStarred bool - DashboardIds []int64 - Type string - FolderId int64 - Tags []string - Limit int + Title string + OrgId int64 + SignedInUser *models.SignedInUser + IsStarred bool + DashboardIds []int64 + Type string + FolderId int64 + Tags []string + ExpandedFolders []int64 + Limit int Result HitList } diff --git a/pkg/services/sqlstore/alert.go b/pkg/services/sqlstore/alert.go index 33a4cae53c2..73be7d774fd 100644 --- a/pkg/services/sqlstore/alert.go +++ b/pkg/services/sqlstore/alert.go @@ -94,7 +94,12 @@ func HandleAlertsQuery(query *m.GetAlertsQuery) error { if i > 0 { sql.WriteString(" OR ") } - sql.WriteString("state = ? ") + if strings.HasPrefix(v, "not_") { + sql.WriteString("state <> ? ") + v = strings.TrimPrefix(v, "not_") + } else { + sql.WriteString("state = ? ") + } params = append(params, v) } sql.WriteString(")") diff --git a/pkg/services/sqlstore/annotation_test.go b/pkg/services/sqlstore/annotation_test.go index 3f7415a952b..e1902b63fa8 100644 --- a/pkg/services/sqlstore/annotation_test.go +++ b/pkg/services/sqlstore/annotation_test.go @@ -37,16 +37,18 @@ func TestAnnotations(t *testing.T) { repo := SqlAnnotationRepo{} Convey("Can save annotation", func() { - err := repo.Save(&annotations.Item{ + annotation := &annotations.Item{ OrgId: 1, UserId: 1, DashboardId: 1, Text: "hello", Epoch: 10, Tags: []string{"outage", "error", "type:outage", "server:server-1"}, - }) + } + err := repo.Save(annotation) So(err, ShouldBeNil) + So(annotation.Id, ShouldBeGreaterThan, 0) Convey("Can query for annotation", func() { items, err := repo.Find(&annotations.ItemQuery{ diff --git a/pkg/services/sqlstore/dashboard.go b/pkg/services/sqlstore/dashboard.go index fcc49799def..ce4c74ac3df 100644 --- a/pkg/services/sqlstore/dashboard.go +++ b/pkg/services/sqlstore/dashboard.go @@ -1,9 +1,6 @@ package sqlstore import ( - "bytes" - "fmt" - "strings" "time" "github.com/grafana/grafana/pkg/bus" @@ -189,77 +186,39 @@ type DashboardSearchProjection struct { } func findDashboards(query *search.FindPersistedDashboardsQuery) ([]DashboardSearchProjection, error) { - var sql bytes.Buffer - params := make([]interface{}, 0) limit := query.Limit if limit == 0 { limit = 1000 } - sql.WriteString(` - SELECT - dashboard.id, - dashboard.title, - dashboard.slug, - dashboard_tag.term, - dashboard.is_folder, - dashboard.folder_id, - folder.slug as folder_slug, - folder.title as folder_title - FROM `) + sb := NewSearchBuilder(query.SignedInUser, limit). + WithTags(query.Tags). 
+ WithDashboardIdsIn(query.DashboardIds) - // add tags filter - if len(query.Tags) > 0 { - sql.WriteString( - `( - SELECT - dashboard.id FROM dashboard - LEFT OUTER JOIN dashboard_tag ON dashboard_tag.dashboard_id = dashboard.id - `) - if query.IsStarred { - sql.WriteString(" INNER JOIN star on star.dashboard_id = dashboard.id") - } - - sql.WriteString(` WHERE dashboard_tag.term IN (?` + strings.Repeat(",?", len(query.Tags)-1) + `) AND `) - for _, tag := range query.Tags { - params = append(params, tag) - } - params = createSearchWhereClause(query, &sql, params) - fmt.Printf("params2 %v", params) - - // this ends the inner select (tag filtered part) - sql.WriteString(` - GROUP BY dashboard.id HAVING COUNT(dashboard.id) >= ? - LIMIT ?) as ids - INNER JOIN dashboard on ids.id = dashboard.id - `) - - params = append(params, len(query.Tags)) - params = append(params, limit) - } else { - sql.WriteString(`( SELECT dashboard.id FROM dashboard `) - if query.IsStarred { - sql.WriteString(" INNER JOIN star on star.dashboard_id = dashboard.id") - } - sql.WriteString(` WHERE `) - params = createSearchWhereClause(query, &sql, params) - - sql.WriteString(` - LIMIT ?) as ids - INNER JOIN dashboard on ids.id = dashboard.id - `) - params = append(params, limit) + if query.IsStarred { + sb.IsStarred() } - sql.WriteString(` - LEFT OUTER JOIN dashboard folder on folder.id = dashboard.folder_id - LEFT OUTER JOIN dashboard_tag on dashboard.id = dashboard_tag.dashboard_id`) + if len(query.Title) > 0 { + sb.WithTitle(query.Title) + } - sql.WriteString(fmt.Sprintf(" ORDER BY dashboard.title ASC LIMIT 5000")) + if len(query.Type) > 0 { + sb.WithType(query.Type) + } + + if query.FolderId > 0 { + sb.WithFolderId(query.FolderId) + } + + if len(query.ExpandedFolders) > 0 { + sb.WithExpandedFolders(query.ExpandedFolders) + } var res []DashboardSearchProjection - err := x.Sql(sql.String(), params...).Find(&res) + sql, params := sb.ToSql() + err := x.Sql(sql, params...).Find(&res) if err != nil { return nil, err } @@ -267,61 +226,6 @@ func findDashboards(query *search.FindPersistedDashboardsQuery) ([]DashboardSear return res, nil } -func createSearchWhereClause(query *search.FindPersistedDashboardsQuery, sql *bytes.Buffer, params []interface{}) []interface{} { - sql.WriteString(` dashboard.org_id=?`) - params = append(params, query.SignedInUser.OrgId) - - if query.IsStarred { - sql.WriteString(` AND star.user_id=?`) - params = append(params, query.SignedInUser.UserId) - } - - if len(query.DashboardIds) > 0 { - sql.WriteString(` AND dashboard.id IN (?` + strings.Repeat(",?", len(query.DashboardIds)-1) + `)`) - for _, dashboardId := range query.DashboardIds { - params = append(params, dashboardId) - } - } - - if query.SignedInUser.OrgRole != m.ROLE_ADMIN { - allowedDashboardsSubQuery := ` AND (dashboard.has_acl = 0 OR dashboard.id in ( - SELECT distinct d.id AS DashboardId - FROM dashboard AS d - LEFT JOIN dashboard_acl as da on d.folder_id = da.dashboard_id or d.id = da.dashboard_id - LEFT JOIN user_group_member as ugm on ugm.user_group_id = da.user_group_id - LEFT JOIN org_user ou on ou.role = da.role - WHERE - d.has_acl = 1 and - (da.user_id = ? or ugm.user_id = ? or ou.id is not null) - and d.org_id = ? 
- ) - )` - - sql.WriteString(allowedDashboardsSubQuery) - params = append(params, query.SignedInUser.UserId, query.SignedInUser.UserId, query.SignedInUser.OrgId) - } - - if len(query.Title) > 0 { - sql.WriteString(" AND dashboard.title " + dialect.LikeStr() + " ?") - params = append(params, "%"+query.Title+"%") - } - - if len(query.Type) > 0 && query.Type == "dash-folder" { - sql.WriteString(" AND dashboard.is_folder = 1") - } - - if len(query.Type) > 0 && query.Type == "dash-db" { - sql.WriteString(" AND dashboard.is_folder = 0") - } - - if query.FolderId > 0 { - sql.WriteString(" AND dashboard.folder_id = ?") - params = append(params, query.FolderId) - } - - return params -} - func SearchDashboards(query *search.FindPersistedDashboardsQuery) error { res, err := findDashboards(query) if err != nil { diff --git a/pkg/services/sqlstore/dashboard_test.go b/pkg/services/sqlstore/dashboard_test.go index e0473b0e38f..983b4ca1814 100644 --- a/pkg/services/sqlstore/dashboard_test.go +++ b/pkg/services/sqlstore/dashboard_test.go @@ -382,6 +382,19 @@ func TestDashboardDataAccess(t *testing.T) { currentUser := createUser("viewer", "Viewer", false) + Convey("and one folder is expanded, the other collapsed", func() { + Convey("should return dashboards in root and expanded folder", func() { + query := &search.FindPersistedDashboardsQuery{ExpandedFolders: []int64{folder1.Id}, SignedInUser: &m.SignedInUser{UserId: currentUser.Id, OrgId: 1}, OrgId: 1} + err := SearchDashboards(query) + So(err, ShouldBeNil) + So(len(query.Result), ShouldEqual, 4) + So(query.Result[0].Id, ShouldEqual, folder1.Id) + So(query.Result[1].Id, ShouldEqual, folder2.Id) + So(query.Result[2].Id, ShouldEqual, childDash1.Id) + So(query.Result[3].Id, ShouldEqual, dashInRoot.Id) + }) + }) + Convey("and acl is set for one dashboard folder", func() { var otherUser int64 = 999 updateTestDashboardWithAcl(folder1.Id, otherUser, m.PERMISSION_EDIT) diff --git a/pkg/services/sqlstore/dashboard_version.go b/pkg/services/sqlstore/dashboard_version.go index c5ef7d374f0..49c35397094 100644 --- a/pkg/services/sqlstore/dashboard_version.go +++ b/pkg/services/sqlstore/dashboard_version.go @@ -1,13 +1,17 @@ package sqlstore import ( + "strings" + "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" ) func init() { bus.AddHandler("sql", GetDashboardVersion) bus.AddHandler("sql", GetDashboardVersions) + bus.AddHandler("sql", DeleteExpiredVersions) } // GetDashboardVersion gets the dashboard version for the given dashboard ID and version number. @@ -62,3 +66,73 @@ func GetDashboardVersions(query *m.GetDashboardVersionsQuery) error { } return nil } + +func DeleteExpiredVersions(cmd *m.DeleteExpiredVersionsCommand) error { + return inTransaction(func(sess *DBSession) error { + expiredCount := int64(0) + versions := []DashboardVersionExp{} + versionsToKeep := setting.DashboardVersionsToKeep + + if versionsToKeep < 1 { + versionsToKeep = 1 + } + + err := sess.Table("dashboard_version"). + Select("dashboard_version.id, dashboard_version.version, dashboard_version.dashboard_id"). + Where(`dashboard_id IN ( + SELECT dashboard_id FROM dashboard_version + GROUP BY dashboard_id HAVING COUNT(dashboard_version.id) > ? + )`, versionsToKeep). + Desc("dashboard_version.dashboard_id", "dashboard_version.version"). 
+ Find(&versions) + + if err != nil { + return err + } + + // Keep last versionsToKeep versions and delete other + versionIdsToDelete := getVersionIDsToDelete(versions, versionsToKeep) + if len(versionIdsToDelete) > 0 { + deleteExpiredSql := `DELETE FROM dashboard_version WHERE id IN (?` + strings.Repeat(",?", len(versionIdsToDelete)-1) + `)` + expiredResponse, err := sess.Exec(deleteExpiredSql, versionIdsToDelete...) + if err != nil { + return err + } + expiredCount, _ = expiredResponse.RowsAffected() + sqlog.Debug("Deleted old/expired dashboard versions", "expired", expiredCount) + } + + return nil + }) +} + +// Short version of DashboardVersion for getting expired versions +type DashboardVersionExp struct { + Id int64 `json:"id"` + DashboardId int64 `json:"dashboardId"` + Version int `json:"version"` +} + +func getVersionIDsToDelete(versions []DashboardVersionExp, versionsToKeep int) []interface{} { + versionIds := make([]interface{}, 0) + + if len(versions) == 0 { + return versionIds + } + + currentDashboard := versions[0].DashboardId + count := 0 + for _, v := range versions { + if v.DashboardId == currentDashboard { + count++ + } else { + count = 1 + currentDashboard = v.DashboardId + } + if count > versionsToKeep { + versionIds = append(versionIds, v.Id) + } + } + + return versionIds +} diff --git a/pkg/services/sqlstore/dashboard_version_test.go b/pkg/services/sqlstore/dashboard_version_test.go index 919efcd0004..180a87f6ad8 100644 --- a/pkg/services/sqlstore/dashboard_version_test.go +++ b/pkg/services/sqlstore/dashboard_version_test.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana/pkg/components/simplejson" m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" ) func updateTestDashboard(dashboard *m.Dashboard, data map[string]interface{}) { @@ -101,3 +102,44 @@ func TestGetDashboardVersions(t *testing.T) { }) }) } + +func TestDeleteExpiredVersions(t *testing.T) { + Convey("Testing dashboard versions clean up", t, func() { + InitTestDB(t) + versionsToKeep := 5 + versionsToWrite := 10 + setting.DashboardVersionsToKeep = versionsToKeep + + savedDash := insertTestDashboard("test dash 53", 1, "diff-all") + for i := 0; i < versionsToWrite-1; i++ { + updateTestDashboard(savedDash, map[string]interface{}{ + "tags": "different-tag", + }) + } + + Convey("Clean up old dashboard versions", func() { + err := DeleteExpiredVersions(&m.DeleteExpiredVersionsCommand{}) + So(err, ShouldBeNil) + + query := m.GetDashboardVersionsQuery{DashboardId: savedDash.Id, OrgId: 1} + GetDashboardVersions(&query) + + So(len(query.Result), ShouldEqual, versionsToKeep) + // Ensure latest versions were kept + So(query.Result[versionsToKeep-1].Version, ShouldEqual, versionsToWrite-versionsToKeep+1) + So(query.Result[0].Version, ShouldEqual, versionsToWrite) + }) + + Convey("Don't delete anything if there're no expired versions", func() { + setting.DashboardVersionsToKeep = versionsToWrite + + err := DeleteExpiredVersions(&m.DeleteExpiredVersionsCommand{}) + So(err, ShouldBeNil) + + query := m.GetDashboardVersionsQuery{DashboardId: savedDash.Id, OrgId: 1} + GetDashboardVersions(&query) + + So(len(query.Result), ShouldEqual, versionsToWrite) + }) + }) +} diff --git a/pkg/services/sqlstore/datasource.go b/pkg/services/sqlstore/datasource.go index 7069990c476..e9b400a1772 100644 --- a/pkg/services/sqlstore/datasource.go +++ b/pkg/services/sqlstore/datasource.go @@ -13,6 +13,7 @@ import ( func init() { bus.AddHandler("sql", GetDataSources) + bus.AddHandler("sql", GetAllDataSources) 
bus.AddHandler("sql", AddDataSource) bus.AddHandler("sql", DeleteDataSourceById) bus.AddHandler("sql", DeleteDataSourceByName) @@ -54,10 +55,19 @@ func GetDataSources(query *m.GetDataSourcesQuery) error { return sess.Find(&query.Result) } +func GetAllDataSources(query *m.GetAllDataSourcesQuery) error { + sess := x.Limit(1000, 0).Asc("name") + + query.Result = make([]*m.DataSource, 0) + return sess.Find(&query.Result) +} + func DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error { return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM data_source WHERE id=? and org_id=?" - _, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId) + result, err := sess.Exec(rawSql, cmd.Id, cmd.OrgId) + affected, _ := result.RowsAffected() + cmd.DeletedDatasourcesCount = affected return err }) } @@ -65,7 +75,9 @@ func DeleteDataSourceById(cmd *m.DeleteDataSourceByIdCommand) error { func DeleteDataSourceByName(cmd *m.DeleteDataSourceByNameCommand) error { return inTransaction(func(sess *DBSession) error { var rawSql = "DELETE FROM data_source WHERE name=? and org_id=?" - _, err := sess.Exec(rawSql, cmd.Name, cmd.OrgId) + result, err := sess.Exec(rawSql, cmd.Name, cmd.OrgId) + affected, _ := result.RowsAffected() + cmd.DeletedDatasourcesCount = affected return err }) } @@ -98,6 +110,7 @@ func AddDataSource(cmd *m.AddDataSourceCommand) error { Created: time.Now(), Updated: time.Now(), Version: 1, + ReadOnly: cmd.ReadOnly, } if _, err := sess.Insert(ds); err != nil { @@ -143,12 +156,14 @@ func UpdateDataSource(cmd *m.UpdateDataSourceCommand) error { JsonData: cmd.JsonData, SecureJsonData: securejsondata.GetEncryptedJsonData(cmd.SecureJsonData), Updated: time.Now(), + ReadOnly: cmd.ReadOnly, Version: cmd.Version + 1, } sess.UseBool("is_default") sess.UseBool("basic_auth") sess.UseBool("with_credentials") + sess.UseBool("read_only") var updateSession *xorm.Session if cmd.Version != 0 { diff --git a/pkg/services/sqlstore/datasource_test.go b/pkg/services/sqlstore/datasource_test.go index de16b17f960..e6f0114ab4d 100644 --- a/pkg/services/sqlstore/datasource_test.go +++ b/pkg/services/sqlstore/datasource_test.go @@ -47,6 +47,7 @@ func TestDataAccess(t *testing.T) { Access: m.DS_ACCESS_DIRECT, Url: "http://test", Database: "site", + ReadOnly: true, }) So(err, ShouldBeNil) @@ -61,6 +62,7 @@ func TestDataAccess(t *testing.T) { So(ds.OrgId, ShouldEqual, 10) So(ds.Database, ShouldEqual, "site") + So(ds.ReadOnly, ShouldBeTrue) }) Convey("Given a datasource", func() { diff --git a/pkg/services/sqlstore/migrations/datasource_mig.go b/pkg/services/sqlstore/migrations/datasource_mig.go index cc8a7f05177..919881adaba 100644 --- a/pkg/services/sqlstore/migrations/datasource_mig.go +++ b/pkg/services/sqlstore/migrations/datasource_mig.go @@ -126,4 +126,8 @@ func addDataSourceMigration(mg *Migrator) { Sqlite(setVersionToOneWhereZero). Postgres(setVersionToOneWhereZero). 
Mysql(setVersionToOneWhereZero)) + + mg.AddMigration("Add read_only data column", NewAddColumnMigration(tableV2, &Column{ + Name: "read_only", Type: DB_Bool, Nullable: true, + })) } diff --git a/pkg/services/sqlstore/search_builder.go b/pkg/services/sqlstore/search_builder.go new file mode 100644 index 00000000000..f58858d5347 --- /dev/null +++ b/pkg/services/sqlstore/search_builder.go @@ -0,0 +1,228 @@ +package sqlstore + +import ( + "bytes" + "strings" + + m "github.com/grafana/grafana/pkg/models" +) + +// SearchBuilder is a builder/object mother that builds a dashboard search query +type SearchBuilder struct { + tags []string + isStarred bool + limit int + signedInUser *m.SignedInUser + whereDashboardIdsIn []int64 + whereTitle string + whereTypeFolder bool + whereTypeDash bool + whereFolderId int64 + expandedFolders []int64 + sql bytes.Buffer + params []interface{} +} + +func NewSearchBuilder(signedInUser *m.SignedInUser, limit int) *SearchBuilder { + searchBuilder := &SearchBuilder{ + signedInUser: signedInUser, + limit: limit, + } + + return searchBuilder +} + +func (sb *SearchBuilder) WithTags(tags []string) *SearchBuilder { + if len(tags) > 0 { + sb.tags = tags + } + + return sb +} + +func (sb *SearchBuilder) IsStarred() *SearchBuilder { + sb.isStarred = true + + return sb +} + +func (sb *SearchBuilder) WithDashboardIdsIn(ids []int64) *SearchBuilder { + if len(ids) > 0 { + sb.whereDashboardIdsIn = ids + } + + return sb +} + +func (sb *SearchBuilder) WithTitle(title string) *SearchBuilder { + sb.whereTitle = title + + return sb +} + +func (sb *SearchBuilder) WithType(queryType string) *SearchBuilder { + if len(queryType) > 0 && queryType == "dash-folder" { + sb.whereTypeFolder = true + } + + if len(queryType) > 0 && queryType == "dash-db" { + sb.whereTypeDash = true + } + + return sb +} + +func (sb *SearchBuilder) WithFolderId(folderId int64) *SearchBuilder { + sb.whereFolderId = folderId + + return sb +} + +func (sb *SearchBuilder) WithExpandedFolders(expandedFolders []int64) *SearchBuilder { + sb.expandedFolders = expandedFolders + return sb +} + +// ToSql builds the sql and returns it as a string, together with the params. 
+func (sb *SearchBuilder) ToSql() (string, []interface{}) { + sb.params = make([]interface{}, 0) + + sb.buildSelect() + + if len(sb.tags) > 0 { + sb.buildTagQuery() + } else { + sb.buildMainQuery() + } + + sb.sql.WriteString(` + LEFT OUTER JOIN dashboard folder on folder.id = dashboard.folder_id + LEFT OUTER JOIN dashboard_tag on dashboard.id = dashboard_tag.dashboard_id`) + + sb.sql.WriteString(" ORDER BY dashboard.title ASC LIMIT 5000") + + return sb.sql.String(), sb.params +} + +func (sb *SearchBuilder) buildSelect() { + sb.sql.WriteString( + `SELECT + dashboard.id, + dashboard.title, + dashboard.slug, + dashboard_tag.term, + dashboard.is_folder, + dashboard.folder_id, + folder.slug as folder_slug, + folder.title as folder_title + FROM `) +} + +func (sb *SearchBuilder) buildTagQuery() { + sb.sql.WriteString( + `( + SELECT + dashboard.id FROM dashboard + LEFT OUTER JOIN dashboard_tag ON dashboard_tag.dashboard_id = dashboard.id + `) + + if sb.isStarred { + sb.sql.WriteString(" INNER JOIN star on star.dashboard_id = dashboard.id") + } + + sb.sql.WriteString(` WHERE dashboard_tag.term IN (?` + strings.Repeat(",?", len(sb.tags)-1) + `) AND `) + for _, tag := range sb.tags { + sb.params = append(sb.params, tag) + } + + sb.buildSearchWhereClause() + + // this ends the inner select (tag filtered part) + sb.sql.WriteString(` + GROUP BY dashboard.id HAVING COUNT(dashboard.id) >= ? + LIMIT ?) as ids + INNER JOIN dashboard on ids.id = dashboard.id + `) + + sb.params = append(sb.params, len(sb.tags)) + sb.params = append(sb.params, sb.limit) +} + +func (sb *SearchBuilder) buildMainQuery() { + sb.sql.WriteString(`( SELECT dashboard.id FROM dashboard `) + + if sb.isStarred { + sb.sql.WriteString(" INNER JOIN star on star.dashboard_id = dashboard.id") + } + + sb.sql.WriteString(` WHERE `) + sb.buildSearchWhereClause() + + sb.sql.WriteString(` + LIMIT ?) as ids + INNER JOIN dashboard on ids.id = dashboard.id + `) + sb.params = append(sb.params, sb.limit) +} + +func (sb *SearchBuilder) buildSearchWhereClause() { + sb.sql.WriteString(` dashboard.org_id=?`) + sb.params = append(sb.params, sb.signedInUser.OrgId) + + if sb.isStarred { + sb.sql.WriteString(` AND star.user_id=?`) + sb.params = append(sb.params, sb.signedInUser.UserId) + } + + if len(sb.whereDashboardIdsIn) > 0 { + sb.sql.WriteString(` AND dashboard.id IN (?` + strings.Repeat(",?", len(sb.whereDashboardIdsIn)-1) + `)`) + for _, dashboardId := range sb.whereDashboardIdsIn { + sb.params = append(sb.params, dashboardId) + } + } + + if sb.signedInUser.OrgRole != m.ROLE_ADMIN { + allowedDashboardsSubQuery := ` AND (dashboard.has_acl = 0 OR dashboard.id in ( + SELECT distinct d.id AS DashboardId + FROM dashboard AS d + LEFT JOIN dashboard_acl as da on d.folder_id = da.dashboard_id or d.id = da.dashboard_id + LEFT JOIN user_group_member as ugm on ugm.user_group_id = da.user_group_id + LEFT JOIN org_user ou on ou.role = da.role + WHERE + d.has_acl = 1 and + (da.user_id = ? or ugm.user_id = ? or ou.id is not null) + and d.org_id = ? 
+ ) + )` + + sb.sql.WriteString(allowedDashboardsSubQuery) + sb.params = append(sb.params, sb.signedInUser.UserId, sb.signedInUser.UserId, sb.signedInUser.OrgId) + } + + if len(sb.whereTitle) > 0 { + sb.sql.WriteString(" AND dashboard.title " + dialect.LikeStr() + " ?") + sb.params = append(sb.params, "%"+sb.whereTitle+"%") + } + + if sb.whereTypeFolder { + sb.sql.WriteString(" AND dashboard.is_folder = 1") + } + + if sb.whereTypeDash { + sb.sql.WriteString(" AND dashboard.is_folder = 0") + } + + if sb.whereFolderId > 0 { + sb.sql.WriteString(" AND dashboard.folder_id = ?") + sb.params = append(sb.params, sb.whereFolderId) + } + + if len(sb.expandedFolders) > 0 { + sb.sql.WriteString(` AND (dashboard.folder_id IN (?` + strings.Repeat(",?", len(sb.expandedFolders)-1) + `) `) + sb.sql.WriteString(` OR dashboard.folder_id IS NULL OR dashboard.folder_id = 0)`) + + for _, ef := range sb.expandedFolders { + sb.params = append(sb.params, ef) + } + } +} diff --git a/pkg/services/sqlstore/search_builder_test.go b/pkg/services/sqlstore/search_builder_test.go new file mode 100644 index 00000000000..32ccbc583f5 --- /dev/null +++ b/pkg/services/sqlstore/search_builder_test.go @@ -0,0 +1,37 @@ +package sqlstore + +import ( + "testing" + + m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/services/sqlstore/migrator" + . "github.com/smartystreets/goconvey/convey" +) + +func TestSearchBuilder(t *testing.T) { + dialect = migrator.NewDialect("sqlite3") + + Convey("Testing building a search", t, func() { + signedInUser := &m.SignedInUser{ + OrgId: 1, + UserId: 1, + } + sb := NewSearchBuilder(signedInUser, 1000) + + Convey("When building a normal search", func() { + sql, params := sb.IsStarred().WithTitle("test").ToSql() + So(sql, ShouldStartWith, "SELECT") + So(sql, ShouldContainSubstring, "INNER JOIN dashboard on ids.id = dashboard.id") + So(sql, ShouldEndWith, "ORDER BY dashboard.title ASC LIMIT 5000") + So(len(params), ShouldBeGreaterThan, 0) + }) + + Convey("When building a search with tag filter", func() { + sql, params := sb.WithTags([]string{"tag1", "tag2"}).ToSql() + So(sql, ShouldStartWith, "SELECT") + So(sql, ShouldContainSubstring, "LEFT OUTER JOIN dashboard_tag") + So(sql, ShouldEndWith, "ORDER BY dashboard.title ASC LIMIT 5000") + So(len(params), ShouldBeGreaterThan, 0) + }) + }) +} diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index e2fc1f29063..f37499bd60f 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ b/pkg/services/sqlstore/sqlstore.go @@ -158,10 +158,14 @@ func getEngine() (*xorm.Engine, error) { } else { engine.SetMaxOpenConns(DbCfg.MaxOpenConn) engine.SetMaxIdleConns(DbCfg.MaxIdleConn) - engine.SetLogger(&xorm.DiscardLogger{}) - // engine.SetLogger(NewXormLogger(log.LvlInfo, log.New("sqlstore.xorm"))) - // engine.ShowSQL = true - // engine.ShowInfo = true + debugSql := setting.Cfg.Section("database").Key("log_queries").MustBool(false) + if !debugSql { + engine.SetLogger(&xorm.DiscardLogger{}) + } else { + engine.SetLogger(NewXormLogger(log.LvlInfo, log.New("sqlstore.xorm"))) + engine.ShowSQL(true) + engine.ShowExecTime(true) + } } return engine, nil } @@ -190,12 +194,12 @@ func LoadConfig() { DbCfg.Host = sec.Key("host").String() DbCfg.Name = sec.Key("name").String() DbCfg.User = sec.Key("user").String() - DbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0) - DbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(0) if len(DbCfg.Pwd) == 0 { DbCfg.Pwd = sec.Key("password").String() } } + DbCfg.MaxOpenConn = 
sec.Key("max_open_conn").MustInt(0) + DbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(0) if DbCfg.Type == "sqlite3" { UseSQLite3 = true diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index ca65fe581af..2caf7366727 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -50,11 +50,12 @@ var ( BuildStamp int64 // Paths - LogsPath string - HomePath string - DataPath string - PluginsPath string - CustomInitPath = "conf/custom.ini" + LogsPath string + HomePath string + DataPath string + PluginsPath string + DatasourcesPath string + CustomInitPath = "conf/custom.ini" // Log settings. LogModes []string @@ -89,6 +90,9 @@ var ( SnapShotTTLDays int SnapShotRemoveExpired bool + // Dashboard history + DashboardVersionsToKeep int + // User settings AllowUserSignUp bool AllowUserOrgCreate bool @@ -470,6 +474,7 @@ func NewConfigContext(args *CommandLineArgs) error { Env = Cfg.Section("").Key("app_mode").MustString("development") InstanceName = Cfg.Section("").Key("instance_name").MustString("unknown_instance_name") PluginsPath = makeAbsolute(Cfg.Section("paths").Key("plugins").String(), HomePath) + DatasourcesPath = makeAbsolute(Cfg.Section("paths").Key("datasources").String(), HomePath) server := Cfg.Section("server") AppUrl, AppSubUrl = parseAppUrlAndSubUrl(server) @@ -518,6 +523,10 @@ func NewConfigContext(args *CommandLineArgs) error { SnapShotRemoveExpired = snapshots.Key("snapshot_remove_expired").MustBool(true) SnapShotTTLDays = snapshots.Key("snapshot_TTL_days").MustInt(90) + // read dashboard settings + dashboards := Cfg.Section("dashboards") + DashboardVersionsToKeep = dashboards.Key("versions_to_keep").MustInt(20) + // read data source proxy white list DataProxyWhiteList = make(map[string]bool) for _, hostAndIp := range util.SplitString(security.Key("data_source_proxy_whitelist").String()) { @@ -661,5 +670,6 @@ func LogConfigurationInfo() { logger.Info("Path Data", "path", DataPath) logger.Info("Path Logs", "path", LogsPath) logger.Info("Path Plugins", "path", PluginsPath) + logger.Info("Path Datasources", "path", DatasourcesPath) logger.Info("App mode " + Env) } diff --git a/pkg/tsdb/cloudwatch/cloudwatch.go b/pkg/tsdb/cloudwatch/cloudwatch.go index 266b71ec14e..d5bdd010269 100644 --- a/pkg/tsdb/cloudwatch/cloudwatch.go +++ b/pkg/tsdb/cloudwatch/cloudwatch.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/grafana/grafana/pkg/components/null" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/metrics" @@ -24,6 +25,7 @@ import ( type CloudWatchExecutor struct { *models.DataSource + ec2Svc ec2iface.EC2API } type DatasourceInfo struct { @@ -267,7 +269,10 @@ func parseQuery(model *simplejson.Json) (*CloudWatchQuery, error) { period = int(d.Seconds()) } - alias := model.Get("alias").MustString("{{metric}}_{{stat}}") + alias := model.Get("alias").MustString() + if alias == "" { + alias = "{{metric}}_{{stat}}" + } return &CloudWatchQuery{ Region: region, @@ -287,6 +292,7 @@ func formatAlias(query *CloudWatchQuery, stat string, dimensions map[string]stri data["namespace"] = query.Namespace data["metric"] = query.MetricName data["stat"] = stat + data["period"] = strconv.Itoa(query.Period) for k, v := range dimensions { data[k] = v } @@ -311,7 +317,8 @@ func parseResponse(resp *cloudwatch.GetMetricStatisticsOutput, query *CloudWatch var value float64 for _, s := range 
append(query.Statistics, query.ExtendedStatistics...) { series := tsdb.TimeSeries{ - Tags: map[string]string{}, + Tags: map[string]string{}, + Points: make([]tsdb.TimePoint, 0), } for _, d := range query.Dimensions { series.Tags[*d.Name] = *d.Value diff --git a/pkg/tsdb/cloudwatch/metric_find_query.go b/pkg/tsdb/cloudwatch/metric_find_query.go index b1ce507d27c..b9d4d5b6a80 100644 --- a/pkg/tsdb/cloudwatch/metric_find_query.go +++ b/pkg/tsdb/cloudwatch/metric_find_query.go @@ -87,6 +87,7 @@ func init() { "AWS/Logs": {"IncomingBytes", "IncomingLogEvents", "ForwardedBytes", "ForwardedLogEvents", "DeliveryErrors", "DeliveryThrottling"}, "AWS/ML": {"PredictCount", "PredictFailureCount"}, "AWS/NATGateway": {"PacketsOutToDestination", "PacketsOutToSource", "PacketsInFromSource", "PacketsInFromDestination", "BytesOutToDestination", "BytesOutToSource", "BytesInFromSource", "BytesInFromDestination", "ErrorPortAllocation", "ActiveConnectionCount", "ConnectionAttemptCount", "ConnectionEstablishedCount", "IdleTimeoutCount", "PacketsDropCount"}, + "AWS/NetworkELB": {"ActiveFlowCount", "ConsumedLCUs", "HealthyHostCount", "NewFlowCount", "ProcessedBytes", "TCP_Client_Reset_Count", "TCP_ELB_Reset_Count", "TCP_Target_Reset_Count", "UnHealthyHostCount"}, "AWS/OpsWorks": {"cpu_idle", "cpu_nice", "cpu_system", "cpu_user", "cpu_waitio", "load_1", "load_5", "load_15", "memory_buffers", "memory_cached", "memory_free", "memory_swap", "memory_total", "memory_used", "procs"}, "AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"}, "AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", "UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"}, @@ -132,6 +133,7 @@ func init() { "AWS/Logs": {"LogGroupName", "DestinationType", "FilterName"}, "AWS/ML": {"MLModelId", "RequestMode"}, "AWS/NATGateway": {"NatGatewayId"}, + "AWS/NetworkELB": {"LoadBalancer", "TargetGroup", "AvailabilityZone"}, "AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"}, "AWS/Redshift": {"NodeID", "ClusterIdentifier"}, "AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DbClusterIdentifier", "DatabaseClass", "EngineName", "Role"}, @@ -183,6 +185,18 @@ func (e *CloudWatchExecutor) executeMetricFindQuery(ctx context.Context, queryCo data, err = e.handleGetEbsVolumeIds(ctx, parameters, queryContext) break case "ec2_instance_attribute": + region := parameters.Get("region").MustString() + dsInfo := e.getDsInfo(region) + cfg, err := e.getAwsConfig(dsInfo) + if err != 
nil { + return nil, errors.New("Failed to call ec2:DescribeInstances") + } + sess, err := session.NewSession(cfg) + if err != nil { + return nil, errors.New("Failed to call ec2:DescribeInstances") + } + e.ec2Svc = ec2.New(sess, cfg) + data, err = e.handleGetEc2InstanceAttribute(ctx, parameters, queryContext) break } @@ -373,14 +387,16 @@ func (e *CloudWatchExecutor) handleGetEc2InstanceAttribute(ctx context.Context, var filters []*ec2.Filter for k, v := range filterJson { - if vv, ok := v.([]string); ok { - var vvvv []*string + if vv, ok := v.([]interface{}); ok { + var vvvvv []*string for _, vvv := range vv { - vvvv = append(vvvv, &vvv) + if vvvv, ok := vvv.(string); ok { + vvvvv = append(vvvvv, &vvvv) + } } filters = append(filters, &ec2.Filter{ Name: aws.String(k), - Values: vvvv, + Values: vvvvv, }) } } @@ -467,24 +483,13 @@ func (e *CloudWatchExecutor) cloudwatchListMetrics(region string, namespace stri } func (e *CloudWatchExecutor) ec2DescribeInstances(region string, filters []*ec2.Filter, instanceIds []*string) (*ec2.DescribeInstancesOutput, error) { - dsInfo := e.getDsInfo(region) - cfg, err := e.getAwsConfig(dsInfo) - if err != nil { - return nil, errors.New("Failed to call ec2:DescribeInstances") - } - sess, err := session.NewSession(cfg) - if err != nil { - return nil, errors.New("Failed to call ec2:DescribeInstances") - } - svc := ec2.New(sess, cfg) - params := &ec2.DescribeInstancesInput{ Filters: filters, InstanceIds: instanceIds, } var resp ec2.DescribeInstancesOutput - err = svc.DescribeInstancesPages(params, + err := e.ec2Svc.DescribeInstancesPages(params, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool { reservations, _ := awsutil.ValuesAtPath(page, "Reservations") for _, reservation := range reservations { diff --git a/pkg/tsdb/cloudwatch/metric_find_query_test.go b/pkg/tsdb/cloudwatch/metric_find_query_test.go index 238e815fac1..255b343a33a 100644 --- a/pkg/tsdb/cloudwatch/metric_find_query_test.go +++ b/pkg/tsdb/cloudwatch/metric_find_query_test.go @@ -1,13 +1,28 @@ package cloudwatch import ( + "context" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/grafana/grafana/pkg/components/simplejson" + "github.com/grafana/grafana/pkg/tsdb" . 
"github.com/smartystreets/goconvey/convey" ) +type mockedEc2 struct { + ec2iface.EC2API + Resp ec2.DescribeInstancesOutput +} + +func (m mockedEc2) DescribeInstancesPages(in *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool) error { + fn(&m.Resp, true) + return nil +} + func TestCloudWatchMetrics(t *testing.T) { Convey("When calling getMetricsForCustomMetrics", t, func() { @@ -66,4 +81,37 @@ func TestCloudWatchMetrics(t *testing.T) { }) }) + Convey("When calling handleGetEc2InstanceAttribute", t, func() { + executor := &CloudWatchExecutor{ + ec2Svc: mockedEc2{Resp: ec2.DescribeInstancesOutput{ + Reservations: []*ec2.Reservation{ + { + Instances: []*ec2.Instance{ + { + InstanceId: aws.String("i-12345678"), + Tags: []*ec2.Tag{ + { + Key: aws.String("Environment"), + Value: aws.String("production"), + }, + }, + }, + }, + }, + }, + }}, + } + + json := simplejson.New() + json.Set("region", "us-east-1") + json.Set("attributeName", "InstanceId") + filters := make(map[string]interface{}) + filters["tag:Environment"] = []string{"production"} + json.Set("filters", filters) + result, _ := executor.handleGetEc2InstanceAttribute(context.Background(), json, &tsdb.TsdbQuery{}) + + Convey("Should equal production InstanceId", func() { + So(result[0].Text, ShouldEqual, "i-12345678") + }) + }) } diff --git a/pkg/tsdb/influxdb/model_parser.go b/pkg/tsdb/influxdb/model_parser.go index f3d87739e5b..deb2f15e3ce 100644 --- a/pkg/tsdb/influxdb/model_parser.go +++ b/pkg/tsdb/influxdb/model_parser.go @@ -2,9 +2,11 @@ package influxdb import ( "strconv" + "time" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/tsdb" ) type InfluxdbQueryParser struct{} @@ -37,13 +39,7 @@ func (qp *InfluxdbQueryParser) Parse(model *simplejson.Json, dsInfo *models.Data return nil, err } - interval := model.Get("interval").MustString("") - if interval == "" && dsInfo.JsonData != nil { - dsInterval := dsInfo.JsonData.Get("timeInterval").MustString("") - if dsInterval != "" { - interval = dsInterval - } - } + parsedInterval, err := tsdb.GetIntervalFrom(dsInfo, model, time.Millisecond*1) return &Query{ Measurement: measurement, @@ -53,7 +49,7 @@ func (qp *InfluxdbQueryParser) Parse(model *simplejson.Json, dsInfo *models.Data Tags: tags, Selects: selects, RawQuery: rawQuery, - Interval: interval, + Interval: parsedInterval, Alias: alias, UseRawQuery: useRawQuery, }, nil diff --git a/pkg/tsdb/influxdb/model_parser_test.go b/pkg/tsdb/influxdb/model_parser_test.go index f7049efb9a7..f8759afd3ba 100644 --- a/pkg/tsdb/influxdb/model_parser_test.go +++ b/pkg/tsdb/influxdb/model_parser_test.go @@ -2,6 +2,7 @@ package influxdb import ( "testing" + "time" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/models" @@ -115,7 +116,7 @@ func TestInfluxdbQueryParser(t *testing.T) { So(len(res.GroupBy), ShouldEqual, 3) So(len(res.Selects), ShouldEqual, 3) So(len(res.Tags), ShouldEqual, 2) - So(res.Interval, ShouldEqual, ">20s") + So(res.Interval, ShouldEqual, time.Second*20) So(res.Alias, ShouldEqual, "serie alias") }) @@ -174,7 +175,7 @@ func TestInfluxdbQueryParser(t *testing.T) { So(len(res.GroupBy), ShouldEqual, 2) So(len(res.Selects), ShouldEqual, 1) So(len(res.Tags), ShouldEqual, 0) - So(res.Interval, ShouldEqual, ">10s") + So(res.Interval, ShouldEqual, time.Second*10) }) }) } diff --git a/pkg/tsdb/influxdb/models.go b/pkg/tsdb/influxdb/models.go index 44e05608290..82ed72c2a18 100644 --- 
a/pkg/tsdb/influxdb/models.go +++ b/pkg/tsdb/influxdb/models.go @@ -1,5 +1,7 @@ package influxdb +import "time" + type Query struct { Measurement string Policy string @@ -10,8 +12,7 @@ type Query struct { RawQuery string UseRawQuery bool Alias string - - Interval string + Interval time.Duration } type Tag struct { diff --git a/pkg/tsdb/influxdb/query.go b/pkg/tsdb/influxdb/query.go index 3a796e6db63..499f446e9f0 100644 --- a/pkg/tsdb/influxdb/query.go +++ b/pkg/tsdb/influxdb/query.go @@ -29,10 +29,8 @@ func (query *Query) Build(queryContext *tsdb.TsdbQuery) (string, error) { res += query.renderGroupBy(queryContext) } - interval, err := getDefinedInterval(query, queryContext) - if err != nil { - return "", err - } + calculator := tsdb.NewIntervalCalculator(&tsdb.IntervalOptions{}) + interval := calculator.Calculate(queryContext.TimeRange, query.Interval) res = strings.Replace(res, "$timeFilter", query.renderTimeFilter(queryContext), -1) res = strings.Replace(res, "$interval", interval.Text, -1) @@ -41,29 +39,6 @@ func (query *Query) Build(queryContext *tsdb.TsdbQuery) (string, error) { return res, nil } -func getDefinedInterval(query *Query, queryContext *tsdb.TsdbQuery) (*tsdb.Interval, error) { - defaultInterval := tsdb.CalculateInterval(queryContext.TimeRange) - - if query.Interval == "" { - return &defaultInterval, nil - } - - setInterval := strings.Replace(strings.Replace(query.Interval, "<", "", 1), ">", "", 1) - parsedSetInterval, err := time.ParseDuration(setInterval) - - if err != nil { - return nil, err - } - - if strings.Contains(query.Interval, ">") { - if defaultInterval.Value > parsedSetInterval { - return &defaultInterval, nil - } - } - - return &tsdb.Interval{Value: parsedSetInterval, Text: setInterval}, nil -} - func (query *Query) renderTags() []string { var res []string for i, tag := range query.Tags { diff --git a/pkg/tsdb/influxdb/query_test.go b/pkg/tsdb/influxdb/query_test.go index 5c8dc1eaf69..4a620539a26 100644 --- a/pkg/tsdb/influxdb/query_test.go +++ b/pkg/tsdb/influxdb/query_test.go @@ -2,6 +2,7 @@ package influxdb import ( "testing" + "time" "strings" @@ -38,7 +39,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) { Measurement: "cpu", Policy: "policy", GroupBy: []*QueryPart{groupBy1, groupBy3}, - Interval: "10s", + Interval: time.Second * 10, } rawQuery, err := query.Build(queryContext) @@ -52,7 +53,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) { Measurement: "cpu", GroupBy: []*QueryPart{groupBy1, groupBy2, groupBy3}, Tags: []*Tag{tag1, tag2}, - Interval: "5s", + Interval: time.Second * 5, } rawQuery, err := query.Build(queryContext) @@ -64,7 +65,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) { query := &Query{ Selects: []*Select{{*qp1, *qp2, *mathPartDivideBy100}}, Measurement: "cpu", - Interval: "5s", + Interval: time.Second * 5, } rawQuery, err := query.Build(queryContext) @@ -76,7 +77,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) { query := &Query{ Selects: []*Select{{*qp1, *qp2, *mathPartDivideByIntervalMs}}, Measurement: "cpu", - Interval: "5s", + Interval: time.Second * 5, } rawQuery, err := query.Build(queryContext) @@ -117,7 +118,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) { Measurement: "cpu", Policy: "policy", GroupBy: []*QueryPart{groupBy1, groupBy3}, - Interval: "10s", + Interval: time.Second * 10, RawQuery: "Raw query", UseRawQuery: true, } diff --git a/pkg/tsdb/interval.go b/pkg/tsdb/interval.go index aef6cc4f47b..e26d39f3986 100644 --- a/pkg/tsdb/interval.go +++ b/pkg/tsdb/interval.go @@ -2,14 +2,18 @@ package tsdb import ( "fmt" + 
"strings" "time" + + "github.com/grafana/grafana/pkg/components/simplejson" + "github.com/grafana/grafana/pkg/models" ) var ( - defaultRes int64 = 1500 - minInterval time.Duration = 1 * time.Millisecond - year time.Duration = time.Hour * 24 * 365 - day time.Duration = time.Hour * 24 * 365 + defaultRes int64 = 1500 + defaultMinInterval time.Duration = 1 * time.Millisecond + year time.Duration = time.Hour * 24 * 365 + day time.Duration = time.Hour * 24 ) type Interval struct { @@ -17,14 +21,68 @@ type Interval struct { Value time.Duration } -func CalculateInterval(timerange *TimeRange) Interval { - interval := time.Duration((timerange.MustGetTo().UnixNano() - timerange.MustGetFrom().UnixNano()) / defaultRes) +type intervalCalculator struct { + minInterval time.Duration +} - if interval < minInterval { - return Interval{Text: formatDuration(minInterval), Value: interval} +type IntervalCalculator interface { + Calculate(timeRange *TimeRange, minInterval time.Duration) Interval +} + +type IntervalOptions struct { + MinInterval time.Duration +} + +func NewIntervalCalculator(opt *IntervalOptions) *intervalCalculator { + if opt == nil { + opt = &IntervalOptions{} } - return Interval{Text: formatDuration(roundInterval(interval)), Value: interval} + calc := &intervalCalculator{} + + if opt.MinInterval == 0 { + calc.minInterval = defaultMinInterval + } else { + calc.minInterval = opt.MinInterval + } + + return calc +} + +func (ic *intervalCalculator) Calculate(timerange *TimeRange, minInterval time.Duration) Interval { + to := timerange.MustGetTo().UnixNano() + from := timerange.MustGetFrom().UnixNano() + interval := time.Duration((to - from) / defaultRes) + + if interval < minInterval { + return Interval{Text: formatDuration(minInterval), Value: minInterval} + } + + rounded := roundInterval(interval) + return Interval{Text: formatDuration(rounded), Value: rounded} +} + +func GetIntervalFrom(dsInfo *models.DataSource, queryModel *simplejson.Json, defaultInterval time.Duration) (time.Duration, error) { + interval := queryModel.Get("interval").MustString("") + + if interval == "" && dsInfo.JsonData != nil { + dsInterval := dsInfo.JsonData.Get("timeInterval").MustString("") + if dsInterval != "" { + interval = dsInterval + } + } + + if interval == "" { + return defaultInterval, nil + } + + interval = strings.Replace(strings.Replace(interval, "<", "", 1), ">", "", 1) + parsedInterval, err := time.ParseDuration(interval) + if err != nil { + return time.Duration(0), err + } + + return parsedInterval, nil } func formatDuration(inter time.Duration) string { diff --git a/pkg/tsdb/interval_test.go b/pkg/tsdb/interval_test.go index 7b243b4e3ba..1e36e5428fe 100644 --- a/pkg/tsdb/interval_test.go +++ b/pkg/tsdb/interval_test.go @@ -14,31 +14,33 @@ func TestInterval(t *testing.T) { HomePath: "../../", }) + calculator := NewIntervalCalculator(&IntervalOptions{}) + Convey("for 5min", func() { tr := NewTimeRange("5m", "now") - interval := CalculateInterval(tr) + interval := calculator.Calculate(tr, time.Millisecond*1) So(interval.Text, ShouldEqual, "200ms") }) Convey("for 15min", func() { tr := NewTimeRange("15m", "now") - interval := CalculateInterval(tr) + interval := calculator.Calculate(tr, time.Millisecond*1) So(interval.Text, ShouldEqual, "500ms") }) Convey("for 30min", func() { tr := NewTimeRange("30m", "now") - interval := CalculateInterval(tr) + interval := calculator.Calculate(tr, time.Millisecond*1) So(interval.Text, ShouldEqual, "1s") }) Convey("for 1h", func() { tr := NewTimeRange("1h", "now") - interval 
:= CalculateInterval(tr) + interval := calculator.Calculate(tr, time.Millisecond*1) So(interval.Text, ShouldEqual, "2s") }) @@ -51,6 +53,7 @@ func TestInterval(t *testing.T) { So(formatDuration(time.Second*61), ShouldEqual, "1m") So(formatDuration(time.Millisecond*30), ShouldEqual, "30ms") So(formatDuration(time.Hour*23), ShouldEqual, "23h") + So(formatDuration(time.Hour*24), ShouldEqual, "1d") So(formatDuration(time.Hour*24*367), ShouldEqual, "1y") }) }) diff --git a/pkg/tsdb/mysql/macros.go b/pkg/tsdb/mysql/macros.go index 36c38804a01..108b81fc5f3 100644 --- a/pkg/tsdb/mysql/macros.go +++ b/pkg/tsdb/mysql/macros.go @@ -3,6 +3,8 @@ package mysql import ( "fmt" "regexp" + "strings" + "time" "github.com/grafana/grafana/pkg/tsdb" ) @@ -25,7 +27,7 @@ func (m *MySqlMacroEngine) Interpolate(timeRange *tsdb.TimeRange, sql string) (s var macroError error sql = replaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string { - res, err := m.evaluateMacro(groups[1], groups[2:]) + res, err := m.evaluateMacro(groups[1], strings.Split(groups[2], ",")) if err != nil && macroError == nil { macroError = err return "macro_error()" @@ -73,6 +75,15 @@ func (m *MySqlMacroEngine) evaluateMacro(name string, args []string) (string, er return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil case "__timeTo": return fmt.Sprintf("FROM_UNIXTIME(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil + case "__timeGroup": + if len(args) != 2 { + return "", fmt.Errorf("macro %v needs time column and interval", name) + } + interval, err := time.ParseDuration(strings.Trim(args[1], `'" `)) + if err != nil { + return "", fmt.Errorf("error parsing interval %v", args[1]) + } + return fmt.Sprintf("cast(cast(UNIX_TIMESTAMP(%s)/(%.0f) as signed)*%.0f as signed)", args[0], interval.Seconds(), interval.Seconds()), nil case "__unixEpochFilter": if len(args) == 0 { return "", fmt.Errorf("missing time column argument for macro %v", name) diff --git a/pkg/tsdb/mysql/macros_test.go b/pkg/tsdb/mysql/macros_test.go index c92020d0aae..988612fb287 100644 --- a/pkg/tsdb/mysql/macros_test.go +++ b/pkg/tsdb/mysql/macros_test.go @@ -40,6 +40,14 @@ func TestMacroEngine(t *testing.T) { So(sql, ShouldEqual, "select FROM_UNIXTIME(18446744066914186738)") }) + Convey("interpolate __timeGroup function", func() { + + sql, err := engine.Interpolate(timeRange, "GROUP BY $__timeGroup(time_column,'5m')") + So(err, ShouldBeNil) + + So(sql, ShouldEqual, "GROUP BY cast(cast(UNIX_TIMESTAMP(time_column)/(300) as signed)*300 as signed)") + }) + Convey("interpolate __timeTo function", func() { sql, err := engine.Interpolate(timeRange, "select $__timeTo(time_column)") So(err, ShouldBeNil) diff --git a/pkg/tsdb/postgres/macros.go b/pkg/tsdb/postgres/macros.go index 21400b03dfd..288787589ce 100644 --- a/pkg/tsdb/postgres/macros.go +++ b/pkg/tsdb/postgres/macros.go @@ -4,6 +4,7 @@ import ( "fmt" "regexp" "strings" + "time" "github.com/grafana/grafana/pkg/tsdb" ) @@ -71,6 +72,7 @@ func (m *PostgresMacroEngine) evaluateMacro(name string, args []string) (string, } return fmt.Sprintf("extract(epoch from %s) as \"time\"", args[0]), nil case "__timeFilter": + // dont use to_timestamp in this macro for redshift compatibility #9566 if len(args) == 0 { return "", fmt.Errorf("missing time column argument for macro %v", name) } @@ -80,10 +82,14 @@ func (m *PostgresMacroEngine) evaluateMacro(name string, args []string) (string, case "__timeTo": return fmt.Sprintf("to_timestamp(%d)", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil case 
"__timeGroup": - if len(args) < 2 { + if len(args) != 2 { return "", fmt.Errorf("macro %v needs time column and interval", name) } - return fmt.Sprintf("(extract(epoch from \"%s\")/extract(epoch from %s::interval))::int*extract(epoch from %s::interval)", args[0], args[1], args[1]), nil + interval, err := time.ParseDuration(strings.Trim(args[1], `' `)) + if err != nil { + return "", fmt.Errorf("error parsing interval %v", args[1]) + } + return fmt.Sprintf("(extract(epoch from \"%s\")/%v)::bigint*%v", args[0], interval.Seconds(), interval.Seconds()), nil case "__unixEpochFilter": if len(args) == 0 { return "", fmt.Errorf("missing time column argument for macro %v", name) diff --git a/pkg/tsdb/postgres/macros_test.go b/pkg/tsdb/postgres/macros_test.go index ba991e6f2d5..ff268805259 100644 --- a/pkg/tsdb/postgres/macros_test.go +++ b/pkg/tsdb/postgres/macros_test.go @@ -45,7 +45,7 @@ func TestMacroEngine(t *testing.T) { sql, err := engine.Interpolate(timeRange, "GROUP BY $__timeGroup(time_column,'5m')") So(err, ShouldBeNil) - So(sql, ShouldEqual, "GROUP BY (extract(epoch from \"time_column\")/extract(epoch from '5m'::interval))::int*extract(epoch from '5m'::interval)") + So(sql, ShouldEqual, "GROUP BY (extract(epoch from \"time_column\")/300)::bigint*300") }) Convey("interpolate __timeTo function", func() { diff --git a/pkg/tsdb/postgres/postgres.go b/pkg/tsdb/postgres/postgres.go index 6fc9c89e7be..dcb60977edc 100644 --- a/pkg/tsdb/postgres/postgres.go +++ b/pkg/tsdb/postgres/postgres.go @@ -4,6 +4,7 @@ import ( "container/list" "context" "fmt" + "net/url" "strconv" "time" @@ -51,8 +52,8 @@ func generateConnectionString(datasource *models.DataSource) string { } } - sslmode := datasource.JsonData.Get("sslmode").MustString("require") - return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s", datasource.User, password, datasource.Url, datasource.Database, sslmode) + sslmode := datasource.JsonData.Get("sslmode").MustString("verify-full") + return fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s", url.PathEscape(datasource.User), url.PathEscape(password), url.PathEscape(datasource.Url), url.PathEscape(datasource.Database), url.QueryEscape(sslmode)) } func (e *PostgresQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) { @@ -186,7 +187,7 @@ func (e PostgresQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *co case float64: timestamp = columnValue * 1000 case time.Time: - timestamp = float64(columnValue.Unix() * 1000) + timestamp = float64(columnValue.UnixNano() / 1e6) default: return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp") } diff --git a/pkg/tsdb/prometheus/prometheus.go b/pkg/tsdb/prometheus/prometheus.go index 33219837281..e798b92c6fe 100644 --- a/pkg/tsdb/prometheus/prometheus.go +++ b/pkg/tsdb/prometheus/prometheus.go @@ -48,14 +48,16 @@ func NewPrometheusExecutor(dsInfo *models.DataSource) (tsdb.TsdbQueryEndpoint, e } var ( - plog log.Logger - legendFormat *regexp.Regexp + plog log.Logger + legendFormat *regexp.Regexp + intervalCalculator tsdb.IntervalCalculator ) func init() { plog = log.New("tsdb.prometheus") tsdb.RegisterTsdbQueryEndpoint("prometheus", NewPrometheusExecutor) legendFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`) + intervalCalculator = tsdb.NewIntervalCalculator(&tsdb.IntervalOptions{MinInterval: time.Second * 1}) } func (e *PrometheusExecutor) getClient(dsInfo *models.DataSource) (apiv1.API, error) { @@ -88,7 +90,7 @@ func (e *PrometheusExecutor) 
Query(ctx context.Context, dsInfo *models.DataSourc return nil, err } - query, err := parseQuery(tsdbQuery.Queries, tsdbQuery) + query, err := parseQuery(dsInfo, tsdbQuery.Queries, tsdbQuery) if err != nil { return nil, err } @@ -138,7 +140,7 @@ func formatLegend(metric model.Metric, query *PrometheusQuery) string { return string(result) } -func parseQuery(queries []*tsdb.Query, queryContext *tsdb.TsdbQuery) (*PrometheusQuery, error) { +func parseQuery(dsInfo *models.DataSource, queries []*tsdb.Query, queryContext *tsdb.TsdbQuery) (*PrometheusQuery, error) { queryModel := queries[0] expr, err := queryModel.Model.Get("expr").String() @@ -146,11 +148,6 @@ func parseQuery(queries []*tsdb.Query, queryContext *tsdb.TsdbQuery) (*Prometheu return nil, err } - step, err := queryModel.Model.Get("step").Int64() - if err != nil { - return nil, err - } - format := queryModel.Model.Get("legendFormat").MustString("") start, err := queryContext.TimeRange.ParseFrom() @@ -163,9 +160,18 @@ func parseQuery(queries []*tsdb.Query, queryContext *tsdb.TsdbQuery) (*Prometheu return nil, err } + dsInterval, err := tsdb.GetIntervalFrom(dsInfo, queryModel.Model, time.Second*15) + if err != nil { + return nil, err + } + + intervalFactor := queryModel.Model.Get("intervalFactor").MustInt64(1) + interval := intervalCalculator.Calculate(queryContext.TimeRange, dsInterval) + step := time.Duration(int64(interval.Value) * intervalFactor) + return &PrometheusQuery{ Expr: expr, - Step: time.Second * time.Duration(step), + Step: step, LegendFormat: format, Start: start, End: end, diff --git a/pkg/tsdb/prometheus/prometheus_test.go b/pkg/tsdb/prometheus/prometheus_test.go index d66ef75e479..c551ab98112 100644 --- a/pkg/tsdb/prometheus/prometheus_test.go +++ b/pkg/tsdb/prometheus/prometheus_test.go @@ -2,13 +2,21 @@ package prometheus import ( "testing" + "time" + "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/tsdb" + + "github.com/grafana/grafana/pkg/components/simplejson" p "github.com/prometheus/common/model" . 
"github.com/smartystreets/goconvey/convey" ) func TestPrometheus(t *testing.T) { Convey("Prometheus", t, func() { + dsInfo := &models.DataSource{ + JsonData: simplejson.New(), + } Convey("converting metric name", func() { metric := map[p.LabelName]p.LabelValue{ @@ -36,5 +44,108 @@ func TestPrometheus(t *testing.T) { So(formatLegend(metric, query), ShouldEqual, `http_request_total{app="backend", device="mobile"}`) }) + + Convey("parsing query model with step", func() { + json := `{ + "expr": "go_goroutines", + "format": "time_series", + "refId": "A" + }` + jsonModel, _ := simplejson.NewJson([]byte(json)) + queryContext := &tsdb.TsdbQuery{} + queryModels := []*tsdb.Query{ + {Model: jsonModel}, + } + + Convey("with 48h time range", func() { + queryContext.TimeRange = tsdb.NewTimeRange("12h", "now") + + model, err := parseQuery(dsInfo, queryModels, queryContext) + + So(err, ShouldBeNil) + So(model.Step, ShouldEqual, time.Second*30) + }) + }) + + Convey("parsing query model without step parameter", func() { + json := `{ + "expr": "go_goroutines", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + }` + jsonModel, _ := simplejson.NewJson([]byte(json)) + queryContext := &tsdb.TsdbQuery{} + queryModels := []*tsdb.Query{ + {Model: jsonModel}, + } + + Convey("with 48h time range", func() { + queryContext.TimeRange = tsdb.NewTimeRange("48h", "now") + + model, err := parseQuery(dsInfo, queryModels, queryContext) + + So(err, ShouldBeNil) + So(model.Step, ShouldEqual, time.Minute*2) + }) + + Convey("with 1h time range", func() { + queryContext.TimeRange = tsdb.NewTimeRange("1h", "now") + + model, err := parseQuery(dsInfo, queryModels, queryContext) + + So(err, ShouldBeNil) + So(model.Step, ShouldEqual, time.Second*15) + }) + }) + + Convey("parsing query model with intervalFactor", func() { + Convey("high intervalFactor", func() { + json := `{ + "expr": "go_goroutines", + "format": "time_series", + "intervalFactor": 10, + "refId": "A" + }` + jsonModel, _ := simplejson.NewJson([]byte(json)) + queryContext := &tsdb.TsdbQuery{} + queryModels := []*tsdb.Query{ + {Model: jsonModel}, + } + + Convey("with 48h time range", func() { + queryContext.TimeRange = tsdb.NewTimeRange("48h", "now") + + model, err := parseQuery(dsInfo, queryModels, queryContext) + + So(err, ShouldBeNil) + So(model.Step, ShouldEqual, time.Minute*20) + }) + }) + + Convey("low intervalFactor", func() { + json := `{ + "expr": "go_goroutines", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + }` + jsonModel, _ := simplejson.NewJson([]byte(json)) + queryContext := &tsdb.TsdbQuery{} + queryModels := []*tsdb.Query{ + {Model: jsonModel}, + } + + Convey("with 48h time range", func() { + queryContext.TimeRange = tsdb.NewTimeRange("48h", "now") + + model, err := parseQuery(dsInfo, queryModels, queryContext) + + So(err, ShouldBeNil) + So(model.Step, ShouldEqual, time.Minute*2) + }) + }) + }) + }) } diff --git a/pkg/tsdb/sql_engine.go b/pkg/tsdb/sql_engine.go index d79ca938bb4..12778b4e1ad 100644 --- a/pkg/tsdb/sql_engine.go +++ b/pkg/tsdb/sql_engine.go @@ -57,12 +57,13 @@ func (e *DefaultSqlEngine) InitEngine(driverName string, dsInfo *models.DataSour } engine, err := xorm.NewEngine(driverName, cnnstr) - engine.SetMaxOpenConns(10) - engine.SetMaxIdleConns(10) if err != nil { return err } + engine.SetMaxOpenConns(10) + engine.SetMaxIdleConns(10) + engineCache.cache[dsInfo.Id] = engine e.XormEngine = engine diff --git a/pkg/tsdb/testdata/scenarios.go b/pkg/tsdb/testdata/scenarios.go index 0a7f1467933..e907fa8aae0 100644 
--- a/pkg/tsdb/testdata/scenarios.go +++ b/pkg/tsdb/testdata/scenarios.go @@ -1,6 +1,7 @@ package testdata import ( + "encoding/json" "math/rand" "strconv" "strings" @@ -142,6 +143,45 @@ func init() { }, }) + registerScenario(&Scenario{ + Id: "manual_entry", + Name: "Manual Entry", + Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult { + queryRes := tsdb.NewQueryResult() + + points := query.Model.Get("points").MustArray() + + series := newSeriesForQuery(query) + startTime := context.TimeRange.GetFromAsMsEpoch() + endTime := context.TimeRange.GetToAsMsEpoch() + + for _, val := range points { + pointValues := val.([]interface{}) + + var value null.Float + var time int64 + + if valueFloat, err := strconv.ParseFloat(string(pointValues[0].(json.Number)), 64); err == nil { + value = null.FloatFrom(valueFloat) + } + + if timeInt, err := strconv.ParseInt(string(pointValues[1].(json.Number)), 10, 64); err != nil { + continue + } else { + time = timeInt + } + + if time >= startTime && time <= endTime { + series.Points = append(series.Points, tsdb.NewTimePoint(value, float64(time))) + } + } + + queryRes.Series = append(queryRes.Series, series) + + return queryRes + }, + }) + registerScenario(&Scenario{ Id: "csv_metric_values", Name: "CSV Metric Values", diff --git a/pkg/util/encoding.go b/pkg/util/encoding.go index e87da9d3d55..0edb721e422 100644 --- a/pkg/util/encoding.go +++ b/pkg/util/encoding.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "encoding/hex" "errors" - "fmt" "hash" "strings" ) @@ -30,7 +29,7 @@ func GetRandomString(n int, alphabets ...byte) string { func EncodePassword(password string, salt string) string { newPasswd := PBKDF2([]byte(password), []byte(salt), 10000, 50, sha256.New) - return fmt.Sprintf("%x", newPasswd) + return hex.EncodeToString(newPasswd) } // Encode string to md5 hex value. 
diff --git a/pkg/util/encoding_test.go b/pkg/util/encoding_test.go index abcf5425826..6b0e9331467 100644 --- a/pkg/util/encoding_test.go +++ b/pkg/util/encoding_test.go @@ -7,7 +7,6 @@ import ( ) func TestEncoding(t *testing.T) { - Convey("When generating base64 header", t, func() { result := GetBasicAuthHeader("grafana", "1234") @@ -23,4 +22,8 @@ func TestEncoding(t *testing.T) { So(password, ShouldEqual, "1234") }) + Convey("When encoding password", t, func() { + encodedPassword := EncodePassword("iamgod", "pepper") + So(encodedPassword, ShouldEqual, "e59c568621e57756495a468f47c74e07c911b037084dd464bb2ed72410970dc849cabd71b48c394faf08a5405dae53741ce9") + }) } diff --git a/public/app/core/components/code_editor/code_editor.ts b/public/app/core/components/code_editor/code_editor.ts index 2615a635c7e..cc3b1e46ad4 100644 --- a/public/app/core/components/code_editor/code_editor.ts +++ b/public/app/core/components/code_editor/code_editor.ts @@ -36,6 +36,8 @@ import 'brace/mode/text'; import 'brace/snippets/text'; import 'brace/mode/sql'; import 'brace/snippets/sql'; +import 'brace/mode/markdown'; +import 'brace/snippets/markdown'; const DEFAULT_THEME_DARK = "ace/theme/grafana-dark"; const DEFAULT_THEME_LIGHT = "ace/theme/textmate"; diff --git a/public/app/core/components/colorpicker/ColorPickerPopover.tsx b/public/app/core/components/colorpicker/ColorPickerPopover.tsx index 4677eee22a7..360c3fdd5c4 100644 --- a/public/app/core/components/colorpicker/ColorPickerPopover.tsx +++ b/public/app/core/components/colorpicker/ColorPickerPopover.tsx @@ -56,10 +56,11 @@ export class ColorPickerPopover extends React.Component { let newColor = tinycolor(colorString); if (newColor.isValid()) { // Update only color state + let newColorString = newColor.toString(); this.setState({ - color: newColor.toString(), + color: newColorString, }); - this.props.onColorSelect(newColor); + this.props.onColorSelect(newColorString); } } diff --git a/public/app/core/components/colorpicker/SeriesColorPicker.tsx b/public/app/core/components/colorpicker/SeriesColorPicker.tsx index 6b6d387a2b2..3b24b9a4661 100644 --- a/public/app/core/components/colorpicker/SeriesColorPicker.tsx +++ b/public/app/core/components/colorpicker/SeriesColorPicker.tsx @@ -43,7 +43,7 @@ export class SeriesColorPicker extends React.Component { render() { return (
- {this.props.series && this.renderAxisSelection()} + {this.props.series.yaxis && this.renderAxisSelection()}
); diff --git a/public/app/core/components/form_dropdown/form_dropdown.ts b/public/app/core/components/form_dropdown/form_dropdown.ts index b1cbd9b6ced..364d61cfcdb 100644 --- a/public/app/core/components/form_dropdown/form_dropdown.ts +++ b/public/app/core/components/form_dropdown/form_dropdown.ts @@ -1,5 +1,3 @@ -/// - import _ from 'lodash'; import $ from 'jquery'; import coreModule from '../../core_module'; @@ -159,6 +157,8 @@ export class FormDropdownCtrl { } updateValue(text) { + text = _.unescape(text); + if (text === '' || this.text === text) { return; } @@ -199,9 +199,9 @@ export class FormDropdownCtrl { } open() { - this.inputElement.show(); - this.inputElement.css('width', (Math.max(this.linkElement.width(), 80) + 16) + 'px'); + + this.inputElement.show(); this.inputElement.focus(); this.linkElement.hide(); diff --git a/public/app/core/components/search/search.html b/public/app/core/components/search/search.html index 57fa3bb894a..4342042b052 100644 --- a/public/app/core/components/search/search.html +++ b/public/app/core/components/search/search.html @@ -61,7 +61,9 @@ {{tag}} - + + + diff --git a/public/app/core/components/search/search.ts b/public/app/core/components/search/search.ts index 32ea6800106..99b6c5c4f68 100644 --- a/public/app/core/components/search/search.ts +++ b/public/app/core/components/search/search.ts @@ -18,7 +18,7 @@ export class SearchCtrl { openCompleted: boolean; /** @ngInject */ - constructor($scope, private $location, private $timeout, private backendSrv, public contextSrv, $rootScope) { + constructor($scope, private $location, private $timeout, private backendSrv, private dashboardSrv, public contextSrv, $rootScope) { $rootScope.onAppEvent('show-dash-search', this.openSearch.bind(this), $scope); $rootScope.onAppEvent('hide-dash-search', this.closeSearch.bind(this), $scope); } @@ -194,6 +194,15 @@ export class SearchCtrl { this.searchDashboards(); } + starDashboard(row, evt) { + this.dashboardSrv.starDashboard(row.id, row.isStarred).then(newState => { + row.isStarred = newState; + }); + if (evt) { + evt.stopPropagation(); + evt.preventDefault(); + } + } } export function searchDirective() { diff --git a/public/app/core/core.ts b/public/app/core/core.ts index c8e6ff97905..66d8b8b656d 100644 --- a/public/app/core/core.ts +++ b/public/app/core/core.ts @@ -1,5 +1,4 @@ import "./directives/dash_class"; -import "./directives/confirm_click"; import "./directives/dash_edit_link"; import "./directives/dropdown_typeahead"; import "./directives/metric_segment"; diff --git a/public/app/core/directives/confirm_click.js b/public/app/core/directives/confirm_click.js deleted file mode 100644 index 95b60347e10..00000000000 --- a/public/app/core/directives/confirm_click.js +++ /dev/null @@ -1,23 +0,0 @@ -define([ - '../core_module', -], -function (coreModule) { - 'use strict'; - - coreModule.default.directive('confirmClick', function() { - return { - restrict: 'A', - link: function(scope, elem, attrs) { - elem.bind('click', function() { - var message = attrs.confirmation || "Are you sure you want to do that?"; - if (window.confirm(message)) { - var action = attrs.confirmClick; - if (action) { - scope.$apply(scope.$eval(action)); - } - } - }); - }, - }; - }); -}); diff --git a/public/app/core/directives/metric_segment.js b/public/app/core/directives/metric_segment.js index 37352556819..da21a5b3c45 100644 --- a/public/app/core/directives/metric_segment.js +++ b/public/app/core/directives/metric_segment.js @@ -39,6 +39,8 @@ function (_, $, coreModule) { return; } + value = 
_.unescape(value); + $scope.$apply(function() { var selected = _.find($scope.altSegments, {value: value}); if (selected) { @@ -46,6 +48,7 @@ function (_, $, coreModule) { segment.html = selected.html || selected.value; segment.fake = false; segment.expandable = selected.expandable; + segment.type = selected.type; } else if (segment.custom !== 'false') { segment.value = value; diff --git a/public/app/core/directives/ng_model_on_blur.js b/public/app/core/directives/ng_model_on_blur.ts similarity index 71% rename from public/app/core/directives/ng_model_on_blur.js rename to public/app/core/directives/ng_model_on_blur.ts index b3c73e54dba..383d04e3961 100644 --- a/public/app/core/directives/ng_model_on_blur.js +++ b/public/app/core/directives/ng_model_on_blur.ts @@ -1,11 +1,8 @@ -define([ - '../core_module', - 'app/core/utils/rangeutil', -], -function (coreModule, rangeUtil) { - 'use strict'; +import coreModule from '../core_module'; +import * as rangeUtil from 'app/core/utils/rangeutil'; - coreModule.default.directive('ngModelOnblur', function() { +export class NgModelOnBlur { + constructor() { return { restrict: 'A', priority: 1, @@ -23,22 +20,27 @@ function (coreModule, rangeUtil) { }); } }; - }); + } +} - coreModule.default.directive('emptyToNull', function () { + +export class EmptyToNull { + constructor() { return { restrict: 'A', require: 'ngModel', link: function (scope, elm, attrs, ctrl) { ctrl.$parsers.push(function (viewValue) { - if(viewValue === "") { return null; } + if (viewValue === "") { return null; } return viewValue; }); } }; - }); + } +} - coreModule.default.directive('validTimeSpan', function() { +export class ValidTimeSpan { + constructor() { return { require: 'ngModel', link: function(scope, elm, attrs, ctrl) { @@ -54,5 +56,9 @@ function (coreModule, rangeUtil) { }; } }; - }); -}); + } +} + +coreModule.directive('ngModelOnblur', NgModelOnBlur); +coreModule.directive('emptyToNull', EmptyToNull); +coreModule.directive('validTimeSpan', ValidTimeSpan); diff --git a/public/app/core/services/all.js b/public/app/core/services/all.js index 1fea3e5a248..a308febb219 100644 --- a/public/app/core/services/all.js +++ b/public/app/core/services/all.js @@ -3,7 +3,6 @@ define([ './util_srv', './context_srv', './timer', - './keyboard_manager', './analytics', './popover_srv', './segment_srv', diff --git a/public/app/core/services/keyboard_manager.js b/public/app/core/services/keyboard_manager.js deleted file mode 100644 index b5eefda81a6..00000000000 --- a/public/app/core/services/keyboard_manager.js +++ /dev/null @@ -1,291 +0,0 @@ -define([ - 'angular', - 'lodash', - '../core_module', -], -function (angular, _, coreModule) { - 'use strict'; - - // This service was based on OpenJS library available in BSD License - // http://www.openjs.com/scripts/events/keyboard_shortcuts/index.php - coreModule.default.factory('keyboardManager', ['$window', '$timeout', function ($window, $timeout) { - var keyboardManagerService = {}; - - var defaultOpt = { - 'type': 'keydown', - 'propagate': false, - 'inputDisabled': false, - 'target': $window.document, - 'keyCode': false - }; - // Store all keyboard combination shortcuts - keyboardManagerService.keyboardEvent = {}; - // Add a new keyboard combination shortcut - keyboardManagerService.bind = function (label, callback, opt) { - var fct, elt, code, k; - // Initialize opt object - opt = angular.extend({}, defaultOpt, opt); - label = label.toLowerCase(); - elt = opt.target; - - if (typeof opt.target === 'string') { - elt = 
document.getElementById(opt.target); - } - - fct = function (e) { - e = e || $window.event; - - // Disable event handler when focus input and textarea - if (opt['inputDisabled']) { - var elt; - if (e.target) { - elt = e.target; - } - else if (e.srcElement) { - elt = e.srcElement; - } - - if (elt.nodeType === 3) { - elt = elt.parentNode; - } - - if (elt.tagName === 'INPUT' || elt.tagName === 'TEXTAREA') { - return; - } - } - - // Find out which key is pressed - if (e.keyCode) { - code = e.keyCode; - } - else if (e.which) { - code = e.which; - } - - var character = String.fromCharCode(code).toLowerCase(); - - if (code === 188) { - character = ","; // If the user presses , when the type is onkeydown - } - if (code === 190) { - character = "."; // If the user presses , when the type is onkeydown - } - - var keys = label.split("+"); - // Key Pressed - counts the number of valid keypresses - if it is same as the number of keys, the shortcut function is invoked - var kp = 0; - // Work around for stupid Shift key bug created by using lowercase - as a result the shift+num combination was broken - var shift_nums = { - "`": "~", - "1": "!", - "2": "@", - "3": "#", - "4": "$", - "5": "%", - "6": "^", - "7": "&", - "8": "*", - "9": "(", - "0": ")", - "-": "_", - "=": "+", - ";": ":", - "'": "\"", - ",": "<", - ".": ">", - "/": "?", - "»": "?", - "«": "?", - "¿": "?", - "\\": "|" - }; - // Special Keys - and their codes - var special_keys = { - 'esc': 27, - 'escape': 27, - 'tab': 9, - 'space': 32, - 'return': 13, - 'enter': 13, - 'backspace': 8, - - 'scrolllock': 145, - 'scroll_lock': 145, - 'scroll': 145, - 'capslock': 20, - 'caps_lock': 20, - 'caps': 20, - 'numlock': 144, - 'num_lock': 144, - 'num': 144, - - 'pause': 19, - 'break': 19, - - 'insert': 45, - 'home': 36, - 'delete': 46, - 'end': 35, - - 'pageup': 33, - 'page_up': 33, - 'pu': 33, - - 'pagedown': 34, - 'page_down': 34, - 'pd': 34, - - 'left': 37, - 'up': 38, - 'right': 39, - 'down': 40, - - 'f1': 112, - 'f2': 113, - 'f3': 114, - 'f4': 115, - 'f5': 116, - 'f6': 117, - 'f7': 118, - 'f8': 119, - 'f9': 120, - 'f10': 121, - 'f11': 122, - 'f12': 123 - }; - // Some modifiers key - var modifiers = { - shift: { - wanted: false, - pressed: e.shiftKey ? true : false - }, - ctrl : { - wanted: false, - pressed: e.ctrlKey ? true : false - }, - alt : { - wanted: false, - pressed: e.altKey ? true : false - }, - meta : { //Meta is Mac specific - wanted: false, - pressed: e.metaKey ? 
true : false - } - }; - // Foreach keys in label (split on +) - for (var i = 0, l = keys.length; k = keys[i], i < l; i++) { - switch (k) { - case 'ctrl': - case 'control': - kp++; - modifiers.ctrl.wanted = true; - break; - case 'shift': - case 'alt': - case 'meta': - kp++; - modifiers[k].wanted = true; - break; - } - - if (k.length > 1) { // If it is a special key - if (special_keys[k] === code) { - kp++; - } - } else if (opt['keyCode']) { // If a specific key is set into the config - if (opt['keyCode'] === code) { - kp++; - } - } else { // The special keys did not match - if (character === k) { - kp++; - } - else { - if (shift_nums[character] && e.shiftKey) { // Stupid Shift key bug created by using lowercase - character = shift_nums[character]; - if (character === k) { - kp++; - } - } - } - } - } - - if (kp === keys.length && - modifiers.ctrl.pressed === modifiers.ctrl.wanted && - modifiers.shift.pressed === modifiers.shift.wanted && - modifiers.alt.pressed === modifiers.alt.wanted && - modifiers.meta.pressed === modifiers.meta.wanted) { - $timeout(function() { - callback(e); - }, 1); - - if (!opt['propagate']) { // Stop the event - // e.cancelBubble is supported by IE - this will kill the bubbling process. - e.cancelBubble = true; - e.returnValue = false; - - // e.stopPropagation works in Firefox. - if (e.stopPropagation) { - e.stopPropagation(); - e.preventDefault(); - } - return false; - } - } - - }; - // Store shortcut - keyboardManagerService.keyboardEvent[label] = { - 'callback': fct, - 'target': elt, - 'event': opt['type'] - }; - //Attach the function with the event - if (elt.addEventListener) { - elt.addEventListener(opt['type'], fct, false); - } - else if (elt.attachEvent) { - elt.attachEvent('on' + opt['type'], fct); - } - else { - elt['on' + opt['type']] = fct; - } - }; - - keyboardManagerService.unbindAll = function() { - _.each(keyboardManagerService.keyboardEvent, function(value, key) { - keyboardManagerService.unbind(key); - }); - }; - - // Remove the shortcut - just specify the shortcut and I will remove the binding - keyboardManagerService.unbind = function (label) { - label = label.toLowerCase(); - - var binding = keyboardManagerService.keyboardEvent[label]; - delete(keyboardManagerService.keyboardEvent[label]); - - if (!binding) { - return; - } - - var type = binding['event'], - elt = binding['target'], - callback = binding['callback']; - - if (elt.detachEvent) { - elt.detachEvent('on' + type, callback); - } - else if (elt.removeEventListener) { - elt.removeEventListener(type, callback, false); - } - else { - elt['on' + type] = false; - } - }; - // - return keyboardManagerService; - }]); - -}); diff --git a/public/app/core/services/segment_srv.js b/public/app/core/services/segment_srv.js index f1733fcb3e6..d615525988a 100644 --- a/public/app/core/services/segment_srv.js +++ b/public/app/core/services/segment_srv.js @@ -103,7 +103,7 @@ function (angular, _, coreModule) { }; this.newPlusButton = function() { - return new MetricSegment({fake: true, html: '', type: 'plus-button' }); + return new MetricSegment({fake: true, html: '', type: 'plus-button', cssClass: 'query-part' }); }; this.newSelectTagValue = function() { diff --git a/public/app/core/services/timer.ts b/public/app/core/services/timer.ts index 6356e1f2910..6355105ee0e 100644 --- a/public/app/core/services/timer.ts +++ b/public/app/core/services/timer.ts @@ -21,7 +21,7 @@ export class Timer { } cancelAll() { - _.each(this.timers, function (t) { + _.each(this.timers, t => { this.$timeout.cancel(t); }); 
this.timers = []; diff --git a/public/app/core/specs/datemath.jest.ts b/public/app/core/specs/datemath.jest.ts index 0b752876882..e2bdebcebce 100644 --- a/public/app/core/specs/datemath.jest.ts +++ b/public/app/core/specs/datemath.jest.ts @@ -48,7 +48,7 @@ describe("DateMath", () => { it("now/d on a utc dashboard should be start of the current day in UTC time", () => { var today = new Date(); - var expected = new Date(Date.UTC(today.getFullYear(), today.getMonth(), today.getDate(), 0, 0, 0, 0)); + var expected = new Date(Date.UTC(today.getUTCFullYear(), today.getUTCMonth(), today.getUTCDate(), 0, 0, 0, 0)); var startOfDay = dateMath.parse('now/d', false, 'utc').valueOf(); expect(startOfDay).toBe(expected.getTime()); diff --git a/public/app/core/specs/kbn.jest.ts b/public/app/core/specs/kbn.jest.ts index efeaeaee8b2..9f50a05d5ad 100644 --- a/public/app/core/specs/kbn.jest.ts +++ b/public/app/core/specs/kbn.jest.ts @@ -114,7 +114,7 @@ describe('date time formats', function() { it('should format as US date', function() { var str = kbn.valueFormats.dateTimeAsUS(1505634997920, 1); - expect(str).toBe(moment(1505634997920).format('MM/DD/YYYY H:mm:ss a')); + expect(str).toBe(moment(1505634997920).format('MM/DD/YYYY h:mm:ss a')); }); it('should format as US date and skip date when today', function() { diff --git a/public/app/core/utils/kbn.ts b/public/app/core/utils/kbn.ts index f1c44cfae0e..b0715502114 100644 --- a/public/app/core/utils/kbn.ts +++ b/public/app/core/utils/kbn.ts @@ -475,6 +475,8 @@ kbn.valueFormats.wpm = kbn.formatBuilders.simpleCountUnit('wpm'); // Energy kbn.valueFormats.watt = kbn.formatBuilders.decimalSIPrefix('W'); kbn.valueFormats.kwatt = kbn.formatBuilders.decimalSIPrefix('W', 1); +kbn.valueFormats.mwatt = kbn.formatBuilders.decimalSIPrefix('W', -1); +kbn.valueFormats.kwattm = kbn.formatBuilders.decimalSIPrefix('W/Min', 1); kbn.valueFormats.voltamp = kbn.formatBuilders.decimalSIPrefix('VA'); kbn.valueFormats.kvoltamp = kbn.formatBuilders.decimalSIPrefix('VA', 1); kbn.valueFormats.voltampreact = kbn.formatBuilders.decimalSIPrefix('var'); @@ -485,9 +487,12 @@ kbn.valueFormats.joule = kbn.formatBuilders.decimalSIPrefix('J'); kbn.valueFormats.ev = kbn.formatBuilders.decimalSIPrefix('eV'); kbn.valueFormats.amp = kbn.formatBuilders.decimalSIPrefix('A'); kbn.valueFormats.kamp = kbn.formatBuilders.decimalSIPrefix('A', 1); +kbn.valueFormats.mamp = kbn.formatBuilders.decimalSIPrefix('A', -1); kbn.valueFormats.volt = kbn.formatBuilders.decimalSIPrefix('V'); kbn.valueFormats.kvolt = kbn.formatBuilders.decimalSIPrefix('V', 1); +kbn.valueFormats.mvolt = kbn.formatBuilders.decimalSIPrefix('V', -1); kbn.valueFormats.dBm = kbn.formatBuilders.decimalSIPrefix('dBm'); +kbn.valueFormats.ohm = kbn.formatBuilders.decimalSIPrefix('Ω'); // Temperature kbn.valueFormats.celsius = kbn.formatBuilders.fixedUnit('°C'); @@ -514,6 +519,12 @@ kbn.valueFormats.lengthm = kbn.formatBuilders.decimalSIPrefix('m'); kbn.valueFormats.lengthmm = kbn.formatBuilders.decimalSIPrefix('m', -1); kbn.valueFormats.lengthkm = kbn.formatBuilders.decimalSIPrefix('m', 1); kbn.valueFormats.lengthmi = kbn.formatBuilders.fixedUnit('mi'); +kbn.valueFormats.lengthft = kbn.formatBuilders.fixedUnit('ft'); + +// Area +kbn.valueFormats.areaM2 = kbn.formatBuilders.fixedUnit('m²'); +kbn.valueFormats.areaF2 = kbn.formatBuilders.fixedUnit('ft²'); +kbn.valueFormats.areaMI2 = kbn.formatBuilders.fixedUnit('mi²'); // Mass kbn.valueFormats.massmg = kbn.formatBuilders.decimalSIPrefix('g', -1); @@ -527,6 +538,11 @@ kbn.valueFormats.velocitykmh = 
kbn.formatBuilders.fixedUnit('km/h'); kbn.valueFormats.velocitymph = kbn.formatBuilders.fixedUnit('mph'); kbn.valueFormats.velocityknot = kbn.formatBuilders.fixedUnit('kn'); +// Acceleration +kbn.valueFormats.accMS2 = kbn.formatBuilders.fixedUnit('m/sec²'); +kbn.valueFormats.accFS2 = kbn.formatBuilders.fixedUnit('f/sec²'); +kbn.valueFormats.accG = kbn.formatBuilders.fixedUnit('g'); + // Volume kbn.valueFormats.litre = kbn.formatBuilders.decimalSIPrefix('L'); kbn.valueFormats.mlitre = kbn.formatBuilders.decimalSIPrefix('L', -1); @@ -540,6 +556,11 @@ kbn.valueFormats.flowcms = kbn.formatBuilders.fixedUnit('cms'); kbn.valueFormats.flowcfs = kbn.formatBuilders.fixedUnit('cfs'); kbn.valueFormats.flowcfm = kbn.formatBuilders.fixedUnit('cfm'); +// Angle +kbn.valueFormats.degree = kbn.formatBuilders.fixedUnit('°'); +kbn.valueFormats.radian = kbn.formatBuilders.fixedUnit('rad'); +kbn.valueFormats.grad = kbn.formatBuilders.fixedUnit('grad'); + // Time kbn.valueFormats.hertz = kbn.formatBuilders.decimalSIPrefix('Hz'); @@ -873,10 +894,19 @@ kbn.getUnitFormats = function() { submenu: [ { text: 'millimetre (mm)', value: 'lengthmm' }, { text: 'meter (m)', value: 'lengthm' }, + { text: 'feet (ft)', value: 'lengthft' }, { text: 'kilometer (km)', value: 'lengthkm' }, { text: 'mile (mi)', value: 'lengthmi' }, ], }, + { + text: 'area', + submenu: [ + {text: 'Square Meters (m²)', value: 'areaM2' }, + {text: 'Square Feet (ft²)', value: 'areaF2' }, + {text: 'Square Miles (mi²)', value: 'areaMI2'}, + ] + }, { text: 'mass', submenu: [ @@ -908,21 +938,26 @@ kbn.getUnitFormats = function() { { text: 'energy', submenu: [ - { text: 'watt (W)', value: 'watt' }, - { text: 'kilowatt (kW)', value: 'kwatt' }, - { text: 'volt-ampere (VA)', value: 'voltamp' }, - { text: 'kilovolt-ampere (kVA)', value: 'kvoltamp' }, - { text: 'volt-ampere reactive (var)', value: 'voltampreact' }, - { text: 'kilovolt-ampere reactive (kvar)', value: 'kvoltampreact' }, - { text: 'watt-hour (Wh)', value: 'watth' }, - { text: 'kilowatt-hour (kWh)', value: 'kwatth' }, - { text: 'joule (J)', value: 'joule' }, - { text: 'electron volt (eV)', value: 'ev' }, + { text: 'Watt (W)', value: 'watt' }, + { text: 'Kilowatt (kW)', value: 'kwatt' }, + { text: 'Milliwatt (mW)', value: 'mwatt' }, + { text: 'Volt-ampere (VA)', value: 'voltamp' }, + { text: 'Kilovolt-ampere (kVA)', value: 'kvoltamp' }, + { text: 'Volt-ampere reactive (var)', value: 'voltampreact' }, + { text: 'Kilovolt-ampere reactive (kvar)', value: 'kvoltampreact' }, + { text: 'Watt-hour (Wh)', value: 'watth' }, + { text: 'Kilowatt-hour (kWh)', value: 'kwatth' }, + { text: 'Kilowatt-min (kWm)', value: 'kwattm' }, + { text: 'Joule (J)', value: 'joule' }, + { text: 'Electron volt (eV)', value: 'ev' }, { text: 'Ampere (A)', value: 'amp' }, { text: 'Kiloampere (kA)', value: 'kamp' }, + { text: 'Milliampere (mA)', value: 'mamp' }, { text: 'Volt (V)', value: 'volt' }, { text: 'Kilovolt (kV)', value: 'kvolt' }, + { text: 'Millivolt (mV)', value: 'mvolt' }, { text: 'Decibel-milliwatt (dBm)', value: 'dBm' }, + { text: 'Ohm (Ω)', value: 'ohm' } ], }, { @@ -962,6 +997,22 @@ kbn.getUnitFormats = function() { { text: 'Cubic feet/min (cfm)', value: 'flowcfm' }, ], }, + { + text: 'angle', + submenu: [ + { text: 'Degrees (°)', value: 'degree' }, + { text: 'Radians', value: 'radian' }, + { text: 'Gradian', value: 'grad' } + ] + }, + { + text: 'acceleration', + submenu: [ + { text: 'Meters/sec²', value: 'accMS2' }, + { text: 'Feet/sec²', value: 'accFS2' }, + { text: 'G unit', value: 'accG' } + ] + } ]; }; diff 
--git a/public/app/core/utils/version.ts b/public/app/core/utils/version.ts new file mode 100644 index 00000000000..6ee1400df51 --- /dev/null +++ b/public/app/core/utils/version.ts @@ -0,0 +1,34 @@ +import _ from 'lodash'; + +const versionPattern = /^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:-([0-9A-Za-z\.]+))?/; + +export class SemVersion { + major: number; + minor: number; + patch: number; + meta: string; + + constructor(version: string) { + let match = versionPattern.exec(version); + if (match) { + this.major = Number(match[1]); + this.minor = Number(match[2] || 0); + this.patch = Number(match[3] || 0); + this.meta = match[4]; + } + } + + isGtOrEq(version: string): boolean { + let compared = new SemVersion(version); + return this.major > compared.major || (this.major === compared.major && (this.minor > compared.minor || (this.minor === compared.minor && this.patch >= compared.patch))); + } + + isValid(): boolean { + return _.isNumber(this.major); + } +} + +export function isVersionGtOrEq(a: string, b: string): boolean { + let a_semver = new SemVersion(a); + return a_semver.isGtOrEq(b); +} diff --git a/public/app/features/alerting/alert_list_ctrl.ts b/public/app/features/alerting/alert_list_ctrl.ts index f06b592e9ec..5e9d8d80a92 100644 --- a/public/app/features/alerting/alert_list_ctrl.ts +++ b/public/app/features/alerting/alert_list_ctrl.ts @@ -11,6 +11,7 @@ export class AlertListCtrl { stateFilters = [ {text: 'All', value: null}, {text: 'OK', value: 'ok'}, + {text: 'Not OK', value: 'not_ok'}, {text: 'Alerting', value: 'alerting'}, {text: 'No Data', value: 'no_data'}, {text: 'Paused', value: 'paused'}, diff --git a/public/app/features/alerting/alert_tab_ctrl.ts b/public/app/features/alerting/alert_tab_ctrl.ts index 2c273c93a01..b65a559e426 100644 --- a/public/app/features/alerting/alert_tab_ctrl.ts +++ b/public/app/features/alerting/alert_tab_ctrl.ts @@ -95,6 +95,7 @@ export class AlertTabCtrl { case "hipchat": return "fa fa-mail-forward"; case "pushover": return "fa fa-mobile"; case "kafka": return "fa fa-random"; + case "teams": return "fa fa-windows"; } return 'fa fa-bell'; } diff --git a/public/app/features/annotations/annotation_tooltip.ts b/public/app/features/annotations/annotation_tooltip.ts index c950d3edd55..4828eb671a6 100644 --- a/public/app/features/annotations/annotation_tooltip.ts +++ b/public/app/features/annotations/annotation_tooltip.ts @@ -39,7 +39,7 @@ export function annotationTooltipDirective($sanitize, dashboardSrv, contextSrv, text = text + '
' + event.text; } } else if (title) { - text = title + '
' + text; + text = title + '
' + (_.isString(text) ? text : ''); title = ''; } diff --git a/public/app/features/dashboard/dashboard_srv.ts b/public/app/features/dashboard/dashboard_srv.ts index 7c1af20716b..a01eb64155c 100644 --- a/public/app/features/dashboard/dashboard_srv.ts +++ b/public/app/features/dashboard/dashboard_srv.ts @@ -122,6 +122,27 @@ export class DashboardSrv { modalClass: 'modal--narrow' }); } + + starDashboard(dashboardId, isStarred) { + let promise; + + if (isStarred) { + promise = this.backendSrv.delete('/api/user/stars/dashboard/' + dashboardId).then(() => { + return false; + }); + } else { + promise = this.backendSrv.post('/api/user/stars/dashboard/' + dashboardId).then(() => { + return true; + }); + } + + return promise.then(res => { + if (this.dash && this.dash.id === dashboardId) { + this.dash.meta.isStarred = res; + } + return res; + }); + } } coreModule.service('dashboardSrv', DashboardSrv); diff --git a/public/app/features/dashboard/dashnav/dashnav.html b/public/app/features/dashboard/dashnav/dashnav.html index 833a3351e4c..1db66f145bf 100644 --- a/public/app/features/dashboard/dashnav/dashnav.html +++ b/public/app/features/dashboard/dashnav/dashnav.html @@ -84,7 +84,7 @@ Back to dashboard -
  • +
  • diff --git a/public/app/features/dashboard/dashnav/dashnav.ts b/public/app/features/dashboard/dashnav/dashnav.ts index 71bc23c3eab..b4697039560 100644 --- a/public/app/features/dashboard/dashnav/dashnav.ts +++ b/public/app/features/dashboard/dashnav/dashnav.ts @@ -49,14 +49,9 @@ export class DashNavCtrl { } starDashboard() { - if (this.dashboard.meta.isStarred) { - return this.backendSrv.delete('/api/user/stars/dashboard/' + this.dashboard.id).then(() => { - this.dashboard.meta.isStarred = false; - }); - } - - this.backendSrv.post('/api/user/stars/dashboard/' + this.dashboard.id).then(() => { - this.dashboard.meta.isStarred = true; + this.dashboardSrv.starDashboard(this.dashboard.id, this.dashboard.meta.isStarred) + .then(newState => { + this.dashboard.meta.isStarred = newState; }); } diff --git a/public/app/features/dashboard/save_as_modal.ts b/public/app/features/dashboard/save_as_modal.ts index 689718cb7e8..bbb2899f665 100644 --- a/public/app/features/dashboard/save_as_modal.ts +++ b/public/app/features/dashboard/save_as_modal.ts @@ -56,7 +56,9 @@ export class SaveDashboardAsModalCtrl { // do not want to create alert dupes if (dashboard.id > 0) { this.clone.panels.forEach(panel => { - delete panel.thresholds; + if (panel.type === "graph" && panel.alert) { + delete panel.thresholds; + } delete panel.alert; }); } diff --git a/public/app/features/dashboard/specs/save_as_modal.jest.ts b/public/app/features/dashboard/specs/save_as_modal.jest.ts new file mode 100644 index 00000000000..b2ab77f2255 --- /dev/null +++ b/public/app/features/dashboard/specs/save_as_modal.jest.ts @@ -0,0 +1,62 @@ +import { SaveDashboardAsModalCtrl } from '../save_as_modal'; +import { describe, it, expect } from 'test/lib/common'; + +describe('saving dashboard as', () => { + function scenario(name, panel, verify) { + describe(name, () => { + var json = { + title: 'name', + panels: [panel], + }; + + var mockDashboardSrv = { + getCurrent: function() { + return { + id: 5, + meta: {}, + getSaveModelClone: function() { + return json; + }, + }; + }, + }; + + var ctrl = new SaveDashboardAsModalCtrl(mockDashboardSrv); + var ctx: any = { + clone: ctrl.clone, + ctrl: ctrl, + panel: panel + }; + + it('verify', () => { + verify(ctx); + }); + }); + } + + scenario('default values', {}, ctx => { + var clone = ctx.clone; + expect(clone.id).toBe(null); + expect(clone.title).toBe('name Copy'); + expect(clone.editable).toBe(true); + expect(clone.hideControls).toBe(false); + }); + + var graphPanel = { id: 1, type: 'graph', alert: { rule: 1 }, thresholds: { value: 3000 } }; + + scenario('should remove alert from graph panel', graphPanel, ctx => { + expect(ctx.panel.alert).toBe(undefined); + }); + + scenario('should remove threshold from graph panel', graphPanel, ctx => { + expect(ctx.panel.thresholds).toBe(undefined); + }); + + scenario('singlestat should keep threshold', { id: 1, type: 'singlestat', thresholds: { value: 3000 } }, ctx => { + expect(ctx.panel.thresholds).not.toBe(undefined); + }); + + scenario('table should keep threshold', { id: 1, type: 'table', thresholds: { value: 3000 } }, ctx => { + expect(ctx.panel.thresholds).not.toBe(undefined); + }); +}); diff --git a/public/app/features/dashboard/specs/share_modal_ctrl_specs.ts b/public/app/features/dashboard/specs/share_modal_ctrl_specs.ts index 7a04f5f7579..c39342a6f40 100644 --- a/public/app/features/dashboard/specs/share_modal_ctrl_specs.ts +++ b/public/app/features/dashboard/specs/share_modal_ctrl_specs.ts @@ -2,7 +2,7 @@ import {describe, beforeEach, it, expect, 
sinon, angularMocks} from 'test/lib/common'; import helpers from 'test/specs/helpers'; import '../shareModalCtrl'; import config from 'app/core/config'; -import 'app/features/panellinks/linkSrv'; +import 'app/features/panellinks/link_srv'; describe('ShareModalCtrl', function() { var ctx = new helpers.ControllerTestContext(); diff --git a/public/app/features/dashboard/timepicker/input_date.ts b/public/app/features/dashboard/timepicker/input_date.ts index b6988a3ff16..ca3f5fa7ae9 100644 --- a/public/app/features/dashboard/timepicker/input_date.ts +++ b/public/app/features/dashboard/timepicker/input_date.ts @@ -1,5 +1,3 @@ -/// - import moment from 'moment'; import * as dateMath from 'app/core/utils/datemath'; @@ -7,16 +5,16 @@ export function inputDateDirective() { return { restrict: 'A', require: 'ngModel', - link: function ($scope, $elem, attrs, ngModel) { + link: function($scope, $elem, attrs, ngModel) { var format = 'YYYY-MM-DD HH:mm:ss'; - var fromUser = function (text) { + var fromUser = function(text) { if (text.indexOf('now') !== -1) { if (!dateMath.isValid(text)) { - ngModel.$setValidity("error", false); + ngModel.$setValidity('error', false); return undefined; } - ngModel.$setValidity("error", true); + ngModel.$setValidity('error', true); return text; } @@ -28,15 +26,15 @@ export function inputDateDirective() { } if (!parsed.isValid()) { - ngModel.$setValidity("error", false); + ngModel.$setValidity('error', false); return undefined; } - ngModel.$setValidity("error", true); + ngModel.$setValidity('error', true); return parsed; }; - var toUser = function (currentValue) { + var toUser = function(currentValue) { if (moment.isMoment(currentValue)) { return currentValue.format(format); } else { @@ -46,7 +44,6 @@ export function inputDateDirective() { ngModel.$parsers.push(fromUser); ngModel.$formatters.push(toUser); - } + }, }; } - diff --git a/public/app/features/dashboard/timepicker/settings.html b/public/app/features/dashboard/timepicker/settings.html index 1143f16e3b4..8c40121eaed 100644 --- a/public/app/features/dashboard/timepicker/settings.html +++ b/public/app/features/dashboard/timepicker/settings.html @@ -1,15 +1,14 @@
    -
    -
    - Auto-refresh - -
    -
    - Now delay now- - -
    +
    +
    + Auto-refresh + +
    +
    + Now delay now- + +
    + +
    diff --git a/public/app/features/dashboard/timepicker/timepicker.html b/public/app/features/dashboard/timepicker/timepicker.html index 12570b3f2b0..a230dd53b4d 100644 --- a/public/app/features/dashboard/timepicker/timepicker.html +++ b/public/app/features/dashboard/timepicker/timepicker.html @@ -1,34 +1,38 @@ diff --git a/public/app/features/dashlinks/editor.html b/public/app/features/dashlinks/editor.html index d1d73520283..11dfdf74c1f 100644 --- a/public/app/features/dashlinks/editor.html +++ b/public/app/features/dashlinks/editor.html @@ -13,9 +13,9 @@
    With tags - +
    - +
    Title diff --git a/public/app/features/panel/metrics_panel_ctrl.ts b/public/app/features/panel/metrics_panel_ctrl.ts index bef7bca817b..f4d329908b1 100644 --- a/public/app/features/panel/metrics_panel_ctrl.ts +++ b/public/app/features/panel/metrics_panel_ctrl.ts @@ -226,7 +226,6 @@ class MetricsPanelCtrl extends PanelCtrl { interval: this.interval, intervalMs: this.intervalMs, targets: this.panel.targets, - format: this.panel.renderer === 'png' ? 'png' : 'json', maxDataPoints: this.resolution, scopedVars: scopedVars, cacheTimeout: this.panel.cacheTimeout diff --git a/public/app/features/panellinks/linkSrv.js b/public/app/features/panellinks/linkSrv.js deleted file mode 100644 index 89d89487c51..00000000000 --- a/public/app/features/panellinks/linkSrv.js +++ /dev/null @@ -1,118 +0,0 @@ -define([ - 'angular', - 'lodash', - 'app/core/utils/kbn', -], -function (angular, _, kbn) { - 'use strict'; - - kbn = kbn.default; - - angular - .module('grafana.services') - .service('linkSrv', function(templateSrv, timeSrv) { - - this.getLinkUrl = function(link) { - var url = templateSrv.replace(link.url || ''); - var params = {}; - - if (link.keepTime) { - var range = timeSrv.timeRangeForUrl(); - params['from'] = range.from; - params['to'] = range.to; - } - - if (link.includeVars) { - templateSrv.fillVariableValuesForUrl(params); - } - - return this.addParamsToUrl(url, params); - }; - - this.addParamsToUrl = function(url, params) { - var paramsArray = []; - _.each(params, function(value, key) { - if (value === null) { return; } - if (value === true) { - paramsArray.push(key); - } - else if (_.isArray(value)) { - _.each(value, function(instance) { - paramsArray.push(key + '=' + encodeURIComponent(instance)); - }); - } - else { - paramsArray.push(key + '=' + encodeURIComponent(value)); - } - }); - - if (paramsArray.length === 0) { - return url; - } - - return this.appendToQueryString(url, paramsArray.join('&')); - }; - - this.appendToQueryString = function(url, stringToAppend) { - if (!_.isUndefined(stringToAppend) && stringToAppend !== null && stringToAppend !== '') { - var pos = url.indexOf('?'); - if (pos !== -1) { - if (url.length - pos > 1) { - url += '&'; - } - } else { - url += '?'; - } - url += stringToAppend; - } - return url; - }; - - this.getAnchorInfo = function(link) { - var info = {}; - info.href = this.getLinkUrl(link); - info.title = templateSrv.replace(link.title || ''); - return info; - }; - - this.getPanelLinkAnchorInfo = function(link, scopedVars) { - var info = {}; - if (link.type === 'absolute') { - info.target = link.targetBlank ? '_blank' : '_self'; - info.href = templateSrv.replace(link.url || '', scopedVars); - info.title = templateSrv.replace(link.title || '', scopedVars); - } - else if (link.dashUri) { - info.href = 'dashboard/' + link.dashUri + '?'; - info.title = templateSrv.replace(link.title || '', scopedVars); - info.target = link.targetBlank ? 
'_blank' : ''; - } - else { - info.title = templateSrv.replace(link.title || '', scopedVars); - var slug = kbn.slugifyForUrl(link.dashboard || ''); - info.href = 'dashboard/db/' + slug + '?'; - } - - var params = {}; - - if (link.keepTime) { - var range = timeSrv.timeRangeForUrl(); - params['from'] = range.from; - params['to'] = range.to; - } - - if (link.includeVars) { - templateSrv.fillVariableValuesForUrl(params, scopedVars); - } - - info.href = this.addParamsToUrl(info.href, params); - - if (link.params) { - info.href = this.appendToQueryString(info.href, templateSrv.replace(link.params, scopedVars)); - } - - return info; - }; - - }); -}); diff --git a/public/app/features/panellinks/link_srv.ts b/public/app/features/panellinks/link_srv.ts new file mode 100644 index 00000000000..71192a86487 --- /dev/null +++ b/public/app/features/panellinks/link_srv.ts @@ -0,0 +1,113 @@ +import angular from 'angular'; +import _ from 'lodash'; +import kbn from 'app/core/utils/kbn'; + +export class LinkSrv { + + /** @ngInject */ + constructor(private templateSrv, private timeSrv) {} + + getLinkUrl(link) { + var url = this.templateSrv.replace(link.url || ''); + var params = {}; + + if (link.keepTime) { + var range = this.timeSrv.timeRangeForUrl(); + params['from'] = range.from; + params['to'] = range.to; + } + + if (link.includeVars) { + this.templateSrv.fillVariableValuesForUrl(params); + } + + return this.addParamsToUrl(url, params); + } + + addParamsToUrl(url, params) { + var paramsArray = []; + + _.each(params, function(value, key) { + if (value === null) { + return; + } + if (value === true) { + paramsArray.push(key); + } else if (_.isArray(value)) { + _.each(value, function(instance) { + paramsArray.push(key + '=' + encodeURIComponent(instance)); + }); + } else { + paramsArray.push(key + '=' + encodeURIComponent(value)); + } + }); + + if (paramsArray.length === 0) { + return url; + } + + return this.appendToQueryString(url, paramsArray.join('&')); + } + + appendToQueryString(url, stringToAppend) { + if (!_.isUndefined(stringToAppend) && stringToAppend !== null && stringToAppend !== '') { + var pos = url.indexOf('?'); + if (pos !== -1) { + if (url.length - pos > 1) { + url += '&'; + } + } else { + url += '?'; + } + url += stringToAppend; + } + + return url; + } + + getAnchorInfo(link) { + var info: any = {}; + info.href = this.getLinkUrl(link); + info.title = this.templateSrv.replace(link.title || ''); + return info; + } + + getPanelLinkAnchorInfo(link, scopedVars) { + var info: any = {}; + if (link.type === 'absolute') { + info.target = link.targetBlank ? '_blank' : '_self'; + info.href = this.templateSrv.replace(link.url || '', scopedVars); + info.title = this.templateSrv.replace(link.title || '', scopedVars); + } else if (link.dashUri) { + info.href = 'dashboard/' + link.dashUri + '?'; + info.title = this.templateSrv.replace(link.title || '', scopedVars); + info.target = link.targetBlank ? 
'_blank' : ''; + } else { + info.title = this.templateSrv.replace(link.title || '', scopedVars); + var slug = kbn.slugifyForUrl(link.dashboard || ''); + info.href = 'dashboard/db/' + slug + '?'; + } + + var params = {}; + + if (link.keepTime) { + var range = this.timeSrv.timeRangeForUrl(); + params['from'] = range.from; + params['to'] = range.to; + } + + if (link.includeVars) { + this.templateSrv.fillVariableValuesForUrl(params, scopedVars); + } + + info.href = this.addParamsToUrl(info.href, params); + + if (link.params) { + info.href = this.appendToQueryString(info.href, this.templateSrv.replace(link.params, scopedVars)); + } + + return info; + } +} + +angular.module('grafana.services').service('linkSrv', LinkSrv); diff --git a/public/app/features/panellinks/module.js b/public/app/features/panellinks/module.js index 351b38f27c4..a36317dc2b3 100644 --- a/public/app/features/panellinks/module.js +++ b/public/app/features/panellinks/module.js @@ -1,7 +1,7 @@ define([ 'angular', 'lodash', - './linkSrv', + './link_srv', ], function (angular, _) { 'use strict'; diff --git a/public/app/features/panellinks/specs/link_srv.jest.ts b/public/app/features/panellinks/specs/link_srv.jest.ts new file mode 100644 index 00000000000..2ec38961e29 --- /dev/null +++ b/public/app/features/panellinks/specs/link_srv.jest.ts @@ -0,0 +1,47 @@ +import { LinkSrv } from '../link_srv'; +import _ from 'lodash'; + +jest.mock('angular', () => { + let AngularJSMock = require('test/mocks/angular'); + return new AngularJSMock(); +}); + +describe('linkSrv', function() { + var linkSrv; + var templateSrvMock = {}; + var timeSrvMock = {}; + + beforeEach(() => { + linkSrv = new LinkSrv(templateSrvMock, timeSrvMock); + }); + + describe('when appending query strings', function() { + it('add ? to URL if not present', function() { + var url = linkSrv.appendToQueryString('http://example.com', 'foo=bar'); + expect(url).toBe('http://example.com?foo=bar'); + }); + + it('do not add & to URL if ? 
is present but query string is empty', function() { + var url = linkSrv.appendToQueryString('http://example.com?', 'foo=bar'); + expect(url).toBe('http://example.com?foo=bar'); + }); + + it('add & to URL if query string is present', function() { + var url = linkSrv.appendToQueryString('http://example.com?foo=bar', 'hello=world'); + expect(url).toBe('http://example.com?foo=bar&hello=world'); + }); + + it('do not change the URL if there is nothing to append', function() { + _.each(['', undefined, null], function(toAppend) { + var url1 = linkSrv.appendToQueryString('http://example.com', toAppend); + expect(url1).toBe('http://example.com'); + + var url2 = linkSrv.appendToQueryString('http://example.com?', toAppend); + expect(url2).toBe('http://example.com?'); + + var url3 = linkSrv.appendToQueryString('http://example.com?foo=bar', toAppend); + expect(url3).toBe('http://example.com?foo=bar'); + }); + }); + }); +}); diff --git a/public/app/features/panellinks/specs/link_srv_specs.ts b/public/app/features/panellinks/specs/link_srv_specs.ts deleted file mode 100644 index 77bb0a36c1f..00000000000 --- a/public/app/features/panellinks/specs/link_srv_specs.ts +++ /dev/null @@ -1,46 +0,0 @@ -import {describe, beforeEach, it, expect, angularMocks} from 'test/lib/common'; -import 'app/features/panellinks/linkSrv'; -import _ from 'lodash'; - -describe('linkSrv', function() { - var _linkSrv; - - beforeEach(angularMocks.module('grafana.core')); - beforeEach(angularMocks.module('grafana.services')); - - beforeEach(angularMocks.inject(function(linkSrv) { - _linkSrv = linkSrv; - })); - - describe('when appending query strings', function() { - - it('add ? to URL if not present', function() { - var url = _linkSrv.appendToQueryString('http://example.com', 'foo=bar'); - expect(url).to.be('http://example.com?foo=bar'); - }); - - it('do not add & to URL if ? 
is present but query string is empty', function() { - var url = _linkSrv.appendToQueryString('http://example.com?', 'foo=bar'); - expect(url).to.be('http://example.com?foo=bar'); - }); - - it('add & to URL if query string is present', function() { - var url = _linkSrv.appendToQueryString('http://example.com?foo=bar', 'hello=world'); - expect(url).to.be('http://example.com?foo=bar&hello=world'); - }); - - it('do not change the URL if there is nothing to append', function() { - _.each(['', undefined, null], function(toAppend) { - var url1 = _linkSrv.appendToQueryString('http://example.com', toAppend); - expect(url1).to.be('http://example.com'); - - var url2 = _linkSrv.appendToQueryString('http://example.com?', toAppend); - expect(url2).to.be('http://example.com?'); - - var url3 = _linkSrv.appendToQueryString('http://example.com?foo=bar', toAppend); - expect(url3).to.be('http://example.com?foo=bar'); - }); - }); - - }); -}); diff --git a/public/app/features/plugins/ds_edit_ctrl.ts b/public/app/features/plugins/ds_edit_ctrl.ts index 1afa497f041..db0d5c744dc 100644 --- a/public/app/features/plugins/ds_edit_ctrl.ts +++ b/public/app/features/plugins/ds_edit_ctrl.ts @@ -157,6 +157,10 @@ export class DataSourceEditCtrl { return; } + if (this.current.readOnly) { + return; + } + if (this.current.id) { return this.backendSrv.put('/api/datasources/' + this.current.id, this.current).then((result) => { this.current = result.datasource; diff --git a/public/app/features/plugins/partials/ds_edit.html b/public/app/features/plugins/partials/ds_edit.html index ae8c505db55..e039fbd83ae 100644 --- a/public/app/features/plugins/partials/ds_edit.html +++ b/public/app/features/plugins/partials/ds_edit.html @@ -5,6 +5,8 @@ -
    @@ -71,8 +72,8 @@
    - - + Cancel @@ -87,4 +88,3 @@
    - diff --git a/public/app/features/plugins/plugin_loader.ts b/public/app/features/plugins/plugin_loader.ts index c450f2e474a..c1dd3246fa1 100644 --- a/public/app/features/plugins/plugin_loader.ts +++ b/public/app/features/plugins/plugin_loader.ts @@ -40,7 +40,10 @@ System.config({ css: 'vendor/plugin-css/css.js' }, meta: { - '*': {esModule: true} + '*': { + esModule: true, + authorization: true, + } } }); diff --git a/public/app/plugins/app/testdata/datasource/datasource.ts b/public/app/plugins/app/testdata/datasource/datasource.ts index 90ae9e3aa47..2df33ea4ac6 100644 --- a/public/app/plugins/app/testdata/datasource/datasource.ts +++ b/public/app/plugins/app/testdata/datasource/datasource.ts @@ -1,7 +1,4 @@ -/// - import _ from 'lodash'; -import angular from 'angular'; class TestDataDatasource { id: any; @@ -21,7 +18,8 @@ class TestDataDatasource { intervalMs: options.intervalMs, maxDataPoints: options.maxDataPoints, stringInput: item.stringInput, - jsonInput: angular.fromJson(item.jsonInput), + points: item.points, + alias: item.alias, datasourceId: this.id, }; }); diff --git a/public/app/plugins/app/testdata/datasource/module.ts b/public/app/plugins/app/testdata/datasource/module.ts index 309b7443836..9d7eaf3cc83 100644 --- a/public/app/plugins/app/testdata/datasource/module.ts +++ b/public/app/plugins/app/testdata/datasource/module.ts @@ -1,5 +1,3 @@ -/// - import {TestDataDatasource} from './datasource'; import {TestDataQueryCtrl} from './query_ctrl'; diff --git a/public/app/plugins/app/testdata/datasource/query_ctrl.ts b/public/app/plugins/app/testdata/datasource/query_ctrl.ts index e783584eb5d..dd5f59c0a5a 100644 --- a/public/app/plugins/app/testdata/datasource/query_ctrl.ts +++ b/public/app/plugins/app/testdata/datasource/query_ctrl.ts @@ -1,14 +1,16 @@ -/// - import _ from 'lodash'; -import {QueryCtrl} from 'app/plugins/sdk'; +import { QueryCtrl } from 'app/plugins/sdk'; +import moment from 'moment'; export class TestDataQueryCtrl extends QueryCtrl { static templateUrl = 'partials/query.editor.html'; scenarioList: any; scenario: any; + newPointValue: number; + newPointTime: any; + selectedPoint: any; /** @ngInject **/ constructor($scope, $injector, private backendSrv) { @@ -16,19 +18,53 @@ export class TestDataQueryCtrl extends QueryCtrl { this.target.scenarioId = this.target.scenarioId || 'random_walk'; this.scenarioList = []; + this.newPointTime = moment(); + this.selectedPoint = { text: 'Select point', value: null }; + } + + getPoints() { + return _.map(this.target.points, (point, index) => { + return { + text: moment(point[1]).format('MMMM Do YYYY, H:mm:ss') + ' : ' + point[0], + value: index, + }; + }); + } + + pointSelected(option) { + this.selectedPoint = option; + } + + deletePoint() { + this.target.points.splice(this.selectedPoint.value, 1); + this.selectedPoint = { text: 'Select point', value: null }; + this.refresh(); + } + + addPoint() { + this.target.points = this.target.points || []; + this.target.points.push([this.newPointValue, this.newPointTime.valueOf()]); + this.target.points = _.sortBy(this.target.points, p => p[1]); + this.refresh(); } $onInit() { return this.backendSrv.get('/api/tsdb/testdata/scenarios').then(res => { this.scenarioList = res; - this.scenario = _.find(this.scenarioList, {id: this.target.scenarioId}); + this.scenario = _.find(this.scenarioList, { id: this.target.scenarioId }); }); } scenarioChanged() { - this.scenario = _.find(this.scenarioList, {id: this.target.scenarioId}); + this.scenario = _.find(this.scenarioList, { id: 
this.target.scenarioId }); this.target.stringInput = this.scenario.stringInput; + + if (this.target.scenarioId === 'manual_entry') { + this.target.points = this.target.points || []; + } else { + delete this.target.points; + } + this.refresh(); } } - diff --git a/public/app/plugins/app/testdata/partials/query.editor.html b/public/app/plugins/app/testdata/partials/query.editor.html index a39582d5397..247918bce1f 100644 --- a/public/app/plugins/app/testdata/partials/query.editor.html +++ b/public/app/plugins/app/testdata/partials/query.editor.html @@ -1,8 +1,8 @@
    - -
    + +
    @@ -18,5 +18,23 @@
    +
    +
    + + + + + + + + +
    +
    + +
    +
    +
    +
    +
    diff --git a/public/app/plugins/datasource/cloudwatch/datasource.js b/public/app/plugins/datasource/cloudwatch/datasource.js index d21bcf6413c..ac4573ef43a 100644 --- a/public/app/plugins/datasource/cloudwatch/datasource.js +++ b/public/app/plugins/datasource/cloudwatch/datasource.js @@ -113,7 +113,7 @@ function (angular, _, moment, dateMath, kbn, templatingVariable) { }; this.performTimeSeriesQuery = function(request) { - return backendSrv.post('/api/tsdb/query', request).then(function (res) { + return this.awsRequest('/api/tsdb/query', request).then(function (res) { var data = []; if (res.results) { @@ -139,7 +139,7 @@ function (angular, _, moment, dateMath, kbn, templatingVariable) { this.doMetricQueryRequest = function (subtype, parameters) { var range = timeSrv.timeRange(); - return backendSrv.post('/api/tsdb/query', { + return this.awsRequest('/api/tsdb/query', { from: range.from.valueOf().toString(), to: range.to.valueOf().toString(), queries: [ @@ -277,7 +277,7 @@ function (angular, _, moment, dateMath, kbn, templatingVariable) { alarmNamePrefix: annotation.alarmNamePrefix || '' }; - return backendSrv.post('/api/tsdb/query', { + return this.awsRequest('/api/tsdb/query', { from: options.range.from.valueOf().toString(), to: options.range.to.valueOf().toString(), queries: [ @@ -325,10 +325,10 @@ function (angular, _, moment, dateMath, kbn, templatingVariable) { }); }; - this.awsRequest = function(data) { + this.awsRequest = function(url, data) { var options = { method: 'POST', - url: this.proxyUrl, + url: url, data: data }; diff --git a/public/app/plugins/datasource/cloudwatch/partials/query.parameter.html b/public/app/plugins/datasource/cloudwatch/partials/query.parameter.html index 67351358696..fd14fb7d077 100644 --- a/public/app/plugins/datasource/cloudwatch/partials/query.parameter.html +++ b/public/app/plugins/datasource/cloudwatch/partials/query.parameter.html @@ -49,6 +49,7 @@
  • {{stat}}
  • {{namespace}}
  • {{region}}
  • +
  • {{period}}
  • {{YOUR_DIMENSION_NAME}}
  • diff --git a/public/app/plugins/datasource/cloudwatch/specs/datasource_specs.ts b/public/app/plugins/datasource/cloudwatch/specs/datasource_specs.ts index 02312148b6a..f278ce9305d 100644 --- a/public/app/plugins/datasource/cloudwatch/specs/datasource_specs.ts +++ b/public/app/plugins/datasource/cloudwatch/specs/datasource_specs.ts @@ -66,9 +66,9 @@ describe('CloudWatchDatasource', function() { }; beforeEach(function() { - ctx.backendSrv.post = function(path, params) { - requestParams = params; - return ctx.$q.when(response); + ctx.backendSrv.datasourceRequest = function(params) { + requestParams = params.data; + return ctx.$q.when({data: response}); }; }); @@ -211,9 +211,9 @@ describe('CloudWatchDatasource', function() { }; beforeEach(function() { - ctx.backendSrv.post = function(path, params) { - requestParams = params; - return ctx.$q.when(response); + ctx.backendSrv.datasourceRequest = function(params) { + requestParams = params.data; + return ctx.$q.when({data: response}); }; }); @@ -234,12 +234,8 @@ describe('CloudWatchDatasource', function() { beforeEach(() => { setupCallback(); ctx.backendSrv.datasourceRequest = args => { - scenario.request = args; - return ctx.$q.when({ data: scenario.requestResponse }); - }; - ctx.backendSrv.post = (path, args) => { - scenario.request = args; - return ctx.$q.when(scenario.requestResponse); + scenario.request = args.data; + return ctx.$q.when({data: scenario.requestResponse}); }; ctx.ds.metricFindQuery(query).then(args => { scenario.result = args; diff --git a/public/app/plugins/datasource/elasticsearch/config_ctrl.ts b/public/app/plugins/datasource/elasticsearch/config_ctrl.ts index fdf941e81f7..2a3803db998 100644 --- a/public/app/plugins/datasource/elasticsearch/config_ctrl.ts +++ b/public/app/plugins/datasource/elasticsearch/config_ctrl.ts @@ -1,5 +1,3 @@ -/// - import _ from 'lodash'; export class ElasticConfigCtrl { @@ -9,6 +7,7 @@ export class ElasticConfigCtrl { /** @ngInject */ constructor($scope) { this.current.jsonData.timeField = this.current.jsonData.timeField || '@timestamp'; + this.current.jsonData.esVersion = this.current.jsonData.esVersion || 5; } indexPatternTypes = [ diff --git a/public/app/plugins/datasource/graphite/config_ctrl.ts b/public/app/plugins/datasource/graphite/config_ctrl.ts index 4f6ebef77ea..595584691a2 100644 --- a/public/app/plugins/datasource/graphite/config_ctrl.ts +++ b/public/app/plugins/datasource/graphite/config_ctrl.ts @@ -2,12 +2,26 @@ export class GraphiteConfigCtrl { static templateUrl = 'public/app/plugins/datasource/graphite/partials/config.html'; + datasourceSrv: any; current: any; /** @ngInject */ - constructor($scope) { + constructor($scope, datasourceSrv) { + this.datasourceSrv = datasourceSrv; this.current.jsonData = this.current.jsonData || {}; this.current.jsonData.graphiteVersion = this.current.jsonData.graphiteVersion || '0.9'; + + this.autoDetectGraphiteVersion(); + } + + autoDetectGraphiteVersion() { + this.datasourceSrv.loadDatasource(this.current.name) + .then((ds) => { + return ds.getVersion(); + }).then((version) => { + this.graphiteVersions.push({name: version, value: version}); + this.current.jsonData.graphiteVersion = version; + }); } graphiteVersions = [ diff --git a/public/app/plugins/datasource/graphite/datasource.ts b/public/app/plugins/datasource/graphite/datasource.ts index 5114922f1f7..56a061bc1a4 100644 --- a/public/app/plugins/datasource/graphite/datasource.ts +++ b/public/app/plugins/datasource/graphite/datasource.ts @@ -2,6 +2,7 @@ import _ from 'lodash'; import * as 
dateMath from 'app/core/utils/datemath'; +import {isVersionGtOrEq, SemVersion} from 'app/core/utils/version'; /** @ngInject */ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv) { @@ -9,6 +10,7 @@ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv this.url = instanceSettings.url; this.name = instanceSettings.name; this.graphiteVersion = instanceSettings.jsonData.graphiteVersion || '0.9'; + this.supportsTags = supportsTags(this.graphiteVersion); this.cacheTimeout = instanceSettings.cacheTimeout; this.withCredentials = instanceSettings.withCredentials; this.render_method = instanceSettings.render_method || 'POST'; @@ -217,6 +219,126 @@ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv }); }; + this.getTags = function(optionalOptions) { + let options = optionalOptions || {}; + + let httpOptions: any = { + method: 'GET', + url: '/tags', + // for cancellations + requestId: options.requestId, + }; + + if (options && options.range) { + httpOptions.params.from = this.translateTime(options.range.from, false); + httpOptions.params.until = this.translateTime(options.range.to, true); + } + + return this.doGraphiteRequest(httpOptions).then(results => { + return _.map(results.data, tag => { + return { + text: tag.tag, + id: tag.id + }; + }); + }); + }; + + this.getTagValues = function(tag, optionalOptions) { + let options = optionalOptions || {}; + + let httpOptions: any = { + method: 'GET', + url: '/tags/' + tag, + // for cancellations + requestId: options.requestId, + }; + + if (options && options.range) { + httpOptions.params.from = this.translateTime(options.range.from, false); + httpOptions.params.until = this.translateTime(options.range.to, true); + } + + return this.doGraphiteRequest(httpOptions).then(results => { + if (results.data && results.data.values) { + return _.map(results.data.values, value => { + return { + text: value.value, + id: value.id + }; + }); + } else { + return []; + } + }); + }; + + this.getTagsAutoComplete = (expression, tagPrefix) => { + let httpOptions: any = { + method: 'GET', + url: '/tags/autoComplete/tags', + params: { + expr: expression + } + }; + + if (tagPrefix) { + httpOptions.params.tagPrefix = tagPrefix; + } + + return this.doGraphiteRequest(httpOptions).then(results => { + if (results.data) { + return _.map(results.data, (tag) => { + return { text: tag }; + }); + } else { + return []; + } + }); + }; + + this.getTagValuesAutoComplete = (expression, tag, valuePrefix) => { + let httpOptions: any = { + method: 'GET', + url: '/tags/autoComplete/values', + params: { + expr: expression, + tag: tag + } + }; + + if (valuePrefix) { + httpOptions.params.valuePrefix = valuePrefix; + } + + return this.doGraphiteRequest(httpOptions).then(results => { + if (results.data) { + return _.map(results.data, (value) => { + return { text: value }; + }); + } else { + return []; + } + }); + }; + + this.getVersion = function() { + let httpOptions = { + method: 'GET', + url: '/version/_', // Prevent last / trimming + }; + + return this.doGraphiteRequest(httpOptions).then(results => { + if (results.data) { + let semver = new SemVersion(results.data); + return semver.isValid() ? 
results.data : ''; + } + return ''; + }).catch(() => { + return ''; + }); + }; + this.testDatasource = function() { return this.metricFindQuery('*').then(function () { return { status: "success", message: "Data source is working"}; @@ -303,3 +425,7 @@ export function GraphiteDatasource(instanceSettings, $q, backendSrv, templateSrv return clean_options; }; } + +function supportsTags(version: string): boolean { + return isVersionGtOrEq(version, '1.1'); +} diff --git a/public/app/plugins/datasource/graphite/gfunc.js b/public/app/plugins/datasource/graphite/gfunc.js new file mode 100644 index 00000000000..94f4ca4c988 --- /dev/null +++ b/public/app/plugins/datasource/graphite/gfunc.js @@ -0,0 +1,981 @@ +define([ + 'lodash', + 'jquery', + 'app/core/utils/version' +], +function (_, $, version) { + 'use strict'; + + var index = []; + var categories = { + Combine: [], + Transform: [], + Calculate: [], + Filter: [], + Special: [] + }; + + function addFuncDef(funcDef) { + funcDef.params = funcDef.params || []; + funcDef.defaultParams = funcDef.defaultParams || []; + + if (funcDef.category) { + funcDef.category.push(funcDef); + } + index[funcDef.name] = funcDef; + index[funcDef.shortName || funcDef.name] = funcDef; + } + + var optionalSeriesRefArgs = [ + { name: 'other', type: 'value_or_series', optional: true }, + { name: 'other', type: 'value_or_series', optional: true }, + { name: 'other', type: 'value_or_series', optional: true }, + { name: 'other', type: 'value_or_series', optional: true }, + { name: 'other', type: 'value_or_series', optional: true } + ]; + + addFuncDef({ + name: 'scaleToSeconds', + category: categories.Transform, + params: [{ name: 'seconds', type: 'int' }], + defaultParams: [1], + }); + + addFuncDef({ + name: 'perSecond', + category: categories.Transform, + params: [{ name: "max value", type: "int", optional: true }], + defaultParams: [], + }); + + addFuncDef({ + name: "holtWintersForecast", + category: categories.Calculate, + }); + + addFuncDef({ + name: "holtWintersConfidenceBands", + category: categories.Calculate, + params: [{ name: "delta", type: 'int' }], + defaultParams: [3] + }); + + addFuncDef({ + name: "holtWintersAberration", + category: categories.Calculate, + params: [{ name: "delta", type: 'int' }], + defaultParams: [3] + }); + + addFuncDef({ + name: "nPercentile", + category: categories.Calculate, + params: [{ name: "Nth percentile", type: 'int' }], + defaultParams: [95] + }); + + addFuncDef({ + name: 'diffSeries', + params: optionalSeriesRefArgs, + defaultParams: ['#A'], + category: categories.Calculate, + }); + + addFuncDef({ + name: 'stddevSeries', + params: optionalSeriesRefArgs, + defaultParams: [''], + category: categories.Calculate, + }); + + addFuncDef({ + name: 'divideSeries', + params: optionalSeriesRefArgs, + defaultParams: ['#A'], + category: categories.Calculate, + }); + + addFuncDef({ + name: 'multiplySeries', + params: optionalSeriesRefArgs, + defaultParams: ['#A'], + category: categories.Calculate, + }); + + addFuncDef({ + name: 'asPercent', + params: optionalSeriesRefArgs, + defaultParams: ['#A'], + category: categories.Calculate, + }); + + addFuncDef({ + name: 'group', + params: optionalSeriesRefArgs, + defaultParams: ['#A', '#B'], + category: categories.Combine, + }); + + addFuncDef({ + name: 'sumSeries', + shortName: 'sum', + category: categories.Combine, + params: optionalSeriesRefArgs, + defaultParams: [''], + }); + + addFuncDef({ + name: 'averageSeries', + shortName: 'avg', + category: categories.Combine, + params: optionalSeriesRefArgs, + 
defaultParams: [''], + }); + + addFuncDef({ + name: 'rangeOfSeries', + category: categories.Combine + }); + + addFuncDef({ + name: 'percentileOfSeries', + category: categories.Combine, + params: [{ name: 'n', type: 'int' }, { name: 'interpolate', type: 'boolean', options: ['true', 'false'] }], + defaultParams: [95, 'false'] + }); + + addFuncDef({ + name: 'sumSeriesWithWildcards', + category: categories.Combine, + params: [ + { name: "node", type: "int" }, + { name: "node", type: "int", optional: true }, + { name: "node", type: "int", optional: true }, + { name: "node", type: "int", optional: true } + ], + defaultParams: [3] + }); + + addFuncDef({ + name: 'maxSeries', + shortName: 'max', + category: categories.Combine, + }); + + addFuncDef({ + name: 'minSeries', + shortName: 'min', + category: categories.Combine, + }); + + addFuncDef({ + name: 'averageSeriesWithWildcards', + category: categories.Combine, + params: [ + { name: "node", type: "int" }, + { name: "node", type: "int", optional: true }, + ], + defaultParams: [3] + }); + + addFuncDef({ + name: "alias", + category: categories.Special, + params: [{ name: "alias", type: 'string' }], + defaultParams: ['alias'] + }); + + addFuncDef({ + name: "aliasSub", + category: categories.Special, + params: [{ name: "search", type: 'string' }, { name: "replace", type: 'string' }], + defaultParams: ['', '\\1'] + }); + + addFuncDef({ + name: "stacked", + category: categories.Special, + params: [{ name: "stack", type: 'string' }], + defaultParams: ['stacked'] + }); + + addFuncDef({ + name: "consolidateBy", + category: categories.Special, + params: [ + { + name: 'function', + type: 'string', + options: ['sum', 'average', 'min', 'max'] + } + ], + defaultParams: ['max'] + }); + + addFuncDef({ + name: "cumulative", + category: categories.Special, + params: [], + defaultParams: [] + }); + + addFuncDef({ + name: "groupByNode", + category: categories.Special, + params: [ + { + name: "node", + type: "int", + options: [0,1,2,3,4,5,6,7,8,9,10,12] + }, + { + name: "function", + type: "string", + options: ['sum', 'avg', 'maxSeries'] + } + ], + defaultParams: [3, "sum"] + }); + + addFuncDef({ + name: 'aliasByNode', + category: categories.Special, + params: [ + { name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + ], + defaultParams: [3] + }); + + addFuncDef({ + name: 'substr', + category: categories.Special, + params: [ + { name: "start", type: "int", options: [-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,12] }, + { name: "stop", type: "int", options: [-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,12] }, + ], + defaultParams: [0, 0] + }); + + addFuncDef({ + name: 'sortByName', + category: categories.Special, + params: [{ name: 'natural', type: 'boolean', options: ['true', 'false'], optional: true }], + defaultParams: ['false'] + }); + + addFuncDef({ + name: 'sortByMaxima', + category: categories.Special + }); + + addFuncDef({ + name: 'sortByMinima', + category: categories.Special + }); + + addFuncDef({ + name: 'sortByTotal', + category: categories.Special + }); + + addFuncDef({ + name: 'aliasByMetric', + category: categories.Special, + }); + + addFuncDef({ + name: 'randomWalk', + fake: true, + category: categories.Special, + params: [{ name: "name", type: "string", }], + defaultParams: ['randomWalk'] + }); + + 
addFuncDef({ + name: 'countSeries', + category: categories.Special + }); + + addFuncDef({ + name: 'constantLine', + category: categories.Special, + params: [{ name: "value", type: "int", }], + defaultParams: [10] + }); + + addFuncDef({ + name: 'cactiStyle', + category: categories.Special, + }); + + addFuncDef({ + name: 'keepLastValue', + category: categories.Special, + params: [{ name: "n", type: "int", }], + defaultParams: [100] + }); + + addFuncDef({ + name: "changed", + category: categories.Special, + params: [], + defaultParams: [] + }); + + addFuncDef({ + name: 'scale', + category: categories.Transform, + params: [{ name: "factor", type: "int", }], + defaultParams: [1] + }); + + addFuncDef({ + name: 'offset', + category: categories.Transform, + params: [{ name: "amount", type: "int", }], + defaultParams: [10] + }); + + addFuncDef({ + name: 'transformNull', + category: categories.Transform, + params: [{ name: "amount", type: "int", }], + defaultParams: [0] + }); + + addFuncDef({ + name: 'integral', + category: categories.Transform, + }); + + addFuncDef({ + name: 'derivative', + category: categories.Transform, + }); + + addFuncDef({ + name: 'nonNegativeDerivative', + category: categories.Transform, + params: [{ name: "max value or 0", type: "int", optional: true }], + defaultParams: [''] + }); + + addFuncDef({ + name: 'timeShift', + category: categories.Transform, + params: [{ name: "amount", type: "select", options: ['1h', '6h', '12h', '1d', '2d', '7d', '14d', '30d'] }], + defaultParams: ['1d'] + }); + + addFuncDef({ + name: 'timeStack', + category: categories.Transform, + params: [ + { name: "timeShiftUnit", type: "select", options: ['1h', '6h', '12h', '1d', '2d', '7d', '14d', '30d'] }, + { name: "timeShiftStart", type: "int" }, + { name: "timeShiftEnd", type: "int" } + ], + defaultParams: ['1d', 0, 7] + }); + + addFuncDef({ + name: 'summarize', + category: categories.Transform, + params: [ + { name: "interval", type: "string" }, + { name: "func", type: "select", options: ['sum', 'avg', 'min', 'max', 'last'] }, + { name: "alignToFrom", type: "boolean", optional: true, options: ['false', 'true'] }, + ], + defaultParams: ['1h', 'sum', 'false'] + }); + + addFuncDef({ + name: 'smartSummarize', + category: categories.Transform, + params: [{ name: "interval", type: "string" }, { name: "func", type: "select", options: ['sum', 'avg', 'min', 'max', 'last'] }], + defaultParams: ['1h', 'sum'] + }); + + addFuncDef({ + name: 'absolute', + category: categories.Transform, + }); + + addFuncDef({ + name: 'hitcount', + category: categories.Transform, + params: [{ name: "interval", type: "string" }], + defaultParams: ['10s'] + }); + + addFuncDef({ + name: 'log', + category: categories.Transform, + params: [{ name: "base", type: "int" }], + defaultParams: ['10'] + }); + + addFuncDef({ + name: 'averageAbove', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [25] + }); + + addFuncDef({ + name: 'averageBelow', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [25] + }); + + addFuncDef({ + name: 'currentAbove', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [25] + }); + + addFuncDef({ + name: 'currentBelow', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [25] + }); + + addFuncDef({ + name: 'maximumAbove', + category: categories.Filter, + params: [{ name: "value", type: "int" }], + defaultParams: [0] + }); + + addFuncDef({ + name: 'maximumBelow', + 
category: categories.Filter, + params: [{ name: "value", type: "int" }], + defaultParams: [0] + }); + + addFuncDef({ + name: 'minimumAbove', + category: categories.Filter, + params: [{ name: "value", type: "int" }], + defaultParams: [0] + }); + + addFuncDef({ + name: 'minimumBelow', + category: categories.Filter, + params: [{ name: "value", type: "int" }], + defaultParams: [0] + }); + + addFuncDef({ + name: 'limit', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'mostDeviant', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [10] + }); + + addFuncDef({ + name: "exclude", + category: categories.Filter, + params: [{ name: "exclude", type: 'string' }], + defaultParams: ['exclude'] + }); + + addFuncDef({ + name: 'highestCurrent', + category: categories.Filter, + params: [{ name: "count", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'highestMax', + category: categories.Filter, + params: [{ name: "count", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'lowestCurrent', + category: categories.Filter, + params: [{ name: "count", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'movingAverage', + category: categories.Filter, + params: [{ name: "windowSize", type: "int_or_interval", options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: [10] + }); + + addFuncDef({ + name: 'movingMedian', + category: categories.Filter, + params: [{ name: "windowSize", type: "int_or_interval", options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: ['5'] + }); + + addFuncDef({ + name: 'stdev', + category: categories.Filter, + params: [{ name: "n", type: "int" }, { name: "tolerance", type: "int" }], + defaultParams: [5,0.1] + }); + + addFuncDef({ + name: 'highestAverage', + category: categories.Filter, + params: [{ name: "count", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'lowestAverage', + category: categories.Filter, + params: [{ name: "count", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'removeAbovePercentile', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'removeAboveValue', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'removeBelowPercentile', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'removeBelowValue', + category: categories.Filter, + params: [{ name: "n", type: "int" }], + defaultParams: [5] + }); + + addFuncDef({ + name: 'useSeriesAbove', + category: categories.Filter, + params: [ + { name: "value", type: "int" }, + { name: "search", type: "string" }, + { name: "replace", type: "string" } + ], + defaultParams: [0, 'search', 'replace'] + }); + + //////////////////// + // Graphite 1.0.x // + //////////////////// + + addFuncDef({ + name: 'aggregateLine', + category: categories.Combine, + params: [{ name: "func", type: "select", options: ['sum', 'avg', 'min', 'max', 'last']}], + defaultParams: ['avg'], + version: '1.0' + }); + + addFuncDef({ + name: 'averageOutsidePercentile', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [95], + version: '1.0' + }); + + addFuncDef({ + name: 'delay', + category: categories.Transform, + params: [{ name: 'steps', type: 'int', }], + 
defaultParams: [1], + version: '1.0' + }); + + addFuncDef({ + name: 'exponentialMovingAverage', + category: categories.Calculate, + params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: [10], + version: '1.0' + }); + + addFuncDef({ + name: 'fallbackSeries', + category: categories.Special, + params: [{ name: 'fallback', type: 'string' }], + defaultParams: ['constantLine(0)'], + version: '1.0' + }); + + addFuncDef({ + name: "grep", + category: categories.Filter, + params: [{ name: "grep", type: 'string' }], + defaultParams: ['grep'], + version: '1.0' + }); + + addFuncDef({ + name: "groupByNodes", + category: categories.Special, + params: [ + { + name: "function", + type: "string", + options: ['sum', 'avg', 'maxSeries'] + }, + { name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "node", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + ], + defaultParams: ["sum", 3], + version: '1.0' + }); + + addFuncDef({ + name: 'integralByInterval', + category: categories.Transform, + params: [{ name: "intervalUnit", type: "select", options: ['1h', '6h', '12h', '1d', '2d', '7d', '14d', '30d'] }], + defaultParams: ['1d'], + version: '1.0' + }); + + addFuncDef({ + name: 'interpolate', + category: categories.Transform, + params: [{ name: 'limit', type: 'int', optional: true}], + defaultParams: [], + version: '1.0' + }); + + addFuncDef({ + name: 'invert', + category: categories.Transform, + version: '1.0' + }); + + addFuncDef({ + name: 'isNonNull', + category: categories.Combine, + version: '1.0' + }); + + addFuncDef({ + name: 'linearRegression', + category: categories.Calculate, + params: [ + { name: "startSourceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true }, + { name: "endSourceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true } + ], + defaultParams: [], + version: '1.0' + }); + + addFuncDef({ + name: 'mapSeries', + shortName: 'map', + params: [{ name: "node", type: 'int' }], + defaultParams: [3], + category: categories.Combine, + version: '1.0' + }); + + addFuncDef({ + name: 'movingMin', + category: categories.Calculate, + params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: [10], + version: '1.0' + }); + + addFuncDef({ + name: 'movingMax', + category: categories.Calculate, + params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: [10], + version: '1.0' + }); + + addFuncDef({ + name: 'movingSum', + category: categories.Calculate, + params: [{ name: 'windowSize', type: 'int_or_interval', options: ['5', '7', '10', '5min', '10min', '30min', '1hour'] }], + defaultParams: [10], + version: '1.0' + }); + + addFuncDef({ + name: "multiplySeriesWithWildcards", + category: categories.Calculate, + params: [ + { name: "position", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] }, + { name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + { name: "position", type: "int", options: [0,-1,-2,-3,-4,-5,-6,-7], optional: true }, + ], + 
defaultParams: [2], + version: '1.0' + }); + + addFuncDef({ + name: 'offsetToZero', + category: categories.Transform, + version: '1.0' + }); + + addFuncDef({ + name: 'pow', + category: categories.Transform, + params: [{ name: 'factor', type: 'int' }], + defaultParams: [10], + version: '1.0' + }); + + addFuncDef({ + name: 'powSeries', + category: categories.Transform, + params: optionalSeriesRefArgs, + defaultParams: [''], + version: '1.0' + }); + + addFuncDef({ + name: 'reduceSeries', + shortName: 'reduce', + params: [ + { name: "function", type: 'string', options: ['asPercent', 'diffSeries', 'divideSeries'] }, + { name: "reduceNode", type: 'int', options: [0,1,2,3,4,5,6,7,8,9,10,11,12,13] }, + { name: "reduceMatchers", type: 'string' }, + { name: "reduceMatchers", type: 'string' }, + ], + defaultParams: ['asPercent', 2, 'used_bytes', 'total_bytes'], + category: categories.Combine, + version: '1.0' + }); + + addFuncDef({ + name: 'removeBetweenPercentile', + category: categories.Filter, + params: [{ name: "n", type: "int", }], + defaultParams: [95], + version: '1.0' + }); + + addFuncDef({ + name: 'removeEmptySeries', + category: categories.Filter, + version: '1.0' + }); + + addFuncDef({ + name: 'squareRoot', + category: categories.Transform, + version: '1.0' + }); + + addFuncDef({ + name: 'timeSlice', + category: categories.Transform, + params: [ + { name: "startSliceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d']}, + { name: "endSliceAt", type: "select", options: ['-1h', '-6h', '-12h', '-1d', '-2d', '-7d', '-14d', '-30d'], optional: true } + ], + defaultParams: ['-1h'], + version: '1.0' + }); + + addFuncDef({ + name: 'weightedAverage', + category: categories.Filter, + params: [ + { name: 'other', type: 'value_or_series', optional: true }, + { name: "node", type: "int", options: [0,1,2,3,4,5,6,7,8,9,10,12] }, + ], + defaultParams: ['#A', 4], + version: '1.0' + }); + + addFuncDef({ + name: 'seriesByTag', + category: categories.Special, + params: [ + { name: "tagExpression", type: "string" }, + { name: "tagExpression", type: "string", optional: true }, + { name: "tagExpression", type: "string", optional: true }, + { name: "tagExpression", type: "string", optional: true }, + ], + version: '1.1' + }); + + addFuncDef({ + name: "groupByTags", + category: categories.Special, + params: [ + { + name: "function", + type: "string", + options: ['sum', 'avg', 'maxSeries'] + }, + { name: "tag", type: "string" }, + { name: "tag", type: "string", optional: true }, + { name: "tag", type: "string", optional: true }, + { name: "tag", type: "string", optional: true }, + ], + defaultParams: ["sum", "tag"], + version: '1.1' + }); + + addFuncDef({ + name: "aliasByTags", + category: categories.Special, + params: [ + { name: "tag", type: "string" }, + { name: "tag", type: "string", optional: true }, + { name: "tag", type: "string", optional: true }, + { name: "tag", type: "string", optional: true }, + ], + defaultParams: ["tag"], + version: '1.1' + }); + + _.each(categories, function(funcList, catName) { + categories[catName] = _.sortBy(funcList, 'name'); + }); + + function FuncInstance(funcDef, options) { + this.def = funcDef; + this.params = []; + + if (options && options.withDefaultParams) { + this.params = funcDef.defaultParams.slice(0); + } + + this.updateText(); + } + + FuncInstance.prototype.render = function(metricExp) { + var str = this.def.name + '('; + var parameters = _.map(this.params, function(value, index) { + + var paramType = this.def.params[index].type; + 
if (paramType === 'int' || paramType === 'value_or_series' || paramType === 'boolean') { + return value; + } + else if (paramType === 'int_or_interval' && $.isNumeric(value)) { + return value; + } + + return "'" + value + "'"; + + }.bind(this)); + + if (metricExp) { + parameters.unshift(metricExp); + } + + return str + parameters.join(', ') + ')'; + }; + + FuncInstance.prototype._hasMultipleParamsInString = function(strValue, index) { + if (strValue.indexOf(',') === -1) { + return false; + } + + return this.def.params[index + 1] && this.def.params[index + 1].optional; + }; + + FuncInstance.prototype.updateParam = function(strValue, index) { + // handle optional parameters + // if string contains ',' and next param is optional, split and update both + if (this._hasMultipleParamsInString(strValue, index)) { + _.each(strValue.split(','), function(partVal, idx) { + this.updateParam(partVal.trim(), index + idx); + }.bind(this)); + return; + } + + if (strValue === '' && this.def.params[index].optional) { + this.params.splice(index, 1); + } + else { + this.params[index] = strValue; + } + + this.updateText(); + }; + + FuncInstance.prototype.updateText = function () { + if (this.params.length === 0) { + this.text = this.def.name + '()'; + return; + } + + var text = this.def.name + '('; + text += this.params.join(', '); + text += ')'; + this.text = text; + }; + + function isVersionRelatedFunction(func, graphiteVersion) { + return version.isVersionGtOrEq(graphiteVersion, func.version) || !func.version; + } + + return { + createFuncInstance: function(funcDef, options) { + if (_.isString(funcDef)) { + if (!index[funcDef]) { + throw { message: 'Method not found ' + name }; + } + funcDef = index[funcDef]; + } + return new FuncInstance(funcDef, options); + }, + + getFuncDef: function(name) { + return index[name]; + }, + + getCategories: function(graphiteVersion) { + var filteredCategories = {}; + _.each(categories, function(functions, category) { + var filteredFuncs = _.filter(functions, function(func) { + return isVersionRelatedFunction(func, graphiteVersion); + }); + if (filteredFuncs.length) { + filteredCategories[category] = filteredFuncs; + } + }); + + return filteredCategories; + } + }; + +}); diff --git a/public/app/plugins/datasource/graphite/gfunc.ts b/public/app/plugins/datasource/graphite/gfunc.ts index a2510421ad4..855a9184c7e 100644 --- a/public/app/plugins/datasource/graphite/gfunc.ts +++ b/public/app/plugins/datasource/graphite/gfunc.ts @@ -1,4 +1,6 @@ import _ from 'lodash'; +import {isVersionGtOrEq} from 'app/core/utils/version'; + var index = []; var categories = { @@ -968,13 +970,7 @@ FuncInstance.prototype.updateText = function() { }; function isVersionRelatedFunction(func, graphiteVersion) { - return isVersionGreaterOrEqual(graphiteVersion, func.version) || !func.version; -} - -function isVersionGreaterOrEqual(a, b) { - var a_num = Number(a); - var b_num = Number(b); - return a_num >= b_num; + return isVersionGtOrEq(graphiteVersion, func.version) || !func.version; } export default { diff --git a/public/app/plugins/datasource/graphite/graphite_query.ts b/public/app/plugins/datasource/graphite/graphite_query.ts new file mode 100644 index 00000000000..2a70667fdac --- /dev/null +++ b/public/app/plugins/datasource/graphite/graphite_query.ts @@ -0,0 +1,284 @@ +import _ from 'lodash'; +import gfunc from './gfunc'; +import {Parser} from './parser'; + +export default class GraphiteQuery { + target: any; + functions: any[]; + segments: any[]; + tags: any[]; + error: any; + seriesByTagUsed: 
boolean; + checkOtherSegmentsIndex: number; + removeTagValue: string; + templateSrv: any; + scopedVars: any; + + /** @ngInject */ + constructor(target, templateSrv?, scopedVars?) { + this.target = target; + this.parseTarget(); + + this.removeTagValue = '-- remove tag --'; + } + + parseTarget() { + this.functions = []; + this.segments = []; + this.tags = []; + this.error = null; + + if (this.target.textEditor) { + return; + } + + var parser = new Parser(this.target.target); + var astNode = parser.getAst(); + if (astNode === null) { + this.checkOtherSegmentsIndex = 0; + return; + } + + if (astNode.type === 'error') { + this.error = astNode.message + " at position: " + astNode.pos; + this.target.textEditor = true; + return; + } + + try { + this.parseTargetRecursive(astNode, null, 0); + } catch (err) { + console.log('error parsing target:', err.message); + this.error = err.message; + this.target.textEditor = true; + } + + this.checkOtherSegmentsIndex = this.segments.length - 1; + this.checkForSeriesByTag(); + } + + checkForSeriesByTag() { + let seriesByTagFunc = _.find(this.functions, (func) => func.def.name === 'seriesByTag'); + if (seriesByTagFunc) { + this.seriesByTagUsed = true; + seriesByTagFunc.hidden = true; + let tags = this.splitSeriesByTagParams(seriesByTagFunc); + this.tags = tags; + } + } + + getSegmentPathUpTo(index) { + var arr = this.segments.slice(0, index); + + return _.reduce(arr, function(result, segment) { + return result ? (result + "." + segment.value) : segment.value; + }, ""); + } + + parseTargetRecursive(astNode, func, index) { + if (astNode === null) { + return null; + } + + switch (astNode.type) { + case 'function': + var innerFunc = gfunc.createFuncInstance(astNode.name, { withDefaultParams: false }); + _.each(astNode.params, (param, index) => { + this.parseTargetRecursive(param, innerFunc, index); + }); + + innerFunc.updateText(); + this.functions.push(innerFunc); + break; + case 'series-ref': + this.addFunctionParameter(func, astNode.value, index, this.segments.length > 0); + break; + case 'bool': + case 'string': + case 'number': + if ((index-1) >= func.def.params.length) { + throw { message: 'invalid number of parameters to method ' + func.def.name }; + } + var shiftBack = this.isShiftParamsBack(func); + this.addFunctionParameter(func, astNode.value, index, shiftBack); + break; + case 'metric': + if (this.segments.length > 0) { + if (astNode.segments.length !== 1) { + throw { message: 'Multiple metric params not supported, use text editor.' 
}; + } + this.addFunctionParameter(func, astNode.segments[0].value, index, true); + break; + } + + this.segments = astNode.segments; + } + } + + isShiftParamsBack(func) { + return func.def.name !== 'seriesByTag'; + } + + updateSegmentValue(segment, index) { + this.segments[index].value = segment.value; + } + + addSelectMetricSegment() { + this.segments.push({value: "select metric"}); + } + + addFunction(newFunc) { + this.functions.push(newFunc); + this.moveAliasFuncLast(); + } + + moveAliasFuncLast() { + var aliasFunc = _.find(this.functions, function(func) { + return func.def.name === 'alias' || + func.def.name === 'aliasByNode' || + func.def.name === 'aliasByMetric'; + }); + + if (aliasFunc) { + this.functions = _.without(this.functions, aliasFunc); + this.functions.push(aliasFunc); + } + } + + addFunctionParameter(func, value, index, shiftBack) { + if (shiftBack) { + index = Math.max(index - 1, 0); + } + func.params[index] = value; + } + + removeFunction(func) { + this.functions = _.without(this.functions, func); + } + + updateModelTarget(targets) { + // render query + if (!this.target.textEditor) { + var metricPath = this.getSegmentPathUpTo(this.segments.length); + this.target.target = _.reduce(this.functions, wrapFunction, metricPath); + } + + this.updateRenderedTarget(this.target, targets); + + // loop through other queries and update targetFull as needed + for (const target of targets || []) { + if (target.refId !== this.target.refId) { + this.updateRenderedTarget(target, targets); + } + } + } + + updateRenderedTarget(target, targets) { + // render nested query + var targetsByRefId = _.keyBy(targets, 'refId'); + + // no references to self + delete targetsByRefId[target.refId]; + + var nestedSeriesRefRegex = /\#([A-Z])/g; + var targetWithNestedQueries = target.target; + + // Keep interpolating until there are no query references + // The reason for the loop is that the referenced query might contain another reference to another query + while (targetWithNestedQueries.match(nestedSeriesRefRegex)) { + var updated = targetWithNestedQueries.replace(nestedSeriesRefRegex, (match, g1) => { + var t = targetsByRefId[g1]; + if (!t) { + return match; + } + + // no circular references + delete targetsByRefId[g1]; + return t.target; + }); + + if (updated === targetWithNestedQueries) { + break; + } + + targetWithNestedQueries = updated; + } + + delete target.targetFull; + if (target.target !== targetWithNestedQueries) { + target.targetFull = targetWithNestedQueries; + } + } + + splitSeriesByTagParams(func) { + const tagPattern = /([^\!=~]+)([\!=~]+)([^\!=~]+)/; + return _.flatten(_.map(func.params, (param: string) => { + let matches = tagPattern.exec(param); + if (matches) { + let tag = matches.slice(1); + if (tag.length === 3) { + return { + key: tag[0], + operator: tag[1], + value: tag[2] + }; + } + } + return []; + })); + } + + getSeriesByTagFuncIndex() { + return _.findIndex(this.functions, (func) => func.def.name === 'seriesByTag'); + } + + getSeriesByTagFunc() { + let seriesByTagFuncIndex = this.getSeriesByTagFuncIndex(); + if (seriesByTagFuncIndex >= 0) { + return this.functions[seriesByTagFuncIndex]; + } else { + return undefined; + } + } + + addTag(tag) { + let newTagParam = renderTagString(tag); + this.getSeriesByTagFunc().params.push(newTagParam); + this.tags.push(tag); + } + + removeTag(index) { + this.getSeriesByTagFunc().params.splice(index, 1); + this.tags.splice(index, 1); + } + + updateTag(tag, tagIndex) { + this.error = null; + + if (tag.key === this.removeTagValue) { + 
this.removeTag(tagIndex); + return; + } + + let newTagParam = renderTagString(tag); + this.getSeriesByTagFunc().params[tagIndex] = newTagParam; + this.tags[tagIndex] = tag; + } + + renderTagExpressions(excludeIndex = -1) { + return _.compact(_.map(this.tags, (tagExpr, index) => { + // Don't render tag that we want to lookup + if (index !== excludeIndex) { + return tagExpr.key + tagExpr.operator + tagExpr.value; + } + })); + } +} + +function wrapFunction(target, func) { + return func.render(target); +} + +function renderTagString(tag) { + return tag.key + tag.operator + tag.value; +} diff --git a/public/app/plugins/datasource/graphite/partials/query.editor.html b/public/app/plugins/datasource/graphite/partials/query.editor.html index 5646f005be9..b79164e1781 100755 --- a/public/app/plugins/datasource/graphite/partials/query.editor.html +++ b/public/app/plugins/datasource/graphite/partials/query.editor.html @@ -1,17 +1,53 @@ -
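As an aside (not part of the patch), here is a minimal sketch of the tag round-trip that the new GraphiteQuery class above implements for seriesByTag: splitSeriesByTagParams turns each function parameter into a {key, operator, value} object, and renderTagString joins them back into parameters. The standalone parseTags/renderTags helpers below are simplified stand-ins for those methods, not the actual implementation.

```ts
// Illustrative sketch: seriesByTag parameters <-> tag objects.
// The regex mirrors splitSeriesByTagParams in graphite_query.ts.
interface GraphiteTag { key: string; operator: string; value: string; }

const tagPattern = /([^\!=~]+)([\!=~]+)([^\!=~]+)/;

function parseTags(params: string[]): GraphiteTag[] {
  return params
    .map(param => tagPattern.exec(param))
    .filter((m): m is RegExpExecArray => m !== null)
    .map(m => ({ key: m[1], operator: m[2], value: m[3] }));
}

function renderTags(tags: GraphiteTag[]): string {
  // Mirrors renderTagString: key + operator + value, quoted as function params.
  return 'seriesByTag(' + tags.map(t => `'${t.key}${t.operator}${t.value}'`).join(', ') + ')';
}

// parseTags(["tag1=value1", "tag2!=~value2"])
//   -> [{key: 'tag1', operator: '=', value: 'value1'},
//       {key: 'tag2', operator: '!=~', value: 'value2'}]
// renderTags(parseTags(["tag1=value1", "tag2!=~value2"]))
//   -> "seriesByTag('tag1=value1', 'tag2!=~value2')"
```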
    diff --git a/public/app/plugins/datasource/graphite/query_ctrl.ts b/public/app/plugins/datasource/graphite/query_ctrl.ts index fd753fb77e1..c14836b4bdd 100644 --- a/public/app/plugins/datasource/graphite/query_ctrl.ts +++ b/public/app/plugins/datasource/graphite/query_ctrl.ts @@ -3,24 +3,39 @@ import './func_editor'; import _ from 'lodash'; import gfunc from './gfunc'; -import {Parser} from './parser'; +import GraphiteQuery from './graphite_query'; import {QueryCtrl} from 'app/plugins/sdk'; import appEvents from 'app/core/app_events'; +const GRAPHITE_TAG_OPERATORS = ['=', '!=', '=~', '!=~']; +const TAG_PREFIX = 'tag: '; + export class GraphiteQueryCtrl extends QueryCtrl { static templateUrl = 'partials/query.editor.html'; - functions: any[]; + queryModel: GraphiteQuery; segments: any[]; + addTagSegments: any[]; + removeTagValue: string; + supportsTags: boolean; /** @ngInject **/ constructor($scope, $injector, private uiSegmentSrv, private templateSrv) { super($scope, $injector); + this.supportsTags = this.datasource.supportsTags; if (this.target) { this.target.target = this.target.target || ''; - this.parseTarget(); + this.queryModel = new GraphiteQuery(this.target, templateSrv); + this.buildSegments(); } + + this.removeTagValue = '-- remove tag --'; + } + + parseTarget() { + this.queryModel.parseTarget(); + this.buildSegments(); } toggleEditorMode() { @@ -28,107 +43,31 @@ export class GraphiteQueryCtrl extends QueryCtrl { this.parseTarget(); } - parseTarget() { - this.functions = []; - this.segments = []; - this.error = null; + buildSegments() { + this.segments = _.map(this.queryModel.segments, segment => { + return this.uiSegmentSrv.newSegment(segment); + }); - if (this.target.textEditor) { - return; - } + let checkOtherSegmentsIndex = this.queryModel.checkOtherSegmentsIndex || 0; + this.checkOtherSegments(checkOtherSegmentsIndex); - var parser = new Parser(this.target.target); - var astNode = parser.getAst(); - if (astNode === null) { - this.checkOtherSegments(0); - return; - } - - if (astNode.type === 'error') { - this.error = astNode.message + " at position: " + astNode.pos; - this.target.textEditor = true; - return; - } - - try { - this.parseTargetRecursive(astNode, null, 0); - } catch (err) { - console.log('error parsing target:', err.message); - this.error = err.message; - this.target.textEditor = true; - } - - this.checkOtherSegments(this.segments.length - 1); - } - - addFunctionParameter(func, value, index, shiftBack) { - if (shiftBack) { - index = Math.max(index - 1, 0); - } - func.params[index] = value; - } - - parseTargetRecursive(astNode, func, index) { - if (astNode === null) { - return null; - } - - switch (astNode.type) { - case 'function': - var innerFunc = gfunc.createFuncInstance(astNode.name, { withDefaultParams: false }); - _.each(astNode.params, (param, index) => { - this.parseTargetRecursive(param, innerFunc, index); - }); - - innerFunc.updateText(); - this.functions.push(innerFunc); - break; - case 'series-ref': - this.addFunctionParameter(func, astNode.value, index, this.segments.length > 0); - break; - case 'bool': - case 'string': - case 'number': - if ((index-1) >= func.def.params.length) { - throw { message: 'invalid number of parameters to method ' + func.def.name }; - } - var shiftBack = this.isShiftParamsBack(func); - this.addFunctionParameter(func, astNode.value, index, shiftBack); - break; - case 'metric': - if (this.segments.length > 0) { - if (astNode.segments.length !== 1) { - throw { message: 'Multiple metric params not supported, use text 
editor.' }; - } - this.addFunctionParameter(func, astNode.segments[0].value, index, true); - break; - } - - this.segments = _.map(astNode.segments, segment => { - return this.uiSegmentSrv.newSegment(segment); - }); + if (this.queryModel.seriesByTagUsed) { + this.fixTagSegments(); } } - isShiftParamsBack(func) { - return func.def.name !== 'seriesByTag'; - } - - getSegmentPathUpTo(index) { - var arr = this.segments.slice(0, index); - - return _.reduce(arr, function(result, segment) { - return result ? (result + "." + segment.value) : segment.value; - }, ""); + addSelectMetricSegment() { + this.queryModel.addSelectMetricSegment(); + this.segments.push(this.uiSegmentSrv.newSelectMetric()); } checkOtherSegments(fromIndex) { if (fromIndex === 0) { - this.segments.push(this.uiSegmentSrv.newSelectMetric()); + this.addSelectMetricSegment(); return; } - var path = this.getSegmentPathUpTo(fromIndex + 1); + var path = this.queryModel.getSegmentPathUpTo(fromIndex + 1); if (path === "") { return Promise.resolve(); } @@ -136,12 +75,13 @@ export class GraphiteQueryCtrl extends QueryCtrl { return this.datasource.metricFindQuery(path).then(segments => { if (segments.length === 0) { if (path !== '') { + this.queryModel.segments = this.queryModel.segments.splice(0, fromIndex); this.segments = this.segments.splice(0, fromIndex); - this.segments.push(this.uiSegmentSrv.newSelectMetric()); + this.addSelectMetricSegment(); } } else if (segments[0].expandable) { if (this.segments.length === fromIndex) { - this.segments.push(this.uiSegmentSrv.newSelectMetric()); + this.addSelectMetricSegment(); } else { return this.checkOtherSegments(fromIndex + 1); } @@ -157,12 +97,8 @@ export class GraphiteQueryCtrl extends QueryCtrl { }); } - wrapFunction(target, func) { - return func.render(target); - } - getAltSegments(index) { - var query = index === 0 ? '*' : this.getSegmentPathUpTo(index) + '.*'; + var query = index === 0 ? 
'*' : this.queryModel.getSegmentPathUpTo(index) + '.*'; var options = {range: this.panelCtrl.range, requestId: "get-alt-segments"}; return this.datasource.metricFindQuery(query, options).then(segments => { @@ -183,17 +119,44 @@ export class GraphiteQueryCtrl extends QueryCtrl { // add wildcard option altSegments.unshift(this.uiSegmentSrv.newSegment('*')); - return altSegments; + + if (this.supportsTags && index === 0) { + this.removeTaggedEntry(altSegments); + return this.addAltTagSegments(index, altSegments); + } else { + return altSegments; + } }).catch(err => { return []; }); } + addAltTagSegments(index, altSegments) { + return this.getTagsAsSegments().then((tagSegments) => { + tagSegments = _.map(tagSegments, (segment) => { + segment.value = TAG_PREFIX + segment.value; + return segment; + }); + return altSegments.concat(...tagSegments); + }); + } + + removeTaggedEntry(altSegments) { + altSegments = _.remove(altSegments, (s) => s.value === '_tagged'); + } + segmentValueChanged(segment, segmentIndex) { this.error = null; + this.queryModel.updateSegmentValue(segment, segmentIndex); - if (this.functions.length > 0 && this.functions[0].def.fake) { - this.functions = []; + if (this.queryModel.functions.length > 0 && this.queryModel.functions[0].def.fake) { + this.queryModel.functions = []; + } + + if (segment.type === 'tag') { + let tag = removeTagPrefix(segment.value); + this.addSeriesByTagFunc(tag); + return; } if (segment.expandable) { @@ -202,81 +165,41 @@ export class GraphiteQueryCtrl extends QueryCtrl { this.targetChanged(); }); } else { - this.segments = this.segments.splice(0, segmentIndex + 1); + this.spliceSegments(segmentIndex + 1); } this.setSegmentFocus(segmentIndex + 1); this.targetChanged(); } + spliceSegments(index) { + this.segments = this.segments.splice(0, index); + this.queryModel.segments = this.queryModel.segments.splice(0, index); + } + + emptySegments() { + this.queryModel.segments = []; + this.segments = []; + } + targetTextChanged() { this.updateModelTarget(); this.refresh(); } updateModelTarget() { - // render query - if (!this.target.textEditor) { - var metricPath = this.getSegmentPathUpTo(this.segments.length); - this.target.target = _.reduce(this.functions, this.wrapFunction, metricPath); - } - - this.updateRenderedTarget(this.target); - - // loop through other queries and update targetFull as needed - for (const target of this.panelCtrl.panel.targets || []) { - if (target.refId !== this.target.refId) { - this.updateRenderedTarget(target); - } - } - } - - updateRenderedTarget(target) { - // render nested query - var targetsByRefId = _.keyBy(this.panelCtrl.panel.targets, 'refId'); - - // no references to self - delete targetsByRefId[target.refId]; - - var nestedSeriesRefRegex = /\#([A-Z])/g; - var targetWithNestedQueries = target.target; - - // Keep interpolating until there are no query references - // The reason for the loop is that the referenced query might contain another reference to another query - while (targetWithNestedQueries.match(nestedSeriesRefRegex)) { - var updated = targetWithNestedQueries.replace(nestedSeriesRefRegex, (match, g1) => { - var t = targetsByRefId[g1]; - if (!t) { - return match; - } - - // no circular references - delete targetsByRefId[g1]; - return t.target; - }); - - if (updated === targetWithNestedQueries) { - break; - } - - targetWithNestedQueries = updated; - } - - delete target.targetFull; - if (target.target !== targetWithNestedQueries) { - target.targetFull = targetWithNestedQueries; - } + 
this.queryModel.updateModelTarget(this.panelCtrl.panel.targets); } targetChanged() { - if (this.error) { + if (this.queryModel.error) { return; } - var oldTarget = this.target.target; + var oldTarget = this.queryModel.target.target; this.updateModelTarget(); - if (this.target.target !== oldTarget) { + if (this.queryModel.target !== oldTarget) { var lastSegment = this.segments.length > 0 ? this.segments[this.segments.length - 1] : {}; if (lastSegment.value !== 'select metric') { this.panelCtrl.refresh(); @@ -284,39 +207,41 @@ export class GraphiteQueryCtrl extends QueryCtrl { } } - removeFunction(func) { - this.functions = _.without(this.functions, func); - this.targetChanged(); - } - addFunction(funcDef) { var newFunc = gfunc.createFuncInstance(funcDef, { withDefaultParams: true }); newFunc.added = true; - this.functions.push(newFunc); - - this.moveAliasFuncLast(); + this.queryModel.addFunction(newFunc); this.smartlyHandleNewAliasByNode(newFunc); if (this.segments.length === 1 && this.segments[0].fake) { - this.segments = []; + this.emptySegments(); } if (!newFunc.params.length && newFunc.added) { this.targetChanged(); } + + if (newFunc.def.name === 'seriesByTag') { + this.parseTarget(); + } } - moveAliasFuncLast() { - var aliasFunc = _.find(this.functions, function(func) { - return func.def.name === 'alias' || - func.def.name === 'aliasByNode' || - func.def.name === 'aliasByMetric'; - }); + removeFunction(func) { + this.queryModel.removeFunction(func); + this.targetChanged(); + } - if (aliasFunc) { - this.functions = _.without(this.functions, aliasFunc); - this.functions.push(aliasFunc); - } + addSeriesByTagFunc(tag) { + let funcDef = gfunc.getFuncDef('seriesByTag'); + let newFunc = gfunc.createFuncInstance(funcDef, { withDefaultParams: false }); + let tagParam = `${tag}=select tag value`; + newFunc.params = [tagParam]; + this.queryModel.addFunction(newFunc); + newFunc.added = true; + + this.emptySegments(); + this.targetChanged(); + this.parseTarget(); } smartlyHandleNewAliasByNode(func) { @@ -325,7 +250,7 @@ export class GraphiteQueryCtrl extends QueryCtrl { } for (var i = 0; i < this.segments.length; i++) { - if (this.segments[i].value.indexOf('*') >= 0) { + if (this.segments[i].value.indexOf('*') >= 0) { func.params[0] = i; func.added = false; this.targetChanged(); @@ -333,4 +258,90 @@ export class GraphiteQueryCtrl extends QueryCtrl { } } } + + getAllTags() { + return this.datasource.getTags().then((values) => { + let altTags = _.map(values, 'text'); + altTags.splice(0, 0, this.removeTagValue); + return mapToDropdownOptions(altTags); + }); + } + + getTags(index, tagPrefix) { + let tagExpressions = this.queryModel.renderTagExpressions(index); + return this.datasource.getTagsAutoComplete(tagExpressions, tagPrefix) + .then((values) => { + let altTags = _.map(values, 'text'); + altTags.splice(0, 0, this.removeTagValue); + return mapToDropdownOptions(altTags); + }); + } + + getTagsAsSegments() { + let tagExpressions = this.queryModel.renderTagExpressions(); + return this.datasource.getTagsAutoComplete(tagExpressions) + .then((values) => { + return _.map(values, (val) => { + return this.uiSegmentSrv.newSegment({value: val.text, type: 'tag', expandable: false}); + }); + }); + } + + getTagOperators() { + return mapToDropdownOptions(GRAPHITE_TAG_OPERATORS); + } + + getAllTagValues(tag) { + let tagKey = tag.key; + return this.datasource.getTagValues(tagKey).then((values) => { + let altValues = _.map(values, 'text'); + return mapToDropdownOptions(altValues); + }); + } + + getTagValues(tag, 
index, valuePrefix) { + let tagExpressions = this.queryModel.renderTagExpressions(index); + let tagKey = tag.key; + return this.datasource.getTagValuesAutoComplete(tagExpressions, tagKey, valuePrefix).then((values) => { + let altValues = _.map(values, 'text'); + return mapToDropdownOptions(altValues); + }); + } + + tagChanged(tag, tagIndex) { + this.queryModel.updateTag(tag, tagIndex); + this.targetChanged(); + } + + addNewTag(segment) { + let newTagKey = segment.value; + let newTag = {key: newTagKey, operator: '=', value: 'select tag value'}; + this.queryModel.addTag(newTag); + this.targetChanged(); + this.fixTagSegments(); + } + + removeTag(index) { + this.queryModel.removeTag(index); + this.targetChanged(); + } + + fixTagSegments() { + // Adding tag with the same name as just removed works incorrectly if single segment is used (instead of array) + this.addTagSegments = [this.uiSegmentSrv.newPlusButton()]; + } + + showDelimiter(index) { + return index !== this.queryModel.tags.length - 1; + } +} + +function mapToDropdownOptions(results) { + return _.map(results, (value) => { + return {text: value, value: value}; + }); +} + +function removeTagPrefix(value: string): string { + return value.replace(TAG_PREFIX, ''); } diff --git a/public/app/plugins/datasource/graphite/specs/query_ctrl_specs.ts b/public/app/plugins/datasource/graphite/specs/query_ctrl_specs.ts index bbcd90bd40f..177c1e2a0d6 100644 --- a/public/app/plugins/datasource/graphite/specs/query_ctrl_specs.ts +++ b/public/app/plugins/datasource/graphite/specs/query_ctrl_specs.ts @@ -48,7 +48,7 @@ describe('GraphiteQueryCtrl', function() { }); it('should parse expression and build function model', function() { - expect(ctx.ctrl.functions.length).to.be(2); + expect(ctx.ctrl.queryModel.functions.length).to.be(2); }); }); @@ -61,7 +61,7 @@ describe('GraphiteQueryCtrl', function() { }); it('should add function with correct node number', function() { - expect(ctx.ctrl.functions[0].params[0]).to.be(2); + expect(ctx.ctrl.queryModel.functions[0].params[0]).to.be(2); }); it('should update target', function() { @@ -99,7 +99,7 @@ describe('GraphiteQueryCtrl', function() { }); it('should add both series refs as params', function() { - expect(ctx.ctrl.functions[0].params.length).to.be(2); + expect(ctx.ctrl.queryModel.functions[0].params.length).to.be(2); }); }); @@ -115,7 +115,7 @@ describe('GraphiteQueryCtrl', function() { }); it('should add function param', function() { - expect(ctx.ctrl.functions[0].params.length).to.be(1); + expect(ctx.ctrl.queryModel.functions[0].params.length).to.be(1); }); }); @@ -131,7 +131,7 @@ describe('GraphiteQueryCtrl', function() { }); it('should have correct func params', function() { - expect(ctx.ctrl.functions[0].params.length).to.be(1); + expect(ctx.ctrl.queryModel.functions[0].params.length).to.be(1); }); }); @@ -210,4 +210,113 @@ describe('GraphiteQueryCtrl', function() { }); }); + describe('when adding seriesByTag function', function() { + beforeEach(function() { + ctx.ctrl.target.target = ''; + ctx.ctrl.datasource.metricFindQuery = sinon.stub().returns(ctx.$q.when([{expandable: false}])); + ctx.ctrl.parseTarget(); + ctx.ctrl.addFunction(gfunc.getFuncDef('seriesByTag')); + }); + + it('should update functions', function() { + expect(ctx.ctrl.queryModel.getSeriesByTagFuncIndex()).to.be(0); + }); + + it('should update seriesByTagUsed flag', function() { + expect(ctx.ctrl.queryModel.seriesByTagUsed).to.be(true); + }); + + it('should update target', function() { + 
expect(ctx.ctrl.target.target).to.be('seriesByTag()'); + }); + + it('should call refresh', function() { + expect(ctx.panelCtrl.refresh.called).to.be(true); + }); + }); + + describe('when parsing seriesByTag function', function() { + beforeEach(function() { + ctx.ctrl.target.target = "seriesByTag('tag1=value1', 'tag2!=~value2')"; + ctx.ctrl.datasource.metricFindQuery = sinon.stub().returns(ctx.$q.when([{expandable: false}])); + ctx.ctrl.parseTarget(); + }); + + it('should add tags', function() { + const expected = [ + {key: 'tag1', operator: '=', value: 'value1'}, + {key: 'tag2', operator: '!=~', value: 'value2'} + ]; + expect(ctx.ctrl.queryModel.tags).to.eql(expected); + }); + + it('should add plus button', function() { + expect(ctx.ctrl.addTagSegments.length).to.be(1); + }); + }); + + describe('when tag added', function() { + beforeEach(function() { + ctx.ctrl.target.target = "seriesByTag()"; + ctx.ctrl.datasource.metricFindQuery = sinon.stub().returns(ctx.$q.when([{expandable: false}])); + ctx.ctrl.parseTarget(); + ctx.ctrl.addNewTag({value: 'tag1'}); + }); + + it('should update tags with default value', function() { + const expected = [ + {key: 'tag1', operator: '=', value: 'select tag value'} + ]; + expect(ctx.ctrl.queryModel.tags).to.eql(expected); + }); + + it('should update target', function() { + const expected = "seriesByTag('tag1=select tag value')"; + expect(ctx.ctrl.target.target).to.eql(expected); + }); + }); + + describe('when tag changed', function() { + beforeEach(function() { + ctx.ctrl.target.target = "seriesByTag('tag1=value1', 'tag2!=~value2')"; + ctx.ctrl.datasource.metricFindQuery = sinon.stub().returns(ctx.$q.when([{expandable: false}])); + ctx.ctrl.parseTarget(); + ctx.ctrl.tagChanged({key: 'tag1', operator: '=', value: 'new_value'}, 0); + }); + + it('should update tags', function() { + const expected = [ + {key: 'tag1', operator: '=', value: 'new_value'}, + {key: 'tag2', operator: '!=~', value: 'value2'} + ]; + expect(ctx.ctrl.queryModel.tags).to.eql(expected); + }); + + it('should update target', function() { + const expected = "seriesByTag('tag1=new_value', 'tag2!=~value2')"; + expect(ctx.ctrl.target.target).to.eql(expected); + }); + }); + + describe('when tag removed', function() { + beforeEach(function() { + ctx.ctrl.target.target = "seriesByTag('tag1=value1', 'tag2!=~value2')"; + ctx.ctrl.datasource.metricFindQuery = sinon.stub().returns(ctx.$q.when([{expandable: false}])); + ctx.ctrl.parseTarget(); + ctx.ctrl.removeTag(0); + }); + + it('should update tags', function() { + const expected = [ + {key: 'tag2', operator: '!=~', value: 'value2'} + ]; + expect(ctx.ctrl.queryModel.tags).to.eql(expected); + }); + + it('should update target', function() { + const expected = "seriesByTag('tag2!=~value2')"; + expect(ctx.ctrl.target.target).to.eql(expected); + }); + }); + }); diff --git a/public/app/plugins/datasource/influxdb/datasource.ts b/public/app/plugins/datasource/influxdb/datasource.ts index c0aa246c093..2414217a367 100644 --- a/public/app/plugins/datasource/influxdb/datasource.ts +++ b/public/app/plugins/datasource/influxdb/datasource.ts @@ -4,8 +4,7 @@ import * as dateMath from 'app/core/utils/datemath'; import InfluxSeries from './influx_series'; import InfluxQuery from './influx_query'; import ResponseParser from './response_parser'; -import InfluxQueryBuilder from './query_builder'; - +import {InfluxQueryBuilder} from './query_builder'; export default class InfluxDatasource { type: string; @@ -190,10 +189,13 @@ export default class InfluxDatasource { } 
testDatasource() { - return this.metricFindQuery('SHOW DATABASES').then(res => { - let found = _.find(res, {text: this.database}); - if (!found) { - return { status: "error", message: "Could not find the specified database name." }; + var queryBuilder = new InfluxQueryBuilder({measurement: '', tags: []}, this.database); + var query = queryBuilder.buildExploreQuery('RETENTION POLICIES'); + + return this._seriesQuery(query).then(res => { + let error = _.get(res, 'results[0].error'); + if (error) { + return { status: "error", message: error }; } return { status: "success", message: "Data source is working" }; }).catch(err => { diff --git a/public/app/plugins/datasource/influxdb/query_builder.d.ts b/public/app/plugins/datasource/influxdb/query_builder.d.ts deleted file mode 100644 index c3318b8e133..00000000000 --- a/public/app/plugins/datasource/influxdb/query_builder.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -declare var test: any; -export default test; diff --git a/public/app/plugins/datasource/influxdb/query_builder.js b/public/app/plugins/datasource/influxdb/query_builder.js deleted file mode 100644 index d8051338838..00000000000 --- a/public/app/plugins/datasource/influxdb/query_builder.js +++ /dev/null @@ -1,105 +0,0 @@ -define([ - 'lodash' -], -function (_) { - 'use strict'; - - function InfluxQueryBuilder(target, database) { - this.target = target; - this.database = database; - } - - function renderTagCondition (tag, index) { - var str = ""; - var operator = tag.operator; - var value = tag.value; - if (index > 0) { - str = (tag.condition || 'AND') + ' '; - } - - if (!operator) { - if (/^\/.*\/$/.test(tag.value)) { - operator = '=~'; - } else { - operator = '='; - } - } - - // quote value unless regex or number - if (operator !== '=~' && operator !== '!~' && isNaN(+value)) { - value = "'" + value + "'"; - } - - return str + '"' + tag.key + '" ' + operator + ' ' + value; - } - - var p = InfluxQueryBuilder.prototype; - - p.build = function() { - return this.target.rawQuery ? 
this._modifyRawQuery() : this._buildQuery(); - }; - - p.buildExploreQuery = function(type, withKey, withMeasurementFilter) { - var query; - var measurement; - - if (type === 'TAG_KEYS') { - query = 'SHOW TAG KEYS'; - measurement = this.target.measurement; - } else if (type === 'TAG_VALUES') { - query = 'SHOW TAG VALUES'; - measurement = this.target.measurement; - } else if (type === 'MEASUREMENTS') { - query = 'SHOW MEASUREMENTS'; - if (withMeasurementFilter) - { - query += ' WITH MEASUREMENT =~ /' + withMeasurementFilter +'/'; - } - } else if (type === 'FIELDS') { - if (!this.target.measurement.match('^/.*/')) { - return 'SHOW FIELD KEYS FROM "' + this.target.measurement + '"'; - } else { - return 'SHOW FIELD KEYS FROM ' + this.target.measurement; - } - } else if (type === 'RETENTION POLICIES') { - query = 'SHOW RETENTION POLICIES on "' + this.database + '"'; - return query; - } - - if (measurement) { - if (!measurement.match('^/.*/') && !measurement.match(/^merge\(.*\)/)) { - measurement = '"' + measurement+ '"'; - } - query += ' FROM ' + measurement; - } - - if (withKey) { - query += ' WITH KEY = "' + withKey + '"'; - } - - if (this.target.tags && this.target.tags.length > 0) { - var whereConditions = _.reduce(this.target.tags, function(memo, tag) { - // do not add a condition for the key we want to explore for - if (tag.key === withKey) { - return memo; - } - memo.push(renderTagCondition(tag, memo.length)); - return memo; - }, []); - - if (whereConditions.length > 0) { - query += ' WHERE ' + whereConditions.join(' '); - } - } - if (type === 'MEASUREMENTS') - { - query += ' LIMIT 100'; - //Solve issue #2524 by limiting the number of measurements returned - //LIMIT must be after WITH MEASUREMENT and WHERE clauses - //This also could be used for TAG KEYS and TAG VALUES, if desired - } - return query; - }; - - return InfluxQueryBuilder; -}); diff --git a/public/app/plugins/datasource/influxdb/query_builder.ts b/public/app/plugins/datasource/influxdb/query_builder.ts new file mode 100644 index 00000000000..d4442e84c37 --- /dev/null +++ b/public/app/plugins/datasource/influxdb/query_builder.ts @@ -0,0 +1,112 @@ +import _ from 'lodash'; + +function renderTagCondition(tag, index) { + var str = ''; + var operator = tag.operator; + var value = tag.value; + if (index > 0) { + str = (tag.condition || 'AND') + ' '; + } + + if (!operator) { + if (/^\/.*\/$/.test(tag.value)) { + operator = '=~'; + } else { + operator = '='; + } + } + + // quote value unless regex or number + if (operator !== '=~' && operator !== '!~' && isNaN(+value)) { + value = "'" + value + "'"; + } + + return str + '"' + tag.key + '" ' + operator + ' ' + value; +} + +export class InfluxQueryBuilder { + + constructor(private target, private database?) 
{ } + + buildExploreQuery(type: string, withKey?: string, withMeasurementFilter?: string) { + var query; + var measurement; + var policy; + + if (type === 'TAG_KEYS') { + query = 'SHOW TAG KEYS'; + measurement = this.target.measurement; + policy = this.target.policy; + } else if (type === 'TAG_VALUES') { + query = 'SHOW TAG VALUES'; + measurement = this.target.measurement; + policy = this.target.policy; + } else if (type === 'MEASUREMENTS') { + query = 'SHOW MEASUREMENTS'; + if (withMeasurementFilter) { + query += ' WITH MEASUREMENT =~ /' + withMeasurementFilter + '/'; + } + } else if (type === 'FIELDS') { + measurement = this.target.measurement; + policy = this.target.policy; + + if (!measurement.match('^/.*/')) { + measurement = '"' + measurement + '"'; + + if (policy && policy !== 'default') { + policy = '"' + policy + '"'; + measurement = policy + '.' + measurement; + } + } + + return 'SHOW FIELD KEYS FROM ' + measurement; + + } else if (type === 'RETENTION POLICIES') { + query = 'SHOW RETENTION POLICIES on "' + this.database + '"'; + return query; + } + + if (measurement) { + if (!measurement.match('^/.*/') && !measurement.match(/^merge\(.*\)/)) { + measurement = '"' + measurement + '"'; + } + + if (policy && policy !== 'default') { + policy = '"' + policy + '"'; + measurement = policy + '.' + measurement; + } + + query += ' FROM ' + measurement; + } + + if (withKey) { + query += ' WITH KEY = "' + withKey + '"'; + } + + if (this.target.tags && this.target.tags.length > 0) { + var whereConditions = _.reduce( + this.target.tags, + function(memo, tag) { + // do not add a condition for the key we want to explore for + if (tag.key === withKey) { + return memo; + } + memo.push(renderTagCondition(tag, memo.length)); + return memo; + }, + [], + ); + + if (whereConditions.length > 0) { + query += ' WHERE ' + whereConditions.join(' '); + } + } + if (type === 'MEASUREMENTS') { + query += ' LIMIT 100'; + //Solve issue #2524 by limiting the number of measurements returned + //LIMIT must be after WITH MEASUREMENT and WHERE clauses + //This also could be used for TAG KEYS and TAG VALUES, if desired + } + return query; + } +} diff --git a/public/app/plugins/datasource/influxdb/query_ctrl.ts b/public/app/plugins/datasource/influxdb/query_ctrl.ts index 5c5bece75a2..5583bec9f53 100644 --- a/public/app/plugins/datasource/influxdb/query_ctrl.ts +++ b/public/app/plugins/datasource/influxdb/query_ctrl.ts @@ -1,8 +1,6 @@ -/// - import angular from 'angular'; import _ from 'lodash'; -import InfluxQueryBuilder from './query_builder'; +import {InfluxQueryBuilder} from './query_builder'; import InfluxQuery from './influx_query'; import queryPart from './query_part'; import {QueryCtrl} from 'app/plugins/sdk'; diff --git a/public/app/plugins/datasource/influxdb/specs/query_builder_specs.ts b/public/app/plugins/datasource/influxdb/specs/query_builder.jest.ts similarity index 50% rename from public/app/plugins/datasource/influxdb/specs/query_builder_specs.ts rename to public/app/plugins/datasource/influxdb/specs/query_builder.jest.ts index 86b1d4f08dd..a044093a225 100644 --- a/public/app/plugins/datasource/influxdb/specs/query_builder_specs.ts +++ b/public/app/plugins/datasource/influxdb/specs/query_builder.jest.ts @@ -1,103 +1,121 @@ -import {describe, it, expect} from 'test/lib/common'; -import InfluxQueryBuilder from '../query_builder'; +import { describe, it, expect } from 'test/lib/common'; +import { InfluxQueryBuilder } from '../query_builder'; describe('InfluxQueryBuilder', function() { - describe('when 
building explore queries', function() { - it('should only have measurement condition in tag keys query given query with measurement', function() { var builder = new InfluxQueryBuilder({ measurement: 'cpu', tags: [] }); var query = builder.buildExploreQuery('TAG_KEYS'); - expect(query).to.be('SHOW TAG KEYS FROM "cpu"'); + expect(query).toBe('SHOW TAG KEYS FROM "cpu"'); }); it('should handle regex measurement in tag keys query', function() { var builder = new InfluxQueryBuilder({ - measurement: '/.*/', tags: [] + measurement: '/.*/', + tags: [], }); var query = builder.buildExploreQuery('TAG_KEYS'); - expect(query).to.be('SHOW TAG KEYS FROM /.*/'); + expect(query).toBe('SHOW TAG KEYS FROM /.*/'); }); it('should have no conditions in tags keys query given query with no measurement or tag', function() { var builder = new InfluxQueryBuilder({ measurement: '', tags: [] }); var query = builder.buildExploreQuery('TAG_KEYS'); - expect(query).to.be('SHOW TAG KEYS'); + expect(query).toBe('SHOW TAG KEYS'); }); it('should have where condition in tag keys query with tags', function() { - var builder = new InfluxQueryBuilder({ measurement: '', tags: [{key: 'host', value: 'se1'}] }); + var builder = new InfluxQueryBuilder({ measurement: '', tags: [{ key: 'host', value: 'se1' }] }); var query = builder.buildExploreQuery('TAG_KEYS'); - expect(query).to.be("SHOW TAG KEYS WHERE \"host\" = 'se1'"); + expect(query).toBe('SHOW TAG KEYS WHERE "host" = \'se1\''); }); it('should have no conditions in measurement query for query with no tags', function() { var builder = new InfluxQueryBuilder({ measurement: '', tags: [] }); var query = builder.buildExploreQuery('MEASUREMENTS'); - expect(query).to.be('SHOW MEASUREMENTS LIMIT 100'); + expect(query).toBe('SHOW MEASUREMENTS LIMIT 100'); }); it('should have no conditions in measurement query for query with no tags and empty query', function() { var builder = new InfluxQueryBuilder({ measurement: '', tags: [] }); var query = builder.buildExploreQuery('MEASUREMENTS', undefined, ''); - expect(query).to.be('SHOW MEASUREMENTS LIMIT 100'); + expect(query).toBe('SHOW MEASUREMENTS LIMIT 100'); }); it('should have WITH MEASUREMENT in measurement query for non-empty query with no tags', function() { var builder = new InfluxQueryBuilder({ measurement: '', tags: [] }); var query = builder.buildExploreQuery('MEASUREMENTS', undefined, 'something'); - expect(query).to.be('SHOW MEASUREMENTS WITH MEASUREMENT =~ /something/ LIMIT 100'); + expect(query).toBe('SHOW MEASUREMENTS WITH MEASUREMENT =~ /something/ LIMIT 100'); }); it('should have WITH MEASUREMENT WHERE in measurement query for non-empty query with tags', function() { - var builder = new InfluxQueryBuilder({ measurement: '', tags: [{key: 'app', value: 'email'}] }); - var query = builder.buildExploreQuery('MEASUREMENTS', undefined, 'something'); - expect(query).to.be("SHOW MEASUREMENTS WITH MEASUREMENT =~ /something/ WHERE \"app\" = 'email' LIMIT 100"); + var builder = new InfluxQueryBuilder({ measurement: '', tags: [{ key: 'app', value: 'email' }] }); + var query = builder.buildExploreQuery('MEASUREMENTS', undefined, 'something'); + expect(query).toBe('SHOW MEASUREMENTS WITH MEASUREMENT =~ /something/ WHERE "app" = \'email\' LIMIT 100'); }); it('should have where condition in measurement query for query with tags', function() { - var builder = new InfluxQueryBuilder({measurement: '', tags: [{key: 'app', value: 'email'}]}); + var builder = new InfluxQueryBuilder({ measurement: '', tags: [{ key: 'app', value: 'email' }] }); var 
query = builder.buildExploreQuery('MEASUREMENTS'); - expect(query).to.be("SHOW MEASUREMENTS WHERE \"app\" = 'email' LIMIT 100"); + expect(query).toBe('SHOW MEASUREMENTS WHERE "app" = \'email\' LIMIT 100'); }); it('should have where tag name IN filter in tag values query for query with one tag', function() { - var builder = new InfluxQueryBuilder({measurement: '', tags: [{key: 'app', value: 'asdsadsad'}]}); + var builder = new InfluxQueryBuilder({ measurement: '', tags: [{ key: 'app', value: 'asdsadsad' }] }); var query = builder.buildExploreQuery('TAG_VALUES', 'app'); - expect(query).to.be('SHOW TAG VALUES WITH KEY = "app"'); + expect(query).toBe('SHOW TAG VALUES WITH KEY = "app"'); }); it('should have measurement tag condition and tag name IN filter in tag values query', function() { - var builder = new InfluxQueryBuilder({measurement: 'cpu', tags: [{key: 'app', value: 'email'}, {key: 'host', value: 'server1'}]}); + var builder = new InfluxQueryBuilder({ + measurement: 'cpu', + tags: [{ key: 'app', value: 'email' }, { key: 'host', value: 'server1' }], + }); var query = builder.buildExploreQuery('TAG_VALUES', 'app'); - expect(query).to.be('SHOW TAG VALUES FROM "cpu" WITH KEY = "app" WHERE "host" = \'server1\''); + expect(query).toBe('SHOW TAG VALUES FROM "cpu" WITH KEY = "app" WHERE "host" = \'server1\''); + }); + + it('should select from policy correctly if policy is specified', function() { + var builder = new InfluxQueryBuilder({ + measurement: 'cpu', + policy: 'one_week', + tags: [{ key: 'app', value: 'email' }, { key: 'host', value: 'server1' }], + }); + var query = builder.buildExploreQuery('TAG_VALUES', 'app'); + expect(query).toBe('SHOW TAG VALUES FROM "one_week"."cpu" WITH KEY = "app" WHERE "host" = \'server1\''); + }); + + it('should not includ policy when policy is default', function() { + var builder = new InfluxQueryBuilder({ measurement: 'cpu', policy: 'default', tags: [] }); + var query = builder.buildExploreQuery('TAG_VALUES', 'app'); + expect(query).toBe('SHOW TAG VALUES FROM "cpu" WITH KEY = "app"'); }); it('should switch to regex operator in tag condition', function() { var builder = new InfluxQueryBuilder({ measurement: 'cpu', - tags: [{key: 'host', value: '/server.*/'}] + tags: [{ key: 'host', value: '/server.*/' }], }); var query = builder.buildExploreQuery('TAG_VALUES', 'app'); - expect(query).to.be('SHOW TAG VALUES FROM "cpu" WITH KEY = "app" WHERE "host" =~ /server.*/'); + expect(query).toBe('SHOW TAG VALUES FROM "cpu" WITH KEY = "app" WHERE "host" =~ /server.*/'); }); it('should build show field query', function() { - var builder = new InfluxQueryBuilder({measurement: 'cpu', tags: [{key: 'app', value: 'email'}]}); + var builder = new InfluxQueryBuilder({ measurement: 'cpu', tags: [{ key: 'app', value: 'email' }] }); var query = builder.buildExploreQuery('FIELDS'); - expect(query).to.be('SHOW FIELD KEYS FROM "cpu"'); + expect(query).toBe('SHOW FIELD KEYS FROM "cpu"'); }); it('should build show field query with regexp', function() { - var builder = new InfluxQueryBuilder({measurement: '/$var/', tags: [{key: 'app', value: 'email'}]}); + var builder = new InfluxQueryBuilder({ measurement: '/$var/', tags: [{ key: 'app', value: 'email' }] }); var query = builder.buildExploreQuery('FIELDS'); - expect(query).to.be('SHOW FIELD KEYS FROM /$var/'); + expect(query).toBe('SHOW FIELD KEYS FROM /$var/'); }); it('should build show retention policies query', function() { - var builder = new InfluxQueryBuilder({measurement: 'cpu', tags: []}, 'site'); + var builder = new 
InfluxQueryBuilder({ measurement: 'cpu', tags: [] }, 'site'); var query = builder.buildExploreQuery('RETENTION POLICIES'); - expect(query).to.be('SHOW RETENTION POLICIES on "site"'); + expect(query).toBe('SHOW RETENTION POLICIES on "site"'); }); }); }); diff --git a/public/app/plugins/datasource/mysql/datasource.ts b/public/app/plugins/datasource/mysql/datasource.ts index e35896c0d04..ac5ccfeb5ca 100644 --- a/public/app/plugins/datasource/mysql/datasource.ts +++ b/public/app/plugins/datasource/mysql/datasource.ts @@ -15,9 +15,13 @@ export class MysqlDatasource { this.responseParser = new ResponseParser(this.$q); } - interpolateVariable(value) { + interpolateVariable(value, variable) { if (typeof value === 'string') { - return value; + if (variable.multi || variable.includeAll) { + return '\'' + value + '\''; + } else { + return value; + } } if (typeof value === 'number') { diff --git a/public/app/plugins/datasource/mysql/partials/annotations.editor.html b/public/app/plugins/datasource/mysql/partials/annotations.editor.html index 09581e2a552..b34eff5b011 100644 --- a/public/app/plugins/datasource/mysql/partials/annotations.editor.html +++ b/public/app/plugins/datasource/mysql/partials/annotations.editor.html @@ -21,7 +21,6 @@ An annotation is an event that is overlayed on top of graphs. The query can have up to four columns per row, the time_sec column is mandatory. Annotation rendering is expensive so it is important to limit the number of rows returned. - column with alias: time_sec for the annotation event. Format is UTC in seconds, use UNIX_TIMESTAMP(column) -- column with alias title for the annotation title - column with alias: text for the annotation text - column with alias: tags for annotation tags. This is a comma separated string of tags e.g. 
'tag1,tag2' diff --git a/public/app/plugins/datasource/mysql/partials/query.editor.html b/public/app/plugins/datasource/mysql/partials/query.editor.html index a7e993afd7f..22d64c9190f 100644 --- a/public/app/plugins/datasource/mysql/partials/query.editor.html +++ b/public/app/plugins/datasource/mysql/partials/query.editor.html @@ -49,7 +49,15 @@ Macros: - $__time(column) -> UNIX_TIMESTAMP(column) as time_sec - $__timeFilter(column) -> UNIX_TIMESTAMP(time_date_time) ≥ 1492750877 AND UNIX_TIMESTAMP(time_date_time) ≤ 1492750877 - $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877 -- $__timeGroup(column,'5m') -> (extract(epoch from "dateColumn")/extract(epoch from '5m'::interval))::int +- $__timeGroup(column,'5m') -> cast(cast(UNIX_TIMESTAMP(column)/(300) as signed)*300 as signed) + +Example of group by and order by with $__timeGroup: +SELECT + $__timeGroup(timestamp_col, '1h') AS time, + sum(value_double) as value +FROM yourtable +GROUP BY 1 +ORDER BY 1 Or build your own conditionals using these macros which just return the values: - $__timeFrom() -> FROM_UNIXTIME(1492750877) diff --git a/public/app/plugins/datasource/mysql/specs/datasource_specs.ts b/public/app/plugins/datasource/mysql/specs/datasource_specs.ts index f579ad15410..eb63f9c5b37 100644 --- a/public/app/plugins/datasource/mysql/specs/datasource_specs.ts +++ b/public/app/plugins/datasource/mysql/specs/datasource_specs.ts @@ -2,6 +2,7 @@ import {describe, beforeEach, it, expect, angularMocks} from 'test/lib/common'; import moment from 'moment'; import helpers from 'test/specs/helpers'; import {MysqlDatasource} from '../datasource'; +import {CustomVariable} from 'app/features/templating/custom_variable'; describe('MySQLDatasource', function() { var ctx = new helpers.ServiceTestContext(); @@ -195,22 +196,41 @@ describe('MySQLDatasource', function() { }); describe('When interpolating variables', () => { + beforeEach(function() { + ctx.variable = new CustomVariable({},{}); + }); + describe('and value is a string', () => { it('should return an unquoted value', () => { - expect(ctx.ds.interpolateVariable('abc')).to.eql('abc'); + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('abc'); }); }); describe('and value is a number', () => { it('should return an unquoted value', () => { - expect(ctx.ds.interpolateVariable(1000)).to.eql(1000); + expect(ctx.ds.interpolateVariable(1000, ctx.variable)).to.eql(1000); }); }); describe('and value is an array of strings', () => { it('should return comma separated quoted values', () => { - expect(ctx.ds.interpolateVariable(['a', 'b', 'c'])).to.eql('\'a\',\'b\',\'c\''); + expect(ctx.ds.interpolateVariable(['a', 'b', 'c'], ctx.variable)).to.eql('\'a\',\'b\',\'c\''); }); }); + + describe('and variable allows multi-value and value is a string', () => { + it('should return a quoted value', () => { + ctx.variable.multi = true; + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('\'abc\''); + }); + }); + + describe('and variable allows all and value is a string', () => { + it('should return a quoted value', () => { + ctx.variable.includeAll = true; + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('\'abc\''); + }); + }); + }); }); diff --git a/public/app/plugins/datasource/opentsdb/datasource.js b/public/app/plugins/datasource/opentsdb/datasource.js index 4d51b117ed4..7315485c6db 100644 --- a/public/app/plugins/datasource/opentsdb/datasource.js +++ b/public/app/plugins/datasource/opentsdb/datasource.js @@ -441,7 +441,7 @@ 
function (angular, _, dateMath) { } function mapMetricsToTargets(metrics, options, tsdbVersion) { - var interpolatedTagValue; + var interpolatedTagValue, arrTagV; return _.map(metrics, function(metricData) { if (tsdbVersion === 3) { return metricData.query.index; @@ -453,7 +453,8 @@ function (angular, _, dateMath) { return target.metric === metricData.metric && _.every(target.tags, function(tagV, tagK) { interpolatedTagValue = templateSrv.replace(tagV, options.scopedVars, 'pipe'); - return metricData.tags[tagK] === interpolatedTagValue || interpolatedTagValue === "*"; + arrTagV = interpolatedTagValue.split('|'); + return _.includes(arrTagV, metricData.tags[tagK]) || interpolatedTagValue === "*"; }); } }); diff --git a/public/app/plugins/datasource/postgres/datasource.ts b/public/app/plugins/datasource/postgres/datasource.ts index 68471f035ff..af3d83f50d8 100644 --- a/public/app/plugins/datasource/postgres/datasource.ts +++ b/public/app/plugins/datasource/postgres/datasource.ts @@ -15,9 +15,13 @@ export class PostgresDatasource { this.responseParser = new ResponseParser(this.$q); } - interpolateVariable(value) { + interpolateVariable(value, variable) { if (typeof value === 'string') { - return value; + if (variable.multi || variable.includeAll) { + return '\'' + value + '\''; + } else { + return value; + } } if (typeof value === 'number') { diff --git a/public/app/plugins/datasource/postgres/partials/annotations.editor.html b/public/app/plugins/datasource/postgres/partials/annotations.editor.html index 07b838e739a..b56f7523087 100644 --- a/public/app/plugins/datasource/postgres/partials/annotations.editor.html +++ b/public/app/plugins/datasource/postgres/partials/annotations.editor.html @@ -21,7 +21,6 @@ An annotation is an event that is overlayed on top of graphs. The query can have up to four columns per row, the time column is mandatory. Annotation rendering is expensive so it is important to limit the number of rows returned. - column with alias: time for the annotation event. Format is UTC in seconds, use extract(epoch from column) as "time" -- column with alias title for the annotation title - column with alias: text for the annotation text - column with alias: tags for annotation tags. This is a comma separated string of tags e.g. 
'tag1,tag2' diff --git a/public/app/plugins/datasource/postgres/partials/query.editor.html b/public/app/plugins/datasource/postgres/partials/query.editor.html index 1939fc47ecb..574fca33901 100644 --- a/public/app/plugins/datasource/postgres/partials/query.editor.html +++ b/public/app/plugins/datasource/postgres/partials/query.editor.html @@ -48,19 +48,17 @@ Table: Macros: - $__time(column) -> column as "time" - $__timeEpoch -> extract(epoch from column) as "time" -- $__timeFilter(column) -> column ≥ to_timestamp(1492750877) AND column ≤ to_timestamp(1492750877) +- $__timeFilter(column) -> extract(epoch from column) BETWEEN 1492750877 AND 1492750877 - $__unixEpochFilter(column) -> column > 1492750877 AND column < 1492750877 - -To group by time use $__timeGroup: --> (extract(epoch from column)/extract(epoch from column::interval))::int +- $__timeGroup(column,'5m') -> (extract(epoch from "dateColumn")/300)::bigint*300 Example of group by and order by with $__timeGroup: SELECT - min(date_time_col) AS time_sec, - sum(value_double) as value + $__timeGroup(date_time_col, '1h') AS time, + sum(value) as value FROM yourtable -group by $__timeGroup(date_time_col, '1h') -order by $__timeGroup(date_time_col, '1h') ASC +GROUP BY time +ORDER BY time Or build your own conditionals using these macros which just return the values: - $__timeFrom() -> to_timestamp(1492750877) diff --git a/public/app/plugins/datasource/postgres/specs/datasource_specs.ts b/public/app/plugins/datasource/postgres/specs/datasource_specs.ts index 3f83bb76e7b..0c0b3ca303a 100644 --- a/public/app/plugins/datasource/postgres/specs/datasource_specs.ts +++ b/public/app/plugins/datasource/postgres/specs/datasource_specs.ts @@ -2,6 +2,7 @@ import {describe, beforeEach, it, expect, angularMocks} from 'test/lib/common'; import moment from 'moment'; import helpers from 'test/specs/helpers'; import {PostgresDatasource} from '../datasource'; +import {CustomVariable} from 'app/features/templating/custom_variable'; describe('PostgreSQLDatasource', function() { var ctx = new helpers.ServiceTestContext(); @@ -195,22 +196,41 @@ describe('PostgreSQLDatasource', function() { }); describe('When interpolating variables', () => { + beforeEach(function() { + ctx.variable = new CustomVariable({},{}); + }); + describe('and value is a string', () => { it('should return an unquoted value', () => { - expect(ctx.ds.interpolateVariable('abc')).to.eql('abc'); + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('abc'); }); }); describe('and value is a number', () => { it('should return an unquoted value', () => { - expect(ctx.ds.interpolateVariable(1000)).to.eql(1000); + expect(ctx.ds.interpolateVariable(1000, ctx.variable)).to.eql(1000); }); }); describe('and value is an array of strings', () => { it('should return comma separated quoted values', () => { - expect(ctx.ds.interpolateVariable(['a', 'b', 'c'])).to.eql('\'a\',\'b\',\'c\''); + expect(ctx.ds.interpolateVariable(['a', 'b', 'c'], ctx.variable)).to.eql('\'a\',\'b\',\'c\''); }); }); + + describe('and variable allows multi-value and is a string', () => { + it('should return a quoted value', () => { + ctx.variable.multi = true; + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('\'abc\''); + }); + }); + + describe('and variable allows all and is a string', () => { + it('should return a quoted value', () => { + ctx.variable.includeAll = true; + expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('\'abc\''); + }); + }); + }); }); diff --git 
a/public/app/plugins/datasource/prometheus/datasource.ts b/public/app/plugins/datasource/prometheus/datasource.ts index c4939b5b2fa..ec295760d49 100644 --- a/public/app/plugins/datasource/prometheus/datasource.ts +++ b/public/app/plugins/datasource/prometheus/datasource.ts @@ -19,6 +19,7 @@ export class PrometheusDatasource { basicAuth: any; withCredentials: any; metricsNameCache: any; + interval: string; /** @ngInject */ constructor(instanceSettings, @@ -34,6 +35,7 @@ export class PrometheusDatasource { this.directUrl = instanceSettings.directUrl; this.basicAuth = instanceSettings.basicAuth; this.withCredentials = instanceSettings.withCredentials; + this.interval = instanceSettings.jsonData.timeInterval || '15s'; } _request(method, url, requestId?) { diff --git a/public/app/plugins/datasource/prometheus/partials/config.html b/public/app/plugins/datasource/prometheus/partials/config.html index 1b817a2186d..3bb43253d4d 100644 --- a/public/app/plugins/datasource/prometheus/partials/config.html +++ b/public/app/plugins/datasource/prometheus/partials/config.html @@ -1,3 +1,16 @@ +
+ Scrape interval: Set this to your global scrape interval defined in your Prometheus config file. This will be used as a lower limit for the Prometheus step query parameter.
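For illustration only (not part of the patch): the new Scrape interval setting is stored in instanceSettings.jsonData.timeInterval, and the datasource constructor change above falls back to '15s' when it is left empty. The settings object below is a hypothetical, abbreviated shape.

```ts
// Hypothetical, abbreviated instance settings for illustration.
const instanceSettings = { jsonData: { timeInterval: '30s' as string | undefined } };

// Mirrors the constructor change in prometheus/datasource.ts:
// use the configured scrape interval, or default to 15 seconds.
const interval = instanceSettings.jsonData.timeInterval || '15s';
console.log(interval); // '30s' here; '15s' when timeInterval is undefined or ''
```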
    + diff --git a/public/app/plugins/datasource/prometheus/specs/datasource_specs.ts b/public/app/plugins/datasource/prometheus/specs/datasource_specs.ts index 9d528c735de..a7d18f3f158 100644 --- a/public/app/plugins/datasource/prometheus/specs/datasource_specs.ts +++ b/public/app/plugins/datasource/prometheus/specs/datasource_specs.ts @@ -5,7 +5,7 @@ import {PrometheusDatasource} from '../datasource'; describe('PrometheusDatasource', function() { var ctx = new helpers.ServiceTestContext(); - var instanceSettings = {url: 'proxied', directUrl: 'direct', user: 'test', password: 'mupp' }; + var instanceSettings = {url: 'proxied', directUrl: 'direct', user: 'test', password: 'mupp', jsonData: {}}; beforeEach(angularMocks.module('grafana.core')); beforeEach(angularMocks.module('grafana.services')); diff --git a/public/app/plugins/datasource/prometheus/specs/metric_find_query_specs.ts b/public/app/plugins/datasource/prometheus/specs/metric_find_query_specs.ts index 9186d9c4403..bb051d7328d 100644 --- a/public/app/plugins/datasource/prometheus/specs/metric_find_query_specs.ts +++ b/public/app/plugins/datasource/prometheus/specs/metric_find_query_specs.ts @@ -8,7 +8,7 @@ import PrometheusMetricFindQuery from '../metric_find_query'; describe('PrometheusMetricFindQuery', function() { var ctx = new helpers.ServiceTestContext(); - var instanceSettings = {url: 'proxied', directUrl: 'direct', user: 'test', password: 'mupp' }; + var instanceSettings = {url: 'proxied', directUrl: 'direct', user: 'test', password: 'mupp', jsonData: {}}; beforeEach(angularMocks.module('grafana.core')); beforeEach(angularMocks.module('grafana.services')); diff --git a/public/app/plugins/panel/alertlist/editor.html b/public/app/plugins/panel/alertlist/editor.html index b8cee3b3317..36c989dd72c 100644 --- a/public/app/plugins/panel/alertlist/editor.html +++ b/public/app/plugins/panel/alertlist/editor.html @@ -4,22 +4,22 @@
Show / Max items / Sort order / State filter
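The Sort order options above (Alphabetical asc/desc, Importance) correspond to the sorting logic in alertlist/module.ts further below. A rough sketch, assuming a minimal alert shape and hypothetical state scores (the real scores live in alertDef.alertStateSortScore):

```ts
// Rough sketch of the alert list sort orders; the score table is hypothetical.
interface AlertItem { name: string; state: string; }

const alertStateSortScore: Record<string, number> = {
  alerting: 1, no_data: 2, pending: 3, ok: 4, paused: 5,
};

function sortAlerts(alerts: AlertItem[], sortOrder: number): AlertItem[] {
  if (sortOrder === 3) {
    // 'Importance': most severe states first
    return [...alerts].sort((a, b) => alertStateSortScore[a.state] - alertStateSortScore[b.state]);
  }
  // 'Alphabetical (asc)' / '(desc)'
  const result = [...alerts].sort((a, b) => a.name.toLowerCase().localeCompare(b.name.toLowerCase()));
  if (sortOrder === 2) {
    result.reverse();
  }
  return result;
}
```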
    diff --git a/public/app/plugins/panel/alertlist/module.html b/public/app/plugins/panel/alertlist/module.html index ecedab9cbb9..6689ea84e8c 100644 --- a/public/app/plugins/panel/alertlist/module.html +++ b/public/app/plugins/panel/alertlist/module.html @@ -1,31 +1,35 @@ -
    -
    -
      -
    1. -
      -
      -
      - -
      -
      -

      - - {{alert.name}} - -

      -

      - {{alert.stateModel.text}} - for {{alert.newStateDateAgo}} -

      -
      -
      -
      -
    2. -
    -
    +
    +
    + {{ctrl.noAlertsMessage}} +
    -
    -
      +
      +
        +
      1. +
        +
        +
        + +
        +
        +

        + + {{alert.name}} + +

        +

        + {{alert.stateModel.text}} + for {{alert.newStateDateAgo}} +

        +
        +
        +
        +
      2. +
      +
      + +
      +
      1. @@ -42,7 +46,9 @@
      2. diff --git a/public/app/plugins/panel/alertlist/module.ts b/public/app/plugins/panel/alertlist/module.ts index 704b28fa451..a481ca0524d 100644 --- a/public/app/plugins/panel/alertlist/module.ts +++ b/public/app/plugins/panel/alertlist/module.ts @@ -3,7 +3,7 @@ import _ from 'lodash'; import moment from 'moment'; import alertDef from '../../../features/alerting/alert_def'; -import {PanelCtrl} from 'app/plugins/sdk'; +import { PanelCtrl } from 'app/plugins/sdk'; import * as dateMath from 'app/core/utils/datemath'; @@ -11,19 +11,20 @@ class AlertListPanel extends PanelCtrl { static templateUrl = 'module.html'; showOptions = [ - {text: 'Current state', value: 'current'}, - {text: 'Recent state changes', value: 'changes'} + { text: 'Current state', value: 'current' }, + { text: 'Recent state changes', value: 'changes' } ]; sortOrderOptions = [ - {text: 'Alphabetical (asc)', value: 1}, - {text: 'Alphabetical (desc)', value: 2}, - {text: 'Importance', value: 3}, + { text: 'Alphabetical (asc)', value: 1 }, + { text: 'Alphabetical (desc)', value: 2 }, + { text: 'Importance', value: 3 }, ]; stateFilter: any = {}; currentAlerts: any = []; alertHistory: any = []; + noAlertsMessage: string; // Set and populate defaults panelDefaults = { show: 'current', @@ -40,8 +41,7 @@ class AlertListPanel extends PanelCtrl { this.scrollable = true; this.events.on('init-edit-mode', this.onInitEditMode.bind(this)); - this.events.on('render', this.onRender.bind(this)); - this.events.on('refresh', this.onRender.bind(this)); + this.events.on('refresh', this.onRefresh.bind(this)); for (let key in this.panel.stateFilter) { this.stateFilter[this.panel.stateFilter[key]] = true; @@ -53,7 +53,7 @@ class AlertListPanel extends PanelCtrl { return _.sortBy(alerts, a => { return alertDef.alertStateSortScore[a.state]; }); } - var result = _.sortBy(alerts, a => { return a.name.toLowerCase();}); + var result = _.sortBy(alerts, a => { return a.name.toLowerCase(); }); if (this.panel.sortOrder === 2) { result.reverse(); } @@ -71,10 +71,10 @@ class AlertListPanel extends PanelCtrl { } this.panel.stateFilter = result; - this.onRender(); + this.onRefresh(); } - onRender() { + onRefresh() { if (this.panel.show === 'current') { this.getCurrentAlertState(); } @@ -106,6 +106,7 @@ class AlertListPanel extends PanelCtrl { al.info = alertDef.getAlertAnnotationInfo(al); return al; }); + this.noAlertsMessage = this.alertHistory.length === 0 ? 'No alerts in current time range' : ''; }); } @@ -125,6 +126,7 @@ class AlertListPanel extends PanelCtrl { al.newStateDateAgo = moment(al.newStateDate).locale('en').fromNow(true); return al; })); + this.noAlertsMessage = this.currentAlerts.length === 0 ? 
'No alerts' : ''; }); } diff --git a/public/app/plugins/panel/dashlist/module.html b/public/app/plugins/panel/dashlist/module.html index b5c59862e5d..7f71811ac08 100644 --- a/public/app/plugins/panel/dashlist/module.html +++ b/public/app/plugins/panel/dashlist/module.html @@ -8,7 +8,7 @@ {{dash.title}} - + diff --git a/public/app/plugins/panel/dashlist/module.ts b/public/app/plugins/panel/dashlist/module.ts index 2a53a2caede..d0391763367 100644 --- a/public/app/plugins/panel/dashlist/module.ts +++ b/public/app/plugins/panel/dashlist/module.ts @@ -22,7 +22,7 @@ class DashListCtrl extends PanelCtrl { }; /** @ngInject */ - constructor($scope, $injector, private backendSrv) { + constructor($scope, $injector, private backendSrv, private dashboardSrv) { super($scope, $injector); _.defaults(this.panel, this.panelDefaults); this.scrollable = true; @@ -108,6 +108,17 @@ class DashListCtrl extends PanelCtrl { }); } + starDashboard(dash, evt) { + this.dashboardSrv.starDashboard(dash.id, dash.isStarred).then(newState => { + dash.isStarred = newState; + }); + + if (evt) { + evt.stopPropagation(); + evt.preventDefault(); + } + } + getRecentDashboards() { this.groups[1].show = this.panel.recent; if (!this.panel.recent) { diff --git a/public/app/plugins/panel/graph/data_processor.ts b/public/app/plugins/panel/graph/data_processor.ts index 089e4c7dcea..b737c3610be 100644 --- a/public/app/plugins/panel/graph/data_processor.ts +++ b/public/app/plugins/panel/graph/data_processor.ts @@ -25,12 +25,20 @@ export class DataProcessor { switch (this.panel.xaxis.mode) { case 'series': - case 'histogram': case 'time': { return options.dataList.map((item, index) => { return this.timeSeriesHandler(item, index, options); }); } + case 'histogram': { + let histogramDataList = [{ + target: 'count', + datapoints: _.concat([], _.flatten(_.map(options.dataList, 'datapoints'))) + }]; + return histogramDataList.map((item, index) => { + return this.timeSeriesHandler(item, index, options); + }); + } case 'field': { return this.customHandler(firstItem); } diff --git a/public/app/plugins/panel/graph/graph.ts b/public/app/plugins/panel/graph/graph.ts index 497ae2f9e08..82bad53164c 100755 --- a/public/app/plugins/panel/graph/graph.ts +++ b/public/app/plugins/panel/graph/graph.ts @@ -313,11 +313,7 @@ function graphDirective($rootScope, timeSrv, popoverSrv, contextSrv) { let ticks = panel.xaxis.buckets || panelWidth / 50; bucketSize = tickStep(histMin, histMax, ticks); let histogram = convertValuesToHistogram(values, bucketSize); - data[0].data = histogram; - data[0].alias = data[0].label = data[0].id = "count"; - data = [data[0]]; - options.series.bars.barWidth = bucketSize * 0.8; } else { bucketSize = 0; @@ -379,20 +375,8 @@ function graphDirective($rootScope, timeSrv, popoverSrv, contextSrv) { var sortOrder = panel.legend.sortDesc; var haveSortBy = sortBy !== null || sortBy !== undefined; var haveSortOrder = sortOrder !== null || sortOrder !== undefined; - - if (panel.stack && haveSortBy && haveSortOrder) { - var desc = desc = panel.legend.sortDesc === true ? -1 : 1; - series.sort((x, y) => { - if (x.stats[sortBy] > y.stats[sortBy]) { - return 1 * desc; - } - if (x.stats[sortBy] < y.stats[sortBy]) { - return -1 * desc; - } - - return 0; - }); - } + var shouldSortBy = panel.stack && haveSortBy && haveSortOrder; + var sortDesc = panel.legend.sortDesc === true ? 
-1 : 1; series.sort((x, y) => { if (x.zindex > y.zindex) { @@ -403,6 +387,15 @@ function graphDirective($rootScope, timeSrv, popoverSrv, contextSrv) { return -1; } + if (shouldSortBy) { + if (x.stats[sortBy] > y.stats[sortBy]) { + return 1 * sortDesc; + } + if (x.stats[sortBy] < y.stats[sortBy]) { + return -1 * sortDesc; + } + } + return 0; }); diff --git a/public/app/plugins/panel/graph/histogram.ts b/public/app/plugins/panel/graph/histogram.ts index c60782942f2..8b2be9efcf7 100644 --- a/public/app/plugins/panel/graph/histogram.ts +++ b/public/app/plugins/panel/graph/histogram.ts @@ -1,18 +1,21 @@ import _ from 'lodash'; +import TimeSeries from 'app/core/time_series2'; /** * Convert series into array of series values. * @param data Array of series */ -export function getSeriesValues(data: any): number[] { +export function getSeriesValues(dataList: TimeSeries[]): number[] { + const VALUE_INDEX = 0; let values = []; // Count histogam stats - for (let i = 0; i < data.length; i++) { - let series = data[i]; - for (let j = 0; j < series.data.length; j++) { - if (series.data[j][1] !== null) { - values.push(series.data[j][1]); + for (let i = 0; i < dataList.length; i++) { + let series = dataList[i]; + let datapoints = series.datapoints; + for (let j = 0; j < datapoints.length; j++) { + if (datapoints[j][VALUE_INDEX] !== null) { + values.push(datapoints[j][VALUE_INDEX]); } } } diff --git a/public/app/plugins/panel/graph/series_overrides_ctrl.js b/public/app/plugins/panel/graph/series_overrides_ctrl.js index 2df993ff70e..5ee5b5e8e47 100644 --- a/public/app/plugins/panel/graph/series_overrides_ctrl.js +++ b/public/app/plugins/panel/graph/series_overrides_ctrl.js @@ -29,7 +29,7 @@ define([ $scope.setOverride = function(item, subItem) { // handle color overrides if (item.propertyName === 'color') { - $scope.openColorSelector(); + $scope.openColorSelector($scope.override['color']); return; } @@ -52,15 +52,17 @@ define([ $scope.ctrl.render(); }; - $scope.openColorSelector = function() { + $scope.openColorSelector = function(color) { + var fakeSeries = {color: color}; popoverSrv.show({ element: $element.find(".dropdown")[0], position: 'top center', openOn: 'click', - template: '', + template: '', model: { autoClose: true, colorSelected: $scope.colorSelected, + series: fakeSeries }, onClose: function() { $scope.ctrl.render(); diff --git a/public/app/plugins/panel/graph/specs/histogram.jest.ts b/public/app/plugins/panel/graph/specs/histogram.jest.ts index 0469b33f259..5b7e34ffa3a 100644 --- a/public/app/plugins/panel/graph/specs/histogram.jest.ts +++ b/public/app/plugins/panel/graph/specs/histogram.jest.ts @@ -37,7 +37,9 @@ describe('Graph Histogam Converter', function () { beforeEach(() => { data = [ { - data: [[0, 1], [0, 2], [0, 10], [0, 11], [0, 17], [0, 20], [0, 29]] + datapoints: [ + [1, 0], [2, 0], [10, 0], [11, 0], [17, 0], [20, 0], [29, 0] + ] } ]; }); @@ -50,7 +52,7 @@ describe('Graph Histogam Converter', function () { }); it('Should skip null values', () => { - data[0].data.push([0, null]); + data[0].datapoints.push([null, 0]); let expected = [1, 2, 10, 11, 17, 20, 29]; diff --git a/public/app/plugins/panel/heatmap/heatmap_data_converter.ts b/public/app/plugins/panel/heatmap/heatmap_data_converter.ts index 31360aa552e..ca33b455427 100644 --- a/public/app/plugins/panel/heatmap/heatmap_data_converter.ts +++ b/public/app/plugins/panel/heatmap/heatmap_data_converter.ts @@ -35,7 +35,16 @@ function elasticHistogramToHeatmap(seriesList) { bucket = heatmap[time] = {x: time, buckets: {}}; } - 
bucket.buckets[bound] = {y: bound, count: count, values: [], points: []}; + bucket.buckets[bound] = { + y: bound, + count: count, + bounds: { + top: null, + bottom: bound + }, + values: [], + points: [] + }; } } diff --git a/public/app/plugins/panel/heatmap/heatmap_tooltip.ts b/public/app/plugins/panel/heatmap/heatmap_tooltip.ts index 99ef854cc29..938573f64e0 100644 --- a/public/app/plugins/panel/heatmap/heatmap_tooltip.ts +++ b/public/app/plugins/panel/heatmap/heatmap_tooltip.ts @@ -83,7 +83,7 @@ export class HeatmapTooltip { let xData = data.buckets[xBucketIndex]; // Search in special 'zero' bucket also let yData = _.find(xData.buckets, (bucket, bucketIndex) => { - return bucket.bounds.bottom === yBucketIndex || bucketIndex === yBucketIndex; + return bucket.bounds.bottom === yBucketIndex || bucketIndex === yBucketIndex.toString(); }); let tooltipTimeFormat = 'YYYY-MM-DD HH:mm:ss'; @@ -168,7 +168,8 @@ export class HeatmapTooltip { let yBucketSize = this.scope.ctrl.data.yBucketSize; let {min, max, ticks} = this.scope.ctrl.data.yAxis; let histogramData = _.map(xBucket.buckets, bucket => { - return [bucket.bounds.bottom, bucket.values.length]; + let count = bucket.count !== undefined ? bucket.count : bucket.values.length; + return [bucket.bounds.bottom, count]; }); histogramData = _.filter(histogramData, d => { return d[0] >= min && d[0] <= max; diff --git a/public/app/plugins/panel/heatmap/specs/heatmap_data_converter.jest.ts b/public/app/plugins/panel/heatmap/specs/heatmap_data_converter.jest.ts index fcebbd1c256..241ec9c9b32 100644 --- a/public/app/plugins/panel/heatmap/specs/heatmap_data_converter.jest.ts +++ b/public/app/plugins/panel/heatmap/specs/heatmap_data_converter.jest.ts @@ -222,23 +222,23 @@ describe('ES Histogram converter', () => { '1422774000000': { x: 1422774000000, buckets: { - '1': { y: 1, count: 1, values: [], points: [] }, - '2': { y: 2, count: 5, values: [], points: [] }, - '3': { y: 3, count: 0, values: [], points: [] } + '1': { y: 1, count: 1, values: [], points: [], bounds: {bottom: 1, top: null}}, + '2': { y: 2, count: 5, values: [], points: [], bounds: {bottom: 2, top: null}}, + '3': { y: 3, count: 0, values: [], points: [], bounds: {bottom: 3, top: null}} } }, '1422774060000': { x: 1422774060000, buckets: { - '1': { y: 1, count: 0, values: [], points: [] }, - '2': { y: 2, count: 3, values: [], points: [] }, - '3': { y: 3, count: 1, values: [], points: [] } + '1': { y: 1, count: 0, values: [], points: [], bounds: {bottom: 1, top: null}}, + '2': { y: 2, count: 3, values: [], points: [], bounds: {bottom: 2, top: null}}, + '3': { y: 3, count: 1, values: [], points: [], bounds: {bottom: 3, top: null}} } }, }; let heatmap = elasticHistogramToHeatmap(ctx.series); - expect(heatmap).toMatchObject(expectedHeatmap); + expect(heatmap).toEqual(expectedHeatmap); }); }); }); diff --git a/public/app/plugins/panel/singlestat/module.ts b/public/app/plugins/panel/singlestat/module.ts index 05b8f9323cf..cd1e35039dc 100644 --- a/public/app/plugins/panel/singlestat/module.ts +++ b/public/app/plugins/panel/singlestat/module.ts @@ -2,7 +2,7 @@ import _ from 'lodash'; import $ from 'jquery'; import 'vendor/flot/jquery.flot'; import 'vendor/flot/jquery.flot.gauge'; -import 'app/features/panellinks/linkSrv'; +import 'app/features/panellinks/link_srv'; import kbn from 'app/core/utils/kbn'; import config from 'app/core/config'; diff --git a/public/app/plugins/panel/table/renderer.ts b/public/app/plugins/panel/table/renderer.ts index b2c1907058e..88cef17bc76 100644 --- 
a/public/app/plugins/panel/table/renderer.ts +++ b/public/app/plugins/panel/table/renderer.ts @@ -104,7 +104,7 @@ export class TableRenderer { return '-'; } - if (_.isString(v)) { + if (_.isString(v) || _.isArray(v)) { return this.defaultCellFormatter(v, column.style); } diff --git a/public/app/plugins/panel/table/specs/renderer.jest.ts b/public/app/plugins/panel/table/specs/renderer.jest.ts index 61ecbeca30f..48e4caaadfb 100644 --- a/public/app/plugins/panel/table/specs/renderer.jest.ts +++ b/public/app/plugins/panel/table/specs/renderer.jest.ts @@ -14,9 +14,10 @@ describe('when rendering table', () => { {text: 'United', unit: 'bps'}, {text: 'Sanitized'}, {text: 'Link'}, + {text: 'Array'}, ]; table.rows = [ - [1388556366666, 1230, 40, undefined, "", "", "my.host.com", "host1"] + [1388556366666, 1230, 40, undefined, "", "", "my.host.com", "host1", ["value1", "value2"]] ]; var panel = { @@ -66,6 +67,12 @@ describe('when rendering table', () => { linkUrl: "/dashboard?param=$__cell¶m_1=$__cell_1¶m_2=$__cell_2", linkTooltip: "$__cell $__cell_1 $__cell_6", linkTargetBlank: true + }, + { + pattern: 'Array', + type: 'number', + unit: 'ms', + decimals: 3 } ] }; @@ -182,6 +189,11 @@ describe('when rendering table', () => { `; expect(normalize(html)).toBe(normalize(expectedHtml)); }); + + it('Array column should not use number as formatter', () => { + var html = renderer.renderCell(8, 0, ['value1', 'value2']); + expect(html).toBe('value1, value2'); + }); }); }); diff --git a/public/app/plugins/panel/table/specs/transformers.jest.ts b/public/app/plugins/panel/table/specs/transformers.jest.ts index 68af0ca7319..5f86266701e 100644 --- a/public/app/plugins/panel/table/specs/transformers.jest.ts +++ b/public/app/plugins/panel/table/specs/transformers.jest.ts @@ -154,7 +154,7 @@ describe('when transforming time series table', () => { var rawData = { annotations: [ { - min: 1000, + time: 1000, text: 'hej', tags: ['tags', 'asd'], title: 'title', diff --git a/public/app/plugins/panel/table/transformers.ts b/public/app/plugins/panel/table/transformers.ts index e23adb43f19..9a94f191646 100644 --- a/public/app/plugins/panel/table/transformers.ts +++ b/public/app/plugins/panel/table/transformers.ts @@ -124,7 +124,7 @@ transformers['annotations'] = { for (var i = 0; i < data.annotations.length; i++) { var evt = data.annotations[i]; - model.rows.push([evt.min, evt.title, evt.text, evt.tags]); + model.rows.push([evt.time, evt.title, evt.text, evt.tags]); } } }; diff --git a/public/app/plugins/panel/text/editor.html b/public/app/plugins/panel/text/editor.html index 8e1283a39ba..eab53dc7615 100644 --- a/public/app/plugins/panel/text/editor.html +++ b/public/app/plugins/panel/text/editor.html @@ -15,5 +15,9 @@ (This area uses Markdown. HTML is not supported) - +
        +
        + + +
        +
        diff --git a/public/app/plugins/panel/text/module.ts b/public/app/plugins/panel/text/module.ts index 7df4874c6d1..6c4e6a11418 100644 --- a/public/app/plugins/panel/text/module.ts +++ b/public/app/plugins/panel/text/module.ts @@ -24,6 +24,11 @@ export class TextPanelCtrl extends PanelCtrl { this.events.on('init-edit-mode', this.onInitEditMode.bind(this)); this.events.on('refresh', this.onRefresh.bind(this)); this.events.on('render', this.onRender.bind(this)); + $scope.$watch('ctrl.panel.content', + _.throttle(() => { + this.render(); + }, 1000) + ); } onInitEditMode() { @@ -67,7 +72,9 @@ export class TextPanelCtrl extends PanelCtrl { }); } - this.updateContent(this.remarkable.render(content)); + this.$scope.$applyAsync(() => { + this.updateContent(this.remarkable.render(content)); + }); } updateContent(html) { diff --git a/public/dashboards/home.json b/public/dashboards/home.json index 9e6d6dddb6d..ff69bb6f856 100644 --- a/public/dashboards/home.json +++ b/public/dashboards/home.json @@ -76,7 +76,7 @@ "to": "now" }, "timepicker": { - "enable": false, + "hidden": true, "refresh_intervals": [ "5s", "10s", diff --git a/public/sass/components/_panel_alertlist.scss b/public/sass/components/_panel_alertlist.scss index c8e8354f4d7..f124c0c4b8c 100644 --- a/public/sass/components/_panel_alertlist.scss +++ b/public/sass/components/_panel_alertlist.scss @@ -1,3 +1,12 @@ .panel-alert-list { overflow-y: auto; } + +.panel-alert-list__no-alerts { + display: flex; + align-items: center; + justify-content: center; + width: 100%; + height: calc(100% - 30px); +} + diff --git a/public/sass/components/_panel_dashlist.scss b/public/sass/components/_panel_dashlist.scss index ecd2caeab4e..3c643fdd945 100644 --- a/public/sass/components/_panel_dashlist.scss +++ b/public/sass/components/_panel_dashlist.scss @@ -14,9 +14,11 @@ background-color: $tight-form-bg; border: $input-btn-border-width solid $input-label-border-color; .fa { - float: right; padding-top: 3px; } + .dashlist-star { + float: right; + } .fa-star { color: $orange; } diff --git a/public/test/core/utils/version_specs.ts b/public/test/core/utils/version_specs.ts new file mode 100644 index 00000000000..a057c8e16bd --- /dev/null +++ b/public/test/core/utils/version_specs.ts @@ -0,0 +1,55 @@ +import {describe, beforeEach, it, expect} from 'test/lib/common'; + +import {SemVersion, isVersionGtOrEq} from 'app/core/utils/version'; + +describe("SemVersion", () => { + let version = '1.0.0-alpha.1'; + + describe('parsing', () => { + it('should parse version properly', () => { + let semver = new SemVersion(version); + expect(semver.major).to.be(1); + expect(semver.minor).to.be(0); + expect(semver.patch).to.be(0); + expect(semver.meta).to.be('alpha.1'); + }); + }); + + describe('comparing', () => { + beforeEach(() => { + version = '3.4.5'; + }); + + it('should detect greater version properly', () => { + let semver = new SemVersion(version); + let cases = [ + {value: '3.4.5', expected: true}, + {value: '3.4.4', expected: true}, + {value: '3.4.6', expected: false}, + {value: '4', expected: false}, + {value: '3.5', expected: false}, + ]; + cases.forEach((testCase) => { + expect(semver.isGtOrEq(testCase.value)).to.be(testCase.expected); + }); + }); + }); + + describe('isVersionGtOrEq', () => { + it('should compare versions properly (a >= b)', () => { + let cases = [ + {values: ['3.4.5', '3.4.5'], expected: true}, + {values: ['3.4.5', '3.4.4'] , expected: true}, + {values: ['3.4.5', '3.4.6'], expected: false}, + {values: ['3.4', '3.4.0'], expected: true}, + 
{values: ['3', '3.0.0'], expected: true}, + {values: ['3.1.1-beta1', '3.1'], expected: true}, + {values: ['3.4.5', '4'], expected: false}, + {values: ['3.4.5', '3.5'], expected: false}, + ]; + cases.forEach((testCase) => { + expect(isVersionGtOrEq(testCase.values[0], testCase.values[1])).to.be(testCase.expected); + }); + }); + }); +}); diff --git a/public/test/mocks/angular.ts b/public/test/mocks/angular.ts new file mode 100644 index 00000000000..185d64214fd --- /dev/null +++ b/public/test/mocks/angular.ts @@ -0,0 +1,17 @@ +export default class AngularJSMock { + service: any; + controller: any; + directive: any; + + constructor() { + this.service = jest.fn(); + this.controller = jest.fn(); + this.directive = jest.fn(); + } + + module() { + return this; + } +} + +module.exports = AngularJSMock; diff --git a/scripts/build/Dockerfile b/scripts/build/Dockerfile index 89b0a1a46dd..6e48b42c8e8 100644 --- a/scripts/build/Dockerfile +++ b/scripts/build/Dockerfile @@ -21,7 +21,7 @@ RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A170311380 RUN curl --silent --location https://rpm.nodesource.com/setup_6.x | bash - && \ yum install -y nodejs --nogpgcheck -ENV GOLANG_VERSION 1.9.1 +ENV GOLANG_VERSION 1.9.2 RUN wget https://dl.yarnpkg.com/rpm/yarn.repo -O /etc/yum.repos.d/yarn.repo && \ yum install -y yarn --nogpgcheck && \ diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md index 7670fc87a51..781c89eea69 100644 --- a/vendor/github.com/lib/pq/README.md +++ b/vendor/github.com/lib/pq/README.md @@ -1,5 +1,6 @@ # pq - A pure Go postgres driver for Go's database/sql package +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq) [![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq) ## Install diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 338a0bc1879..fadb88e5eac 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -35,8 +35,12 @@ var ( errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") ) +// Driver is the Postgres database driver. type Driver struct{} +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. func (d *Driver) Open(name string) (driver.Conn, error) { return Open(name) } @@ -78,6 +82,8 @@ func (s transactionStatus) String() string { panic("not reached") } +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. 
type Dialer interface { Dial(network, address string) (net.Conn, error) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) @@ -149,11 +155,7 @@ func (cn *conn) handleDriverSettings(o values) (err error) { if err != nil { return err } - err = boolSetting("binary_parameters", &cn.binaryParameters) - if err != nil { - return err - } - return nil + return boolSetting("binary_parameters", &cn.binaryParameters) } func (cn *conn) handlePgpass(o values) { @@ -165,11 +167,16 @@ func (cn *conn) handlePgpass(o values) { if filename == "" { // XXX this code doesn't work on Windows where the default filename is // XXX %APPDATA%\postgresql\pgpass.conf - user, err := user.Current() - if err != nil { - return + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir } - filename = filepath.Join(user.HomeDir, ".pgpass") + filename = filepath.Join(userHome, ".pgpass") } fileinfo, err := os.Stat(filename) if err != nil { @@ -237,10 +244,14 @@ func (cn *conn) writeBuf(b byte) *writeBuf { } } +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. func Open(name string) (_ driver.Conn, err error) { return DialOpen(defaultDialer{}, name) } +// DialOpen opens a new connection to the database using a dialer. func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { // Handle any panics during connection initialization. Note that we // specifically do *not* want to use errRecover(), as that would turn any @@ -1431,7 +1442,8 @@ func (rs *rows) NextResultSet() error { // // tblname := "my_table" // data := "my_data" -// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go index 6d252ecee21..b2c3582c84a 100644 --- a/vendor/github.com/lib/pq/doc.go +++ b/vendor/github.com/lib/pq/doc.go @@ -11,7 +11,8 @@ using this package directly. For example: ) func main() { - db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) if err != nil { log.Fatal(err) } @@ -23,7 +24,8 @@ using this package directly. For example: You can also connect to a database using a URL. For example: - db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) Connection String Parameters @@ -43,21 +45,28 @@ supported: * dbname - The name of the database to connect to * user - The user to sign in as * password - The user's password - * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) * port - The port to bind to. 
(default is 5432) - * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) * fallback_application_name - An application_name to fall back to if one isn't provided. - * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. * sslcert - Cert file location. The file must contain PEM encoded data. * sslkey - Key file location. The file must contain PEM encoded data. - * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. Valid values for sslmode are: * disable - No SSL * require - Always SSL (skip verification) - * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) - * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING for more information about connection string parameters. @@ -68,7 +77,7 @@ Use single quotes for values that contain whitespace: A backslash will escape the next character in values: - "user=space\ man password='it\'s valid' + "user=space\ man password='it\'s valid'" Note that the connection parameter client_encoding (which sets the text encoding for the connection) may be set but must be "UTF8", @@ -129,7 +138,8 @@ This package returns the following types for values from the PostgreSQL backend: - integer types smallint, integer, and bigint are returned as int64 - floating-point types real and double precision are returned as float64 - character types char, varchar, and text are returned as string - - temporal types date, time, timetz, timestamp, and timestamptz are returned as time.Time + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time - the boolean type is returned as bool - the bytea type is returned as []byte @@ -229,7 +239,7 @@ for more information). Note that the channel name will be truncated to 63 bytes by the PostgreSQL server. You can find a complete, working example of Listener usage at -http://godoc.org/github.com/lib/pq/listen_example. +http://godoc.org/github.com/lib/pq/examples/listen. 
*/ package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 88a322cda82..3b0d365f296 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -367,8 +367,15 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro timeSep := daySep + 3 day := p.mustAtoi(str, daySep+1, timeSep) + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + var hour, minute, second int - if len(str) > monSep+len("01-01")+1 { + if len(str) > minLen { p.expect(str, ' ', timeSep) minSep := timeSep + 3 p.expect(str, ':', minSep) @@ -424,7 +431,8 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) } var isoYear int - if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + + if isBC { isoYear = 1 - year remainderIdx += 3 } else { diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 09f94244b9b..412c6ac1e2b 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -60,7 +60,7 @@ type ListenerConn struct { replyChan chan message } -// Creates a new ListenerConn. Use NewListener instead. +// NewListenerConn creates a new ListenerConn. Use NewListener instead. func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { return newDialListenerConn(defaultDialer{}, name, notificationChan) } @@ -214,17 +214,17 @@ func (l *ListenerConn) listenerConnMain() { // this ListenerConn is done } -// Send a LISTEN query to the server. See ExecSimpleQuery. +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Listen(channel string) (bool, error) { return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) } -// Send an UNLISTEN query to the server. See ExecSimpleQuery. +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Unlisten(channel string) (bool, error) { return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) } -// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. func (l *ListenerConn) UnlistenAll() (bool, error) { return l.ExecSimpleQuery("UNLISTEN *") } @@ -267,8 +267,8 @@ func (l *ListenerConn) sendSimpleQuery(q string) (err error) { return nil } -// Execute a "simple query" (i.e. one with no bindable parameters) on the -// connection. The possible return values are: +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: // 1) "executed" is true; the query was executed to completion on the // database server. If the query failed, err will be set to the error // returned by the database, otherwise err will be nil. @@ -333,6 +333,7 @@ func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { } } +// Close closes the connection. func (l *ListenerConn) Close() error { l.connectionLock.Lock() if l.err != nil { @@ -346,7 +347,7 @@ func (l *ListenerConn) Close() error { return l.cn.c.Close() } -// Err() returns the reason the connection was closed. It is not safe to call +// Err returns the reason the connection was closed. It is not safe to call // this function until l.Notify has been closed. 
func (l *ListenerConn) Err() error { return l.err @@ -354,32 +355,43 @@ func (l *ListenerConn) Err() error { var errListenerClosed = errors.New("pq: Listener has been closed") +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. var ErrChannelNotOpen = errors.New("pq: channel is not open") +// ListenerEventType is an enumeration of listener event types. type ListenerEventType int const ( - // Emitted only when the database connection has been initially - // initialized. err will always be nil. + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. ListenerEventConnected ListenerEventType = iota - // Emitted after a database connection has been lost, either because of an - // error or because Close has been called. err will be set to the reason - // the database connection was lost. + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. ListenerEventDisconnected - // Emitted after a database connection has been re-established after - // connection loss. err will always be nil. After this event has been - // emitted, a nil pq.Notification is sent on the Listener.Notify channel. + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. ListenerEventReconnected - // Emitted after a connection to the database was attempted, but failed. - // err will be set to an error describing why the connection attempt did - // not succeed. + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. ListenerEventConnectionAttemptFailed ) +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. type EventCallbackType func(event ListenerEventType, err error) // Listener provides an interface for listening to notifications from a @@ -454,9 +466,9 @@ func NewDialListener(d Dialer, return l } -// Returns the notification channel for this listener. This is the same -// channel as Notify, and will not be recreated during the life time of the -// Listener. +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. func (l *Listener) NotificationChannel() <-chan *Notification { return l.Notify } @@ -639,7 +651,7 @@ func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notificatio // close and then return the error message from the connection, as // per ListenerConn's interface. 
if err != nil { - for _ = range notificationChan { + for range notificationChan { } doneChan <- cn.Err() return diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000000..7a512d67c2b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +Some more examples can be found in the "examples" folder. + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000000..95ec014e8cc --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000000..db1f5f20686 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,685 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
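+//
+// The underlying API is pull based: yaml_parser_parse fills in one event at a
+// time and the caller deletes it when done. A minimal event loop, shown purely
+// for illustration (buf stands for some []byte of YAML input, error handling
+// reduced to a break), looks roughly like:
+//
+//	var p yaml_parser_t
+//	if !yaml_parser_initialize(&p) {
+//		panic("failed to initialize YAML parser")
+//	}
+//	defer yaml_parser_delete(&p)
+//	yaml_parser_set_input_string(&p, buf)
+//	for {
+//		var ev yaml_event_t
+//		if !yaml_parser_parse(&p, &ev) {
+//			break // p.problem and p.problem_mark describe the failure
+//		}
+//		t := ev.typ
+//		yaml_event_delete(&ev)
+//		if t == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//	}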
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
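+//
+// The decoder below consumes the node tree produced by the parser above. As a
+// rough sketch only (the exported entry points wrap this with the package's
+// panic/recover error handling, which is omitted here; data stands for some
+// []byte of YAML input):
+//
+//	p := newParser(data)
+//	defer p.destroy()
+//	d := newDecoder(false) // strict mode off
+//	var out map[interface{}]interface{}
+//	if n := p.parse(); n != nil {
+//		d.unmarshal(n, reflect.ValueOf(&out).Elem())
+//	}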
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
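+//
+// For illustration, a hypothetical type that takes over its own decoding via
+// the Unmarshaler hook (the callback handed to UnmarshalYAML re-enters this
+// decoder, as callUnmarshaler above shows) might look like:
+//
+//	type port struct{ n int }
+//
+//	func (p *port) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var raw int
+//		if err := unmarshal(&raw); err != nil {
+//			return err
+//		}
+//		p.n = raw
+//		return nil
+//	}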
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + 
out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in 
struct %s", n.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000000..41de8b856c2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1684 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. 
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
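+//
+// The states below are advanced by yaml_emitter_emit above. Purely as an
+// illustration of the expected event sequence (error checks omitted), emitting
+// a single plain-scalar document into a byte slice looks roughly like:
+//
+//	var em yaml_emitter_t
+//	var out []byte
+//	var ev yaml_event_t
+//	yaml_emitter_initialize(&em)
+//	yaml_emitter_set_output_string(&em, &out)
+//	yaml_stream_start_event_initialize(&ev, yaml_UTF8_ENCODING)
+//	yaml_emitter_emit(&em, &ev)
+//	yaml_document_start_event_initialize(&ev, nil, nil, true)
+//	yaml_emitter_emit(&em, &ev)
+//	yaml_scalar_event_initialize(&ev, nil, nil, []byte("hello"), true, true, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_emitter_emit(&em, &ev)
+//	yaml_document_end_event_initialize(&ev, true)
+//	yaml_emitter_emit(&em, &ev)
+//	yaml_stream_end_event_initialize(&ev)
+//	yaml_emitter_emit(&em, &ev)
+//	yaml_emitter_flush(&em) // out should now hold the scalar as plain YAML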
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
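+// A DOCUMENT-START event writes any %YAML/%TAG directives and, unless the
+// document is implicit, the "---" indicator; a STREAM-END event flushes the
+// buffered output and moves the emitter into its terminal state.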
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
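+// Records the root/sequence/mapping/simple_key context flags on the emitter
+// and dispatches on the event type to the alias, scalar, sequence-start or
+// mapping-start handler; any other event type is an emitter error.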
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
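+// Anchor and alias names must be non-empty and contain alphanumerical
+// characters only; the validated name and its alias flag are stored in
+// emitter.anchor_data for the subsequent write.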
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
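+//
+// Editor's note (illustrative, not upstream): only alias, scalar,
+// sequence-start and mapping-start events carry anchors and tags, and only
+// scalar events carry a value, which is why the switch below has exactly those
+// four cases. As a concrete example, serializing the document "a: 1" yields
+// the event stream
+//
+//	STREAM-START, DOCUMENT-START,
+//	  MAPPING-START, SCALAR("a"), SCALAR("1"), MAPPING-END,
+//	DOCUMENT-END, STREAM-END
+//
+// and only the node events in the middle row have anything for this function
+// to analyze.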
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000000..84f84995517 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
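+	//
+	// Editor's note (illustrative, not upstream): every marshal* method below
+	// funnels its events through emit(), and the usual driver of this encoder
+	// is the package-level yaml.Marshal. A minimal usage sketch, where the
+	// Service type and its field tags are the caller's own invention:
+	//
+	//	type Service struct {
+	//		Name  string `yaml:"name"`
+	//		Ports []int  `yaml:"ports,flow"`
+	//	}
+	//	out, err := yaml.Marshal(Service{Name: "web", Ports: []int{80, 443}})
+	//	// expected: out == "name: web\nports: [80, 443]\n", err == nil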
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000000..81d05dfe573 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
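+//
+// Editor's note (illustrative sketch, not upstream): the dispatcher below is
+// driven by calling yaml_parser_parse repeatedly until a STREAM-END event (or
+// a parse error) comes back. A minimal consumer, assuming the parser has
+// already been initialized and given an input source, looks like:
+//
+//	var event yaml_event_t
+//	for {
+//		if !yaml_parser_parse(&parser, &event) {
+//			// parser.problem and parser.problem_mark describe the failure
+//			break
+//		}
+//		if event.typ == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//		// ... handle the event ...
+//	}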
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
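+//
+// Editor's note (illustrative, not upstream): empty scalars stand in for node
+// positions that the grammar requires but the input leaves blank, e.g. the
+// missing values and entry in
+//
+//	blank_value:
+//	blank_entries:
+//	  -
+//	  -
+//
+// each become a zero-length plain scalar event positioned at the given mark.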
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000000..f450791717b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
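+//
+// Editor's note (illustrative, not upstream): the leading bytes of the stream
+// select the decoder used further down, and a recognized BOM is consumed
+// before decoding starts. For example:
+//
+//	FF FE 61 00   -> UTF-16LE, decodes to "a"
+//	FE FF 00 61   -> UTF-16BE, decodes to "a"
+//	EF BB BF 61   -> UTF-8,    decodes to "a"
+//	61 ...        -> no BOM, UTF-8 is assumed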
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. 
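+	// Editor's note (illustrative, not upstream): as a concrete instance of the
+	// UTF-8 decoding done in the loop further down, the byte pair 0xC3 0xA9
+	// ("é") matches the 110xxxxx 10xxxxxx pattern, so width is 2 and
+	//
+	//	value = rune(0xC3&0x1F)<<6 + rune(0xA9&0x3F) // = 0x03<<6 + 0x29 = 0xE9
+	//
+	// before being re-encoded into parser.buffer as UTF-8 at the end of the loop.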
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
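Editor's aside: the leading-octet checks above follow the RFC 3629 table quoted in the comment. A self-contained sketch of the same width classification (the real decoder additionally validates trailing octets, overlong forms, and surrogate code points):

package main

import "fmt"

// utf8Width returns the sequence length implied by a UTF-8 leading byte,
// or 0 for an invalid leading octet, matching the switch above.
func utf8Width(lead byte) int {
	switch {
	case lead&0x80 == 0x00:
		return 1 // 0xxxxxxx
	case lead&0xE0 == 0xC0:
		return 2 // 110xxxxx
	case lead&0xF0 == 0xE0:
		return 3 // 1110xxxx
	case lead&0xF8 == 0xF0:
		return 4 // 11110xxx
	}
	return 0
}

func main() {
	for _, b := range []byte{'a', 0xC3, 0xE2, 0xF0, 0xFF} {
		fmt.Printf("0x%02X -> %d\n", b, utf8Width(b))
	}
}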
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
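Editor's aside: the surrogate-pair formula quoted from RFC 2781 above is easy to verify in isolation. A sketch of the same arithmetic (range checks on the two halves are omitted):

package main

import "fmt"

// decodeSurrogatePair applies U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF),
// where W1 is a high surrogate (0xD800-0xDBFF) and W2 a low surrogate (0xDC00-0xDFFF).
func decodeSurrogatePair(w1, w2 uint16) rune {
	return 0x10000 + (rune(w1&0x3FF) << 10) + rune(w2&0x3FF)
}

func main() {
	// U+1F600 is encoded in UTF-16 as the pair D83D DE00.
	fmt.Printf("U+%X\n", decodeSurrogatePair(0xD83D, 0xDE00)) // U+1F600
}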
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000000..232313cc084 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,208 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
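Editor's aside: shortTag/longTag above simply swap the canonical "tag:yaml.org,2002:" prefix for the "!!" shorthand and back. A standalone sketch of the short-form direction:

package main

import (
	"fmt"
	"strings"
)

const longTagPrefix = "tag:yaml.org,2002:"

// shortTagSketch mirrors shortTag above: the canonical prefix collapses to "!!",
// any other tag is returned unchanged.
func shortTagSketch(tag string) string {
	if strings.HasPrefix(tag, longTagPrefix) {
		return "!!" + tag[len(longTagPrefix):]
	}
	return tag
}

func main() {
	fmt.Println(shortTagSketch("tag:yaml.org,2002:str")) // !!str
	fmt.Println(shortTagSketch("!custom"))               // !custom
}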
+ if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 00000000000..07448445582 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). 
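Editor's aside: resolve above is what turns untagged plain scalars into typed Go values when decoding into interface{}. A small sketch through the public API, assuming the YAML 1.1 rules implemented by this table (so "yes" is a boolean and "~" is null):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := "a: yes\nb: 0x1F\nc: 1.5\nd: ~\ne: hello\n"
	var out map[string]interface{}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// Expected resolutions: a -> true, b -> 31, c -> 1.5, d -> nil, e -> "hello".
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s: %#v\n", k, out[k])
	}
}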
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. 
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
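Editor's aside: as the token walkthroughs above suggest, flow style and block style differ only in the tokens the scanner emits; the decoded values are identical. A quick sketch via the exported API:

package main

import (
	"fmt"
	"reflect"

	"gopkg.in/yaml.v2"
)

func main() {
	flow := "{a: [1, 2], b: {c: 3}}"
	block := "a:\n  - 1\n  - 2\nb:\n  c: 3\n"
	var f, bl map[string]interface{}
	if err := yaml.Unmarshal([]byte(flow), &f); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte(block), &bl); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(f, bl)) // true: same values, different token streams
}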
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
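Editor's aside: the simple-key bookkeeping above enforces the YAML restriction that an implicit key must fit on a single line (and stay under 1024 characters). A sketch of the visible effect; the exact error wording is an assumption:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// The would-be key is a multi-line plain scalar, so by the time ':' is
	// reached the saved simple key has gone stale and parsing fails
	// (typically with "mapping values are not allowed in this context").
	doc := "multi\nline key: value\n"
	var out map[string]string
	err := yaml.Unmarshal([]byte(doc), &out)
	fmt.Println(err)
}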
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. 
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
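Editor's aside: the '|' and '>' indicators handled by yaml_parser_fetch_block_scalar above select literal and folded block scalars; folding joins lines with spaces while literal keeps the newlines. A small sketch via the public API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := "lit: |\n  line 1\n  line 2\nfold: >\n  line 1\n  line 2\n"
	var out map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out["lit"])  // "line 1\nline 2\n"
	fmt.Printf("%q\n", out["fold"]) // "line 1 line 2\n"
}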
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
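+ // The width w starts at the sentinel value 1024, meaning the leading
+ // octet of the escaped UTF-8 sequence has not been decoded yet; once it
+ // is, w becomes the sequence length and counts down one octet per pass.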
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
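+ // max_indent tracks the largest column reached while skipping leading
+ // spaces; if no explicit indentation indicator was given (*indent is
+ // still 0), it is used after this loop to auto-detect the scalar indent.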
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
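+ // Single-character escapes below append their byte (or UTF-8) value
+ // directly; 'x', 'u' and 'U' only set code_length so that the following
+ // 2, 4 or 8 hex digits are decoded into a code point further down.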
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
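+ // The first line break after the content is kept separately in
+ // leading_break; any further breaks accumulate in trailing_breaks. The
+ // folding step below uses this split to decide between joining lines
+ // with a single space and preserving the extra breaks.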
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+ whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
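+ // If the scalar ended after a line break was consumed (leading_blanks),
+ // the scanner now sits at the start of a fresh line, where a simple key
+ // may legally begin again, so the flag is turned back on.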
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000000..5958822f9c6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000000..190362f25df --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. 
+ if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 00000000000..bf18884e0e3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,357 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
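+//
+// For illustration only (the hypothetical Weekday type is a sketch, not
+// part of this package), a type could marshal itself as a plain string:
+//
+// type Weekday int
+//
+// func (d Weekday) MarshalYAML() (interface{}, error) {
+// return [...]string{"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}[int(d)], nil
+// }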
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
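+ // It records the index path from the outer struct down to the field, so
+ // the value is reached by walking Field(i) for each index in Inline;
+ // getStructInfo below fills it in for fields found through ,inline structs.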
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000000..3caeca0491b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 00000000000..8110ce3c37a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. 
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/vendor.json b/vendor/vendor.json index b8f12b2e36c..2fb3bcc2f8d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -461,10 +461,10 @@ "revisionTime": "2017-02-10T14:05:23Z" }, { - "checksumSHA1": "RYMOEINLFNWIJk8aKNifSlPhg9U=", + "checksumSHA1": "3HVfwgLpCDH8JX211UWdrSi/GU4=", "path": "github.com/lib/pq", - "revision": "23da1db4f16d9658a86ae9b717c245fc078f10f1", - "revisionTime": "2017-09-18T17:50:43Z" + "revision": "b609790bd85edf8e9ab7e0f8912750a786177bcf", + "revisionTime": "2017-10-22T19:20:43Z" }, { "checksumSHA1": "jaCQF1par6Jl8g+V2Cgp0n/0wSc=", @@ -712,6 +712,12 @@ "path": "gopkg.in/macaron.v1", "revision": "a325110f8b392bce3e5cdeb8c44bf98078ada3be", "revisionTime": "2017-02-13T09:12:08Z" + }, + { + "checksumSHA1": "RDJpJQwkF012L6m/2BJizyOksNw=", + "path": "gopkg.in/yaml.v2", + "revision": "eb3733d160e74a9c7e442f435eb3bea458e1d19f", + "revisionTime": "2017-08-12T16:00:11Z" } ], "rootPath": "github.com/grafana/grafana"