grafana/pkg/services/ngalert/api/util.go


package api
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"gopkg.in/yaml.v3"
"github.com/grafana/grafana/pkg/api/response"
"github.com/grafana/grafana/pkg/services/accesscontrol"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/datasourceproxy"
"github.com/grafana/grafana/pkg/services/datasources"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/web"
)
var searchRegex = regexp.MustCompile(`\{(\w+)\}`)
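// toMacaronPath converts a swagger-style path template to the ":Name" parameter
// syntax understood by the macaron/web router.
//
// Illustrative sketch (the route below is only an example, not taken from this file):
//
//	toMacaronPath("/api/ruler/{DatasourceUID}/api/v1/rules")
//	// -> "/api/ruler/:DatasourceUID/api/v1/rules"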
func toMacaronPath(path string) string {
return string(searchRegex.ReplaceAllFunc([]byte(path), func(s []byte) []byte {
m := string(s[1 : len(s)-1])
return []byte(fmt.Sprintf(":%s", m))
}))
}
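// getDatasourceByUID looks up the data source addressed by the ":DatasourceUID"
// path parameter and verifies that its type matches the expected backend:
// "alertmanager" for the Alertmanager backend, and "loki" or "prometheus" for the
// LoTex ruler backend; any other combination results in an unexpected-type error.
//
// Illustrative call (hypothetical handler code, not part of this file):
//
//	ds, err := getDatasourceByUID(ctx, cache, apimodels.LoTexRulerBackend)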
func getDatasourceByUID(ctx *contextmodel.ReqContext, cache datasources.CacheService, expectedType apimodels.Backend) (*datasources.DataSource, error) {
datasourceUID := web.Params(ctx.Req)[":DatasourceUID"]
ds, err := cache.GetDatasourceByUID(ctx.Req.Context(), datasourceUID, ctx.SignedInUser, ctx.SkipDSCache)
if err != nil {
return nil, err
}
switch expectedType {
case apimodels.AlertmanagerBackend:
if ds.Type != "alertmanager" {
return nil, unexpectedDatasourceTypeError(ds.Type, "alertmanager")
}
case apimodels.LoTexRulerBackend:
if ds.Type != "loki" && ds.Type != "prometheus" {
return nil, unexpectedDatasourceTypeError(ds.Type, "loki, prometheus")
}
default:
return nil, unexpectedDatasourceTypeError(ds.Type, expectedType.String())
}
return ds, nil
}
// macaron unsafely asserts that the http.ResponseWriter is an http.CloseNotifier, which panics when it is not.
// Here we implement the interface so that the panic no longer happens, but we also give up the ability to
// cancel upstream requests when the downstream connection has closed.
// NB: http.CloseNotifier is a deprecated interface that predates the context package.
type safeMacaronWrapper struct {
http.ResponseWriter
}
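// CloseNotify returns a channel that is never signalled, which satisfies
// http.CloseNotifier without ever reporting a closed downstream connection.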
func (w *safeMacaronWrapper) CloseNotify() <-chan bool {
return make(chan bool)
}
// createProxyContext creates a new request context that is provided down to the data source proxy.
// The request context
// 1. overwrites the underlying response writer used by a *contextmodel.ReqContext because AlertingProxy needs to intercept
// the response from the data source in order to analyze it and, if necessary, modify it
// 2. elevates the current user's permissions to Editor if the user does not already have that role.
// This is needed to bypass the plugin authorization, which still relies on the legacy roles.
// This elevation can be considered safe because all upstream calls are protected by the RBAC on web request router level.
func (p *AlertingProxy) createProxyContext(ctx *contextmodel.ReqContext, request *http.Request, response *response.NormalResponse) *contextmodel.ReqContext {
cpy := *ctx
cpyMCtx := *cpy.Context
cpyMCtx.Resp = web.NewResponseWriter(ctx.Req.Method, &safeMacaronWrapper{response})
cpy.Context = &cpyMCtx
cpy.Req = request
// If RBAC is enabled, the actions are checked upstream, so if the user gets here they are allowed to act on the data source.
// Some data sources require the legacy Editor role in order to perform mutating operations. In that case, we elevate permissions for the context that we
// will provide downstream.
// TODO (yuri) remove this after RBAC for plugins is implemented
if !ctx.SignedInUser.HasRole(org.RoleEditor) {
newUser := *ctx.SignedInUser
newUser.OrgRole = org.RoleEditor
cpy.SignedInUser = &newUser
}
return &cpy
}
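// AlertingProxy forwards alerting API requests to a data source through the data source
// proxy service and lets callers post-process the upstream response (see withReq).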
type AlertingProxy struct {
DataProxy *datasourceproxy.DataSourceProxyService
ac accesscontrol.AccessControl
}
// withReq proxies the given request to the data source referenced by the :DatasourceID or :DatasourceUID
// path parameter, captures the upstream response, and converts it into a Grafana API response using the
// provided extractor.
func (p *AlertingProxy) withReq(
ctx *contextmodel.ReqContext,
method string,
u *url.URL,
body io.Reader,
extractor func(*response.NormalResponse) (any, error),
headers map[string]string,
) response.Response {
req, err := http.NewRequest(method, u.String(), body)
if err != nil {
return ErrResp(http.StatusBadRequest, err, "")
}
for h, v := range headers {
req.Header.Add(h, v)
}
// this response will be populated by the response from the datasource
resp := response.CreateNormalResponse(make(http.Header), nil, 0)
proxyContext := p.createProxyContext(ctx, req, resp)
datasourceID := web.Params(ctx.Req)[":DatasourceID"]
if datasourceID != "" {
recipient, err := strconv.ParseInt(web.Params(ctx.Req)[":DatasourceID"], 10, 64)
if err != nil {
return ErrResp(http.StatusBadRequest, err, "DatasourceID is invalid")
}
p.DataProxy.ProxyDatasourceRequestWithID(proxyContext, recipient)
} else {
datasourceUID := web.Params(ctx.Req)[":DatasourceUID"]
if datasourceUID == "" {
return ErrResp(http.StatusBadRequest, err, "DatasourceUID is empty")
}
p.DataProxy.ProxyDatasourceRequestWithUID(proxyContext, datasourceUID)
}
status := resp.Status()
if status >= 400 {
errMessage := string(resp.Body())
// if the Content-Type is application/json and the body decodes successfully
// and contains a "message" field, return that field as the error message
if strings.HasPrefix(resp.Header().Get("Content-Type"), "application/json") {
var m map[string]any
if err := json.Unmarshal(resp.Body(), &m); err == nil {
if message, ok := m["message"]; ok {
errMessageStr, isString := message.(string)
if isString {
errMessage = errMessageStr
}
}
}
} else if strings.HasPrefix(resp.Header().Get("Content-Type"), "text/html") {
// if Content-Type is text/html
// do not return the body
errMessage = "redacted html"
}
return ErrResp(status, errors.New(errMessage), "")
}
t, err := extractor(resp)
if err != nil {
return ErrResp(http.StatusInternalServerError, err, "")
}
b, err := json.Marshal(t)
if err != nil {
return ErrResp(http.StatusInternalServerError, err, "")
}
return response.JSON(status, b)
}
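// Illustrative sketch of how a route handler is expected to call withReq; the handler
// name and upstream path below are hypothetical and not part of this file:
//
//	func (p *AlertingProxy) exampleGetRules(ctx *contextmodel.ReqContext) response.Response {
//		u := &url.URL{Path: "/api/v1/rules"} // assumed upstream path
//		// jsonExtractor(nil) decodes the upstream JSON body into a generic map
//		return p.withReq(ctx, http.MethodGet, u, nil, jsonExtractor(nil), nil)
//	}

// yamlExtractor returns an extractor that requires a YAML Content-Type from upstream
// and decodes the response body into v, rejecting fields that are not known to v.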
func yamlExtractor(v any) func(*response.NormalResponse) (any, error) {
return func(resp *response.NormalResponse) (any, error) {
contentType := resp.Header().Get("Content-Type")
if !strings.Contains(contentType, "yaml") {
return nil, fmt.Errorf("unexpected content type from upstream. expected YAML, got %v", contentType)
}
decoder := yaml.NewDecoder(bytes.NewReader(resp.Body()))
decoder.KnownFields(true)
err := decoder.Decode(v)
return v, err
}
}
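// jsonExtractor returns an extractor that requires a JSON Content-Type from upstream
// and unmarshals the response body into v, or into a generic map when v is nil.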
func jsonExtractor(v any) func(*response.NormalResponse) (any, error) {
if v == nil {
// json unmarshal expects a pointer
v = &map[string]any{}
}
return func(resp *response.NormalResponse) (any, error) {
contentType := resp.Header().Get("Content-Type")
if !strings.Contains(contentType, "json") {
return nil, fmt.Errorf("unexpected content type from upstream. expected JSON, got %v", contentType)
}
return v, json.Unmarshal(resp.Body(), v)
}
}
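// messageExtractor wraps the raw upstream response body into a map with a single
// "message" key, so it can be returned as a JSON object.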
func messageExtractor(resp *response.NormalResponse) (any, error) {
return map[string]string{"message": string(resp.Body())}, nil
}
// ErrResp creates a response with a visible error
func ErrResp(status int, err error, msg string, args ...any) *response.NormalResponse {
if msg != "" {
formattedMsg := fmt.Sprintf(msg, args...)
err = fmt.Errorf("%s: %w", formattedMsg, err)
}
return response.Error(status, err.Error(), err)
}
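// For example (illustrative only), ErrResp(http.StatusBadRequest, err, "failed to parse %q", name)
// produces a response whose message has the form `failed to parse "...": <err>`.
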
// accessForbiddenResp creates a response of forbidden access.
func accessForbiddenResp() response.Response {
//nolint:stylecheck // Grandfathered capitalization of error.
return ErrResp(http.StatusForbidden, errors.New("Permission denied"), "")
}
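// containsProvisionedAlerts reports whether at least one of the given rules has a
// provenance record other than ProvenanceNone, i.e. is managed by provisioning.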
func containsProvisionedAlerts(provenances map[string]ngmodels.Provenance, rules []*ngmodels.AlertRule) bool {
if len(provenances) == 0 {
return false
}
for _, rule := range rules {
provenance, ok := provenances[rule.UID]
if ok && provenance != ngmodels.ProvenanceNone {
return true
}
}
return false
}