CloudWatch/Logs: Fix query error when results are sparse (#24702)

Andrej Ocenas 2020-05-14 20:36:06 +02:00 committed by GitHub
parent bf1e5aa16c
commit bc8c05137b
2 changed files with 41 additions and 7 deletions
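For context, CloudWatch Logs sometimes returns result rows that carry no data: completely empty rows, or rows containing only an @timestamp field. The patch below filters those out before building the data frame and makes the group-key code tolerate nil values. A sparse response of the kind involved might look roughly like this sketch (values are illustrative, modeled on the test fixture added in this commit):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	sparse := &cloudwatchlogs.GetQueryResultsOutput{
		Results: [][]*cloudwatchlogs.ResultField{
			{
				&cloudwatchlogs.ResultField{Field: aws.String("@timestamp"), Value: aws.String("2020-03-02 15:04:05.000")},
				&cloudwatchlogs.ResultField{Field: aws.String("@message"), Value: aws.String("a log line")},
			},
			// a completely empty row
			{},
			// a row carrying only a timestamp
			{
				&cloudwatchlogs.ResultField{Field: aws.String("@timestamp"), Value: aws.String("2020-03-02 17:04:05.000")},
			},
		},
	}
	fmt.Println(len(sparse.Results)) // 3 rows, but only the first carries real log data
}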

@@ -8,13 +8,33 @@ import (
 )
 func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*data.Frame, error) {
+	nonEmptyRows := make([][]*cloudwatchlogs.ResultField, 0)
+	// Sometimes CloudWatch can send empty rows
+	for _, row := range response.Results {
+		if len(row) == 0 {
+			continue
+		}
+		if len(row) == 1 {
+			if row[0].Value == nil {
+				continue
+			}
+			// Sometimes it sends a row with only a timestamp
+			if _, err := time.Parse(cloudWatchTSFormat, *row[0].Value); err == nil {
+				continue
+			}
+		}
+		nonEmptyRows = append(nonEmptyRows, row)
+	}
+	rowCount := len(nonEmptyRows)
 	fieldValues := make(map[string]interface{})
 	// Maintaining a list of field names in the order returned from CloudWatch
 	// as just iterating over fieldValues would not give a consistent order
 	fieldNames := make([]*string, 0)
-	for _, row := range response.Results {
+	for i, row := range nonEmptyRows {
 		for _, resultField := range row {
 			// Strip @ptr field from results as it's not needed
 			if *resultField.Field == "@ptr" {
@@ -26,9 +46,9 @@ func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*d
 				// Check if field is time field
 				if _, err := time.Parse(cloudWatchTSFormat, *resultField.Value); err == nil {
-					fieldValues[*resultField.Field] = make([]*time.Time, 0)
+					fieldValues[*resultField.Field] = make([]*time.Time, rowCount)
 				} else {
-					fieldValues[*resultField.Field] = make([]*string, 0)
+					fieldValues[*resultField.Field] = make([]*string, rowCount)
 				}
 			}
@@ -38,9 +58,9 @@ func logsResultsToDataframes(response *cloudwatchlogs.GetQueryResultsOutput) (*d
 					return nil, err
 				}
-				fieldValues[*resultField.Field] = append(timeField, &parsedTime)
+				timeField[i] = &parsedTime
 			} else {
-				fieldValues[*resultField.Field] = append(fieldValues[*resultField.Field].([]*string), resultField.Value)
+				fieldValues[*resultField.Field].([]*string)[i] = resultField.Value
 			}
 		}
 	}
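The two hunks above go together: each per-field slice is now preallocated to rowCount, so values are written at their row index instead of appended; appending to a preallocated slice would leave rowCount leading nil entries in front of every value. A standalone sketch of the difference (illustrative only, not part of the patch):

package main

import "fmt"

func main() {
	rowCount := 3
	appended := make([]*string, rowCount) // preallocated, then (incorrectly) appended to
	indexed := make([]*string, rowCount)  // preallocated, written by index

	v := "value"
	appended = append(appended, &v) // length grows to 4: three leading nils, then the value
	indexed[0] = &v                 // length stays 3: the value sits at its row index

	fmt.Println(len(appended), len(indexed)) // 4 3
}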
@@ -112,7 +132,11 @@ func groupResults(results *data.Frame, groupingFieldNames []string) ([]*data.Fra
 func generateGroupKey(fields []*data.Field, row int) string {
 	groupKey := ""
 	for _, field := range fields {
-		groupKey += *field.At(row).(*string)
+		if strField, ok := field.At(row).(*string); ok {
+			if strField != nil {
+				groupKey += *strField
+			}
+		}
 	}
 	return groupKey
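With the type assertion and nil check, a nil entry in a grouping field contributes nothing to the key, so a row whose log field is nil yields a key built from its remaining fields; the updated TestGroupKeyGeneration below expects exactly that ("stream-d"). A minimal standalone sketch of the same guard pattern, using a hypothetical safeDeref helper rather than the actual data.Field API:

package main

import "fmt"

// safeDeref mirrors the guard used in generateGroupKey: dereference only when the
// type assertion succeeds and the pointer is non-nil, otherwise contribute nothing.
func safeDeref(v interface{}) string {
	if s, ok := v.(*string); ok && s != nil {
		return *s
	}
	return ""
}

func main() {
	stream := "stream-d"
	row := []interface{}{nil, &stream} // log field missing, stream field present

	key := ""
	for _, cell := range row {
		key += safeDeref(cell)
	}
	fmt.Println(key) // "stream-d"
}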


@@ -80,6 +80,13 @@ func TestLogsResultsToDataframes(t *testing.T) {
 			},
 			// Sometimes CloudWatch returns an empty row
 			{},
+			// or a row with only a timestamp
+			{
+				&cloudwatchlogs.ResultField{
+					Field: aws.String("@timestamp"),
+					Value: aws.String("2020-03-02 17:04:05.000"),
+				},
+			},
 			{
 				&cloudwatchlogs.ResultField{
 					Field: aws.String("@ptr"),
@@ -205,20 +212,23 @@ func TestGroupKeyGeneration(t *testing.T) {
 		aws.String("fakelog-a"),
 		aws.String("fakelog-b"),
 		aws.String("fakelog-c"),
+		nil,
 	})
 	streamField := data.NewField("stream", data.Labels{}, []*string{
 		aws.String("stream-a"),
 		aws.String("stream-b"),
 		aws.String("stream-c"),
+		aws.String("stream-d"),
 	})
 	fakeFields := []*data.Field{logField, streamField}
-	expectedKeys := []string{"fakelog-astream-a", "fakelog-bstream-b", "fakelog-cstream-c"}
+	expectedKeys := []string{"fakelog-astream-a", "fakelog-bstream-b", "fakelog-cstream-c", "stream-d"}
 	assert.Equal(t, expectedKeys[0], generateGroupKey(fakeFields, 0))
 	assert.Equal(t, expectedKeys[1], generateGroupKey(fakeFields, 1))
 	assert.Equal(t, expectedKeys[2], generateGroupKey(fakeFields, 2))
+	assert.Equal(t, expectedKeys[3], generateGroupKey(fakeFields, 3))
 }
 func TestGroupingResults(t *testing.T) {