Pull request #94: Upgrade Go dependencies, update linter & releaser
Merge in PRODUCT/squirreldb from upgrade-deps-2024-08-27 to main

* commit 'a8330a9e7cee73c67743cf3e31f37f956df908cc':
  Tidy go mod, please linters
  Prevent new metric IDs from overflowing int64
  Upgrade replaced dependency
  Update releaser
  Update & please linter
  Upgrade Go dependencies
Thomas Delbende authored and cavanierc committed Sep 10, 2024
2 parents e441433 + a8330a9 commit be36031
Showing 25 changed files with 1,290 additions and 197 deletions.
1 change: 1 addition & 0 deletions .golangci.yml
@@ -17,6 +17,7 @@ linters:
- interfacebloat # Warn when an interface has too many methods, not useful.
# Disable deprecated and replaced linter.
- execinquery
+ - exportloopref
# We use maintidx to lint function complexity.
- gocyclo
- cyclop
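
exportloopref joins the disabled linters because golangci-lint deprecated it: since Go 1.22 every for-loop iteration gets its own copy of the loop variable, so the pointer-aliasing bug the linter existed to catch can no longer happen. A minimal illustration of that old pitfall (not code from this repository):

package main

import "fmt"

func main() {
    values := []int{1, 2, 3}
    ptrs := make([]*int, 0, len(values))

    for _, v := range values {
        // Before Go 1.22, v was one variable reused across iterations, so
        // every stored pointer ended up pointing at the last element.
        // With Go 1.22 loop-variable scoping this is safe, which is why
        // exportloopref was deprecated (copyloopvar replaces it).
        ptrs = append(ptrs, &v)
    }

    for _, p := range ptrs {
        fmt.Println(*p) // Go 1.22+: prints 1, 2, 3
    }
}
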
2 changes: 1 addition & 1 deletion .goreleaser.yml
@@ -2,7 +2,7 @@ version: 2
project_name: squirreldb
snapshot:
# Our version is based on the date.
- name_template: '{{ .Env.SQUIRRELDB_VERSION }}'
+ version_template: '{{ .Env.SQUIRRELDB_VERSION }}'
before:
hooks:
- go mod download
11 changes: 11 additions & 0 deletions api/api.go
@@ -43,6 +43,11 @@ const (
remoteReadMaxBytesInFrame = 1048576 // 1 MiB (Prometheus default)
)

+ var allowedProtoMsgs = []config.RemoteWriteProtoMsg{ //nolint:gochecknoglobals
+ config.RemoteWriteProtoMsgV1,
+ config.RemoteWriteProtoMsgV2,
+ }

var (
regexErrInvalidMatcher = regexp.MustCompile(fmt.Sprintf("^%s", remotestorage.ErrInvalidMatcher))
regexErrMissingTenantHeader = regexp.MustCompile(fmt.Sprintf("^%s", remotestorage.ErrMissingTenantHeader))
@@ -163,6 +168,7 @@ func NewPrometheus(
metricRegistry,
nil,
rwEnabled,
+ allowedProtoMsgs,
otlpEnabled,
)

@@ -1079,6 +1085,11 @@ type interceptor struct {

func (i *interceptor) WriteHeader(rc int) {
i.status = rc

+ if rc == http.StatusNoContent {
+ // Write it now, because the Write() method won't be called.
+ i.OrigWriter.WriteHeader(rc)
+ }
}

func (i *interceptor) Write(b []byte) (int, error) {
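
The interceptor above wraps the upstream http.ResponseWriter: WriteHeader records the status code rather than forwarding it, and the header is only sent later, when the body is written. A 204 No Content response has no body, so Write never runs and the recorded status would never reach the client; the new branch therefore forwards a 204 immediately. A rough sketch of that pattern — field and method details beyond status and OrigWriter are assumptions, not SquirrelDB's actual code:

package apisketch

import "net/http"

// statusInterceptor is a hypothetical stand-in for SquirrelDB's interceptor type.
type statusInterceptor struct {
    OrigWriter http.ResponseWriter
    status     int
}

func (i *statusInterceptor) Header() http.Header { return i.OrigWriter.Header() }

func (i *statusInterceptor) WriteHeader(rc int) {
    i.status = rc

    if rc == http.StatusNoContent {
        // No body follows a 204, so Write() is never called: forward the
        // header now or the client would get the default 200.
        i.OrigWriter.WriteHeader(rc)
    }
}

func (i *statusInterceptor) Write(b []byte) (int, error) {
    if i.status != 0 && i.status != http.StatusNoContent {
        // The recorded status is flushed lazily, on the first body write.
        i.OrigWriter.WriteHeader(i.status)
        i.status = 0
    }

    return i.OrigWriter.Write(b)
}
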
12 changes: 7 additions & 5 deletions api/api_test.go
@@ -19,7 +19,6 @@ import (
"github.com/bleemeo/squirreldb/dummy"
"github.com/bleemeo/squirreldb/logger"
"github.com/bleemeo/squirreldb/types"

"github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
@@ -411,21 +410,23 @@ func TestWriteHandler(t *testing.T) {
{Name: tenantLabelName, Value: tenantValue},
{Name: "__name__", Value: "na-me"},
},
- expectStatus: http.StatusOK,
+ expectStatus: http.StatusNoContent,
expectedMetricsCount: 0,
},
{
name: "invalid-label-name",
labels: []prompb.Label{
{Name: tenantLabelName, Value: tenantValue},
{Name: "__name__", Value: "name"},
{Name: "la-bel", Value: "val"},
},
- expectStatus: http.StatusOK,
+ expectStatus: http.StatusNoContent,
expectedMetricsCount: 0,
},
{
name: "missing-tenant-header",
labels: []prompb.Label{
{Name: "__name__", Value: "name"},
{Name: "label", Value: "value"},
},
expectStatus: http.StatusBadRequest,
@@ -437,9 +438,10 @@
name: "invalid-mutable-label",
labels: []prompb.Label{
{Name: tenantLabelName, Value: tenantValue},
{Name: "__name__", Value: "name"},
{Name: "group", Value: "my_group"},
},
- expectStatus: http.StatusOK,
+ expectStatus: http.StatusNoContent,
expectedMetricsCount: 1,
absentMatchers: []*labels.Matcher{
{Name: "group", Value: "my_group"},
@@ -481,7 +483,7 @@ func TestWriteHandler(t *testing.T) {
true,
reg,
)
- writeHandler := remote.NewWriteHandler(log.NewLogfmtLogger(os.Stderr), reg, appendable)
+ writeHandler := remote.NewWriteHandler(log.NewLogfmtLogger(os.Stderr), reg, appendable, allowedProtoMsgs)

now := time.Now()
wr := &prompb.WriteRequest{
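
Both changes in this test follow the upgraded Prometheus remote-write handler: remote.NewWriteHandler now takes the list of accepted remote-write protobuf message versions (config.RemoteWriteProtoMsgV1/V2, the allowedProtoMsgs slice defined in api/api.go), and a successful write is answered with 204 No Content instead of 200 OK. A condensed sketch of that wiring; the import paths and the surrounding setup are assumptions, only the four-argument call mirrors the test above:

package writesketch

import (
    "net/http"
    "os"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/storage/remote"
)

// newWriteHandler is a hypothetical helper showing how the accepted protobuf
// versions are passed through; SquirrelDB's real wiring lives in api/api.go.
func newWriteHandler(appendable storage.Appendable) http.Handler {
    allowed := []config.RemoteWriteProtoMsg{
        config.RemoteWriteProtoMsgV1,
        config.RemoteWriteProtoMsgV2,
    }

    reg := prometheus.NewRegistry()

    // Requests that are ingested successfully now get 204 No Content back.
    return remote.NewWriteHandler(log.NewLogfmtLogger(os.Stderr), reg, appendable, allowed)
}
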
2 changes: 1 addition & 1 deletion api/promql/limiting_index.go
@@ -39,7 +39,7 @@ func (idx *limitingIndex) Search(
return r, err //nolint:wrapcheck
}

- totalSeries := atomic.AddUint32(idx.returnedSeries, uint32(r.Count()))
+ totalSeries := atomic.AddUint32(idx.returnedSeries, uint32(r.Count())) //nolint:gosec
if idx.maxTotalSeries != 0 && totalSeries > idx.maxTotalSeries {
return &dummy.MetricsLabel{}, errors.New("too many series evaluated by this PromQL")
}
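
The new //nolint:gosec markers silence gosec's G115 check (possible integer overflow when converting between integer types), which the newer golangci-lint enables; in places like this the truncation is either impossible or acceptable, so the warning is waived. Where the overflow genuinely matters — the "Prevent new metric IDs from overflowing int64" commit in this PR — an explicit bound check is the other option. A generic sketch of that approach, not taken from this repository:

package convsketch

import (
    "errors"
    "math"
)

// toUint32 is a hypothetical helper that refuses values which do not fit,
// instead of silently truncating them — the situation gosec G115 warns about.
func toUint32(n int64) (uint32, error) {
    if n < 0 || n > math.MaxUint32 {
        return 0, errors.New("value does not fit in uint32")
    }

    return uint32(n), nil // safe: bounds were checked above
}
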
6 changes: 3 additions & 3 deletions api/promql/queryable.go
@@ -158,7 +158,7 @@ func (s Store) makePerRequestData(r *http.Request) (perRequest, error) {
return perRequest{}, err
}

- maxEvaluatedSeries = uint32(tmp)
+ maxEvaluatedSeries = uint32(tmp) //nolint:gosec
}

limitIndex := &limitingIndex{
@@ -379,7 +379,7 @@ func (q *querier) Select(ctx context.Context, sortSeries bool, hints *storage.Se

// LabelValues returns all potential values for a label name.
// It is not safe to use the strings beyond the lifetime of the querier.
- func (q *querier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { //nolint: lll
+ func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { //nolint: lll
minT := time.UnixMilli(q.mint)
maxT := time.UnixMilli(q.maxt)

@@ -394,7 +394,7 @@ func (q *querier) LabelValues(ctx context.Context, name string, matchers ...*lab
}

// LabelNames returns all the unique label names present in the block in sorted order.
- func (q *querier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { //nolint: lll
+ func (q *querier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { //nolint: lll
minT := time.UnixMilli(q.mint)
maxT := time.UnixMilli(q.maxt)

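
The extra *storage.LabelHints argument comes from the upgraded Prometheus storage interface: LabelValues and LabelNames now receive optional hints (such as a limit on the number of results) that an implementation may honour or ignore, and SquirrelDB's querier ignores them, keeping its behaviour unchanged. A stripped-down example of a type that satisfies the new shape while discarding the hints; everything except the Prometheus types is invented for illustration:

package labelsketch

import (
    "context"

    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/util/annotations"
)

// noopLabelQuerier is a hypothetical type, not SquirrelDB's querier.
type noopLabelQuerier struct{}

// LabelValues matches the new signature; the hints (e.g. a result limit)
// are accepted but deliberately ignored.
func (noopLabelQuerier) LabelValues(
    _ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher,
) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

// LabelNames follows the same pattern: context, hints, then matchers.
func (noopLabelQuerier) LabelNames(
    _ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher,
) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}
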
20 changes: 10 additions & 10 deletions api/remotestorage/remotestorage.go
@@ -137,23 +137,23 @@ func (r *RemoteStorage) metricsFromTimeSeries(
requests := make([]types.LookupRequest, 0, len(pendingTimeSeries))

for _, promSeries := range pendingTimeSeries {
- min := int64(math.MaxInt64)
- max := int64(math.MinInt64)
+ tMin := int64(math.MaxInt64)
+ tMax := int64(math.MinInt64)

for _, s := range promSeries.Samples {
- if min > s.Timestamp {
- min = s.Timestamp
+ if tMin > s.Timestamp {
+ tMin = s.Timestamp
}

- if max < s.Timestamp {
- max = s.Timestamp
+ if tMax < s.Timestamp {
+ tMax = s.Timestamp
}
}

- if min < time.Now().Add(-tsdb.MaxPastDelay).Unix()*1000 {
+ if tMin < time.Now().Add(-tsdb.MaxPastDelay).Unix()*1000 {
r.lastLogPointInPastLock.Lock()
if time.Since(r.lastLogPointInPastAt) > pointInPastLogPeriod {
- log.Warn().Msgf("Points with timestamp %v will be ignored by pre-aggregation", time.Unix(min/1000, 0))
+ log.Warn().Msgf("Points with timestamp %v will be ignored by pre-aggregation", time.Unix(tMin/1000, 0))
r.lastLogPointInPastAt = time.Now()
}
r.lastLogPointInPastLock.Unlock()
@@ -162,8 +162,8 @@ func (r *RemoteStorage) metricsFromTimeSeries(
requests = append(requests, types.LookupRequest{
Labels: promSeries.Labels,
TTLSeconds: timeToLiveSeconds,
- End: time.Unix(max/1000, max%1000),
- Start: time.Unix(min/1000, min%1000),
+ End: time.Unix(tMax/1000, tMax%1000),
+ Start: time.Unix(tMin/1000, tMin%1000),
})
}

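
Renaming min/max to tMin/tMax (here and in batch/batch_test.go below) keeps local variables from shadowing the min and max builtins introduced in Go 1.21, which the updated linter set presumably flags; the timestamp logic itself is untouched. For reference, a tiny example of the builtins the old names collided with:

package main

import "fmt"

func main() {
    a, b := int64(42), int64(7)

    // Since Go 1.21, min and max are predeclared functions; a local variable
    // named min or max shadows them for the rest of the scope, which newer
    // linters tend to report.
    fmt.Println(min(a, b), max(a, b)) // prints: 7 42
}
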
12 changes: 6 additions & 6 deletions batch/batch_test.go
@@ -1983,17 +1983,17 @@ func TestBatch_write(t *testing.T) { //nolint:maintidx

func Test_randomDuration(t *testing.T) {
target := 50 * time.Millisecond
- min := 40 * time.Millisecond
- max := 60 * time.Millisecond
+ tMin := 40 * time.Millisecond
+ tMax := 60 * time.Millisecond

for range 100 {
got := randomDuration(target)
- if got < min {
- t.Errorf("randomDuration() = %v, want >= %v", got, min)
+ if got < tMin {
+ t.Errorf("randomDuration() = %v, want >= %v", got, tMin)
}

- if max < got {
- t.Errorf("randomDuration() = %v, want <= %v", got, max)
+ if tMax < got {
+ t.Errorf("randomDuration() = %v, want <= %v", got, tMax)
}
}
}
2 changes: 1 addition & 1 deletion build.sh
@@ -5,7 +5,7 @@ set -e
USER_UID=$(id -u)

# Should be the same as run-tests.sh
GORELEASER_VERSION="v2.1.0"
GORELEASER_VERSION="v2.2.0"

case "$1" in
"")
4 changes: 2 additions & 2 deletions cassandra/index/bulk_deleted.go
@@ -144,7 +144,7 @@ func (d *deleter) Delete(ctx context.Context) error {
allDeleteIDs := roaring.NewBitmap(d.deleteIDs...)

for _, shard := range shards.Slice() {
- shard := int32(shard)
+ shard := int32(shard) //nolint:gosec

it := maybePresent[shard]
if it == nil || !it.Any() {
@@ -202,7 +202,7 @@ func (d *deleter) Delete(ctx context.Context) error {
concurrentDelete,
func(ctx context.Context, work chan<- func() error) error {
for _, id := range d.deleteIDs {
- id := types.MetricID(id)
+ id := types.MetricID(id) //nolint:gosec
task := func() error {
err := d.c.store.DeleteID2Labels(ctx, id)
if err != nil && !errors.Is(err, gocql.ErrNotFound) {
2 changes: 1 addition & 1 deletion cassandra/index/cache.go
@@ -121,7 +121,7 @@ func (c *labelsLookupCache) Drop(ids []uint64) int {
defer c.l.Unlock()

for _, id := range ids {
- delete(c.cache, types.MetricID(id))
+ delete(c.cache, types.MetricID(id)) //nolint:gosec
}

return len(c.cache)