gosec: handle integer conversions and potential overflows
oxzi committed Oct 9, 2024
1 parent 28b06b7 commit 4ab50de
Showing 5 changed files with 34 additions and 21 deletions.
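
All five files address gosec rule G115, which flags integer conversions that can overflow or wrap. As background (not part of the commit), a minimal standalone sketch of the behaviour the rule warns about: Go integer conversions never fail, they silently wrap around.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// A conversion between integer types never errors in Go; out-of-range
	// values simply wrap around, which is what gosec G115 warns about.
	var d int64 = -1
	fmt.Println(uint64(d)) // 18446744073709551615

	var big uint64 = math.MaxUint64
	fmt.Println(int64(big)) // -1
}
```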
2 changes: 1 addition & 1 deletion cmd/icingadb-migrate/convert.go
@@ -299,7 +299,7 @@ func convertDowntimeRows(
Author: row.AuthorName,
Comment: row.CommentData,
IsFlexible: types.Bool{Bool: row.IsFixed == 0, Valid: true},
- FlexibleDuration: uint64(row.Duration) * 1000,
+ FlexibleDuration: uint64(max(row.Duration, 0)) * 1000, // #nosec G115 -- IDO duration is a BIGINT; shouldn't be negative
ScheduledStartTime: scheduledStart,
ScheduledEndTime: scheduledEnd,
StartTime: startTime,
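
The change above clamps the duration before converting it, relying on the max builtin from Go 1.21. A self-contained sketch of the same pattern; the downtimeRow type and flexibleDurationMs helper are illustrative stand-ins, not types from the migration tool:

```go
package sketch

// downtimeRow stands in for the IDO downtime row; only the Duration field
// (a BIGINT, i.e. int64, that should never be negative) matters here.
type downtimeRow struct {
	Duration int64
}

// flexibleDurationMs converts the duration to milliseconds. Clamping to zero
// first means the uint64 conversion cannot wrap around for an unexpected
// negative value.
func flexibleDurationMs(row downtimeRow) uint64 {
	return uint64(max(row.Duration, 0)) * 1000 // #nosec G115 -- non-negative after clamp
}
```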
11 changes: 7 additions & 4 deletions pkg/icingadb/cleanup.go
@@ -32,7 +32,7 @@ func (stmt *CleanupStmt) CleanupOlderThan(
defer db.Log(ctx, q, &counter).Stop()

for {
- var rowsDeleted int64
+ var rowsDeleted uint64

err := retry.WithBackoff(
ctx,
@@ -45,7 +45,10 @@ func (stmt *CleanupStmt) CleanupOlderThan(
return database.CantPerformQuery(err, q)
}

- rowsDeleted, err = rs.RowsAffected()
+ i, err := rs.RowsAffected()
+ if err == nil && i >= 0 {
+ rowsDeleted = uint64(i)
+ }

return err
},
@@ -57,15 +60,15 @@ func (stmt *CleanupStmt) CleanupOlderThan(
return 0, err
}

- counter.Add(uint64(rowsDeleted))
+ counter.Add(rowsDeleted)

for _, onSuccess := range onSuccess {
if err := onSuccess(ctx, make([]struct{}, rowsDeleted)); err != nil {
return 0, err
}
}

- if rowsDeleted < int64(count) {
+ if rowsDeleted < count {
break
}
}
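
For reference, the RowsAffected handling above shown in isolation, using database/sql directly instead of the project's database wrapper; the query and table name are placeholders:

```go
package sketch

import (
	"context"
	"database/sql"
)

// deleteBatch deletes one batch and reports the count as a uint64.
// sql.Result.RowsAffected returns an int64, so it is only converted after
// checking that no error occurred and the value is non-negative.
func deleteBatch(ctx context.Context, db *sql.DB) (uint64, error) {
	rs, err := db.ExecContext(ctx, "DELETE FROM history_placeholder LIMIT 5000")
	if err != nil {
		return 0, err
	}

	var rowsDeleted uint64
	i, err := rs.RowsAffected()
	if err == nil && i >= 0 {
		rowsDeleted = uint64(i) // #nosec G115 -- checked non-negative above
	}

	return rowsDeleted, err
}
```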
18 changes: 9 additions & 9 deletions pkg/icingadb/delta_test.go
@@ -208,14 +208,14 @@ func testDeltaVerifyResult(t *testing.T, name string, expected map[uint64]uint64
}

func BenchmarkDelta(b *testing.B) {
- for n := 1 << 10; n <= 1<<20; n <<= 1 {
- b.Run(strconv.Itoa(n), func(b *testing.B) {
+ for n := uint64(1 << 10); n <= 1<<20; n <<= 1 {
+ b.Run(strconv.FormatUint(n, 10), func(b *testing.B) {
benchmarkDelta(b, n)
})
}
}

- func benchmarkDelta(b *testing.B, numEntities int) {
+ func benchmarkDelta(b *testing.B, numEntities uint64) {
chActual := make([]chan database.Entity, b.N)
chDesired := make([]chan database.Entity, b.N)
for i := 0; i < b.N; i++ {
@@ -231,20 +231,20 @@ func benchmarkDelta(b *testing.B, numEntities int) {
binary.BigEndian.PutUint64(e.PropertiesChecksum, checksum)
return e
}
- for i := 0; i < numEntities; i++ {
+ for i := uint64(0); i < numEntities; i++ {
// each iteration writes exactly one entity to each channel
var eActual, eDesired database.Entity
switch i % 3 {
case 0: // distinct IDs
- eActual = makeEndpoint(1, uint64(i), uint64(i))
- eDesired = makeEndpoint(2, uint64(i), uint64(i))
+ eActual = makeEndpoint(1, i, i)
+ eDesired = makeEndpoint(2, i, i)
case 1: // same ID, same checksum
- e := makeEndpoint(3, uint64(i), uint64(i))
+ e := makeEndpoint(3, i, i)
eActual = e
eDesired = e
case 2: // same ID, different checksum
- eActual = makeEndpoint(4, uint64(i), uint64(i))
- eDesired = makeEndpoint(4, uint64(i), uint64(i+1))
+ eActual = makeEndpoint(4, i, i)
+ eDesired = makeEndpoint(4, i, i+1)
}
for _, ch := range chActual {
ch <- eActual
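
The benchmark changes keep the entity count unsigned end to end, which removes the per-use uint64(i) casts and swaps strconv.Itoa (which takes an int) for strconv.FormatUint. A tiny standalone illustration of that naming detail, unrelated to the delta logic itself:

```go
package sketch

import "strconv"

// benchmarkSizes mirrors the loop-header change in isolation: with an
// unsigned counter, strconv.FormatUint is the direct fit for building the
// sub-benchmark names, whereas strconv.Itoa would need a cast back to int.
func benchmarkSizes() []string {
	var names []string
	for n := uint64(1 << 10); n <= 1<<20; n <<= 1 {
		names = append(names, strconv.FormatUint(n, 10))
	}
	return names
}
```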
17 changes: 16 additions & 1 deletion pkg/icingadb/history/retention.go
@@ -11,6 +11,7 @@ import (
"github.com/icinga/icingadb/pkg/icingaredis/telemetry"
"github.com/pkg/errors"
"go.uber.org/zap"
"math"
"time"
)

@@ -172,6 +173,20 @@ func (r *Retention) Start(ctx context.Context) error {
r.logger.Debugf("Skipping history retention for category %s", stmt.Category)
continue
}
+ // #nosec G115 -- First if-clause checks int overflow
+ if days > math.MaxInt || time.Now().AddDate(0, 0, -int(days)).Before(time.Unix(0, 0)) {
+ // Ensure that the time cannot overflow.
+ //
+ // First, make sure that days do not exceed max int, as otherwise casts will overflow and later subtractions
+ // may overflow as well. By being capped to [0, max int), for each current timestamp t, t-days > min int.
+ // As the database schema uses an unsigned timestamp, the second check ensures that the relative offset,
+ // later called olderThan, does not overflow.
+ //
+ // For the periodic.Start function below, time marches forward while the value of days is static.
+ // Subtracting days from an increasing timestamp will also not result in a negative or overflowing value.
+ r.logger.Errorf("Skipping history retention for category %s as %d days will overflow", stmt.Category, days)
+ continue
+ }

r.logger.Debugw(
fmt.Sprintf("Starting history retention for category %s", stmt.Category),
@@ -182,7 +197,7 @@

stmt := stmt
periodic.Start(ctx, r.interval, func(tick periodic.Tick) {
- olderThan := tick.Time.AddDate(0, 0, -int(days))
+ olderThan := tick.Time.AddDate(0, 0, -int(days)) // #nosec G115 -- int overflow checked above

r.logger.Debugf("Cleaning up historical data for category %s from table %s older than %s",
stmt.Category, stmt.Table, olderThan)
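
A condensed sketch of the guard added above, pulled out into a hypothetical helper; days is assumed here to be a uint64, and olderThanFor is not a function from the repository:

```go
package sketch

import (
	"errors"
	"math"
	"time"
)

// olderThanFor computes now minus the retention period. days must be range
// checked before the int conversion that time.AddDate requires; the second
// condition additionally rejects offsets that would land before the Unix
// epoch, since the schema stores unsigned timestamps.
func olderThanFor(now time.Time, days uint64) (time.Time, error) {
	// #nosec G115 -- the first condition rules out the int overflow
	if days > math.MaxInt || now.AddDate(0, 0, -int(days)).Before(time.Unix(0, 0)) {
		return time.Time{}, errors.New("retention period in days is too large")
	}

	return now.AddDate(0, 0, -int(days)), nil // #nosec G115 -- checked above
}
```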
7 changes: 1 addition & 6 deletions pkg/icingadb/types/notification_type.go
@@ -15,17 +15,12 @@ type NotificationType uint16
func (nt *NotificationType) UnmarshalText(text []byte) error {
s := string(text)

- i, err := strconv.ParseUint(s, 10, 64)
+ i, err := strconv.ParseUint(s, 10, 16)
if err != nil {
return types.CantParseUint64(err, s)
}

n := NotificationType(i)
- if uint64(n) != i {
- // Truncated due to above cast, obviously too high
- return badNotificationType(s)
- }
-
if _, ok := notificationTypes[n]; !ok {
return badNotificationType(s)
}
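
With bitSize 16, strconv.ParseUint itself enforces the uint16 range, which is why the manual truncation check could be dropped. A runnable illustration, independent of the NotificationType map:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Values that do not fit into a uint16 come back with a range error, so a
	// later cast to a 16-bit type cannot silently truncate.
	for _, s := range []string{"512", "65535", "65536"} {
		i, err := strconv.ParseUint(s, 10, 16)
		fmt.Println(s, uint16(i), err)
	}
}
```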
