Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

server: fix unexported-return lint issue #19052

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion server/auth/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -938,7 +938,7 @@ func (as *authStore) IsAuthEnabled() bool {
}

// NewAuthStore creates a new AuthStore.
func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) *authStore {
func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) AuthStore {
if lg == nil {
lg = zap.NewNop()
}
Expand Down
10 changes: 7 additions & 3 deletions server/auth/store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,12 +116,16 @@ func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testin

// The UserAdd function cannot generate old etcd version user data (user's option is nil)
// add special users through the underlying interface
addUserWithNoOption(as)
asImpl, ok := as.(*authStore)
if !ok {
t.Fatal(errors.New("addUserWithNoOption: needs an AuthStore implementation"))
}
addUserWithNoOption(asImpl)

tearDown := func(_ *testing.T) {
as.Close()
}
return as, tearDown
return asImpl, tearDown
}

func addUserWithNoOption(as *authStore) {
Expand All @@ -136,7 +140,7 @@ func addUserWithNoOption(as *authStore) {
as.refreshRangePermCache(tx)
}

func enableAuthAndCreateRoot(as *authStore) error {
func enableAuthAndCreateRoot(as AuthStore) error {
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", HashedPassword: encodePassword("root"), Options: &authpb.UserAddOptions{NoPassword: false}})
if err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion server/etcdserver/adapters.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ type serverVersionAdapter struct {
*EtcdServer
}

func NewServerVersionAdapter(s *EtcdServer) *serverVersionAdapter {
func NewServerVersionAdapter(s *EtcdServer) serverversion.Server {
return &serverVersionAdapter{
EtcdServer: s,
}
Expand Down
14 changes: 13 additions & 1 deletion server/etcdserver/apply/apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,18 @@ type Result struct {

type applyFunc func(r *pb.InternalRaftRequest) *Result

// ApplierMembership defines the applier membership interface.
// It covers the membership-related raft request types; each method takes a
// shouldApplyV3 flag whose semantics are defined in the membership package
// (NOTE(review): presumably it gates writes to the v3 backend — confirm).
type ApplierMembership interface {
	// ClusterVersionSet sets the version of the cluster.
	ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3)

	// ClusterMemberAttrSet sets a cluster member's attributes, if the member is not removed.
	ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3)

	// DowngradeInfoSet sets the downgrade info.
	DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3)
}

// applierV3 is the interface for processing V3 raft messages
type applierV3 interface {
// Apply executes the generic portion of application logic for the current applier, but
Expand Down Expand Up @@ -403,7 +415,7 @@ type applierMembership struct {
snapshotServer SnapshotServer
}

func NewApplierMembership(lg *zap.Logger, cluster *membership.RaftCluster, snapshotServer SnapshotServer) *applierMembership {
func NewApplierMembership(lg *zap.Logger, cluster *membership.RaftCluster, snapshotServer SnapshotServer) ApplierMembership {
return &applierMembership{
lg: lg,
cluster: cluster,
Expand Down
10 changes: 8 additions & 2 deletions server/mock/mockstorage/storage_recorder.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,20 +18,26 @@ import (
"github.com/coreos/go-semver/semver"

"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/server/v3/storage"
"go.etcd.io/raft/v3"
"go.etcd.io/raft/v3/raftpb"
)

// StorageRecorder is a storage.Storage that additionally records the
// operations performed on it (via the embedded testutil.Recorder) so
// tests can assert on the sequence of storage calls.
type StorageRecorder interface {
	storage.Storage
	testutil.Recorder
}

// storageRecorder is the unexported implementation returned by the
// NewStorageRecorder* constructors.
type storageRecorder struct {
	testutil.Recorder
	dbPath string // must have '/' suffix if set
}

func NewStorageRecorder(db string) *storageRecorder {
// NewStorageRecorder returns a StorageRecorder that buffers the recorded
// actions in memory and reports db as its database path.
func NewStorageRecorder(db string) StorageRecorder {
	rec := &storageRecorder{
		Recorder: &testutil.RecorderBuffered{},
		dbPath:   db,
	}
	return rec
}

func NewStorageRecorderStream(db string) *storageRecorder {
// NewStorageRecorderStream returns a StorageRecorder whose actions are
// recorded through a streaming recorder (testutil.NewRecorderStream),
// with db as its database path.
func NewStorageRecorderStream(db string) StorageRecorder {
	rec := &storageRecorder{
		Recorder: testutil.NewRecorderStream(),
		dbPath:   db,
	}
	return rec
}

Expand Down
3 changes: 3 additions & 0 deletions server/storage/mvcc/kvstore.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ type store struct {

// NewStore returns a new store. It is useful to create a store inside
// mvcc pkg. It should only be used for testing externally.
// revive:disable:unexported-return this is used internally in the mvcc pkg
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
if lg == nil {
lg = zap.NewNop()
Expand Down Expand Up @@ -132,6 +133,8 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi
return s
}

// revive:enable:unexported-return

func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
if ctx == nil || ctx.Err() != nil {
select {
Expand Down
2 changes: 1 addition & 1 deletion server/storage/mvcc/watchable_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ var _ WatchableKV = (*watchableStore)(nil)
// cancel operations.
type cancelFunc func()

func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV {
s := newWatchableStore(lg, b, le, cfg)
s.wg.Add(2)
go s.syncWatchersLoop()
Expand Down
26 changes: 18 additions & 8 deletions server/storage/mvcc/watchable_store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package mvcc

import (
"errors"
"fmt"
"reflect"
"sync"
Expand Down Expand Up @@ -44,7 +45,7 @@ func TestWatch(t *testing.T) {
defer w.Close()

w.Watch(0, testKey, nil, 0)
if !s.synced.contains(string(testKey)) {
if !s.(*watchableStore).synced.contains(string(testKey)) {
// the key must have had an entry in synced
t.Errorf("existence = false, want true")
}
Expand All @@ -67,7 +68,7 @@ func TestNewWatcherCancel(t *testing.T) {
t.Error(err)
}

if s.synced.contains(string(testKey)) {
if s.(*watchableStore).synced.contains(string(testKey)) {
// the key should have been deleted
t.Errorf("existence = true, want false")
}
Expand Down Expand Up @@ -340,7 +341,11 @@ func TestWatchNoEventLossOnCompact(t *testing.T) {
require.NoError(t, err)
}
// fill up w.Chan() with 1 buf via 2 compacted watch response
s.syncWatchers([]mvccpb.Event{})
sImpl, ok := s.(*watchableStore)
if !ok {
t.Fatal(errors.New("TestWatchNoEventLossOnCompact: needs a WatchableKV implementation"))
}
sImpl.syncWatchers([]mvccpb.Event{})

for len(watchers) > 0 {
resp := <-w.Chan()
Expand All @@ -355,7 +360,7 @@ func TestWatchNoEventLossOnCompact(t *testing.T) {
require.Equalf(t, nextRev, ev.Kv.ModRevision, "got event revision %d but want %d for watcher with watch ID %d", ev.Kv.ModRevision, nextRev, resp.WatchID)
nextRev++
}
if nextRev == s.rev()+1 {
if nextRev == sImpl.rev()+1 {
delete(watchers, resp.WatchID)
}
}
Expand Down Expand Up @@ -566,10 +571,15 @@ func TestWatchBatchUnsynced(t *testing.T) {
}
assert.Equal(t, tc.expectRevisionBatches, revisionBatches)

s.store.revMu.Lock()
defer s.store.revMu.Unlock()
assert.Equal(t, 1, s.synced.size())
assert.Equal(t, 0, s.unsynced.size())
sImpl, ok := s.(*watchableStore)
if !ok {
t.Fatal(errors.New("TestWatchBatchUnsynced: needs a WatchableKV implementation"))
}

sImpl.store.revMu.Lock()
defer sImpl.store.revMu.Unlock()
assert.Equal(t, 1, sImpl.synced.size())
assert.Equal(t, 0, sImpl.unsynced.size())
})
}
}
Expand Down
10 changes: 9 additions & 1 deletion server/storage/schema/alarm.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,20 @@ import (
"go.etcd.io/etcd/server/v3/storage/backend"
)

// AlarmBackend defines the backend operations for persisting and reading
// alarms. Methods with the Must prefix follow the Go convention that
// failure is fatal rather than returned.
type AlarmBackend interface {
	// CreateAlarmBucket creates the bucket that stores alarms.
	CreateAlarmBucket()
	// MustPutAlarm persists the given alarm.
	MustPutAlarm(alarm *etcdserverpb.AlarmMember)
	// MustDeleteAlarm removes the given alarm.
	MustDeleteAlarm(alarm *etcdserverpb.AlarmMember)
	// GetAllAlarms returns all persisted alarms.
	GetAllAlarms() ([]*etcdserverpb.AlarmMember, error)
	// ForceCommit commits pending changes to the underlying backend.
	ForceCommit()
}

// alarmBackend implements AlarmBackend on top of a backend.Backend.
type alarmBackend struct {
	lg *zap.Logger
	be backend.Backend
}

func NewAlarmBackend(lg *zap.Logger, be backend.Backend) *alarmBackend {
func NewAlarmBackend(lg *zap.Logger, be backend.Backend) AlarmBackend {
return &alarmBackend{
lg: lg,
be: be,
Expand Down
2 changes: 1 addition & 1 deletion server/storage/schema/auth.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ type authBackend struct {

var _ auth.AuthBackend = (*authBackend)(nil)

func NewAuthBackend(lg *zap.Logger, be backend.Backend) *authBackend {
func NewAuthBackend(lg *zap.Logger, be backend.Backend) auth.AuthBackend {
return &authBackend{
be: be,
lg: lg,
Expand Down
16 changes: 15 additions & 1 deletion server/storage/schema/membership.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,26 @@ const (
MemberRaftAttributesSuffix = "raftAttributes"
)

// MembershipBackend defines the membership backend interface.
// It persists and reads cluster membership state — members, cluster
// version, and downgrade info — in the storage backend. Methods with the
// Must prefix follow the Go convention that failure is fatal.
type MembershipBackend interface {
	// MustSaveMemberToBackend persists member m.
	MustSaveMemberToBackend(m *membership.Member)
	// TrimClusterFromBackend removes cluster-level data from the backend.
	TrimClusterFromBackend() error
	// MustDeleteMemberFromBackend removes the member with the given id.
	MustDeleteMemberFromBackend(id types.ID)
	// MustReadMembersFromBackend reads members from the backend.
	// NOTE(review): the two maps appear to be active members and removed
	// member IDs — confirm against the implementation.
	MustReadMembersFromBackend() (map[types.ID]*membership.Member, map[types.ID]bool)
	// TrimMembershipFromBackend removes membership data from the backend.
	TrimMembershipFromBackend() error
	// MustSaveClusterVersionToBackend persists the cluster version.
	MustSaveClusterVersionToBackend(ver *semver.Version)
	// MustSaveDowngradeToBackend persists the downgrade info.
	MustSaveDowngradeToBackend(downgrade *version.DowngradeInfo)
	// MustCreateBackendBuckets creates the buckets membership data lives in.
	MustCreateBackendBuckets()
	// ClusterVersionFromBackend reads the stored cluster version
	// (presumably nil when unset — confirm).
	ClusterVersionFromBackend() *semver.Version
	// DowngradeInfoFromBackend reads the stored downgrade info
	// (presumably nil when unset — confirm).
	DowngradeInfoFromBackend() *version.DowngradeInfo
}

// membershipBackend implements MembershipBackend on top of a backend.Backend.
type membershipBackend struct {
	lg *zap.Logger
	be backend.Backend
}

func NewMembershipBackend(lg *zap.Logger, be backend.Backend) *membershipBackend {
func NewMembershipBackend(lg *zap.Logger, be backend.Backend) MembershipBackend {
return &membershipBackend{
lg: lg,
be: be,
Expand Down
6 changes: 2 additions & 4 deletions server/storage/schema/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (

"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/wal"
)

// Validate checks provided backend to confirm that schema used is supported.
Expand All @@ -47,10 +48,7 @@ func localBinaryVersion() semver.Version {
return semver.Version{Major: v.Major, Minor: v.Minor}
}

type WALVersion interface {
// MinimalEtcdVersion returns minimal etcd version able to interpret WAL log.
MinimalEtcdVersion() *semver.Version
}
type WALVersion = wal.WALVersion

// Migrate updates storage schema to provided target version.
// Downgrading requires that provided WAL doesn't contain unsupported entries.
Expand Down
7 changes: 6 additions & 1 deletion server/storage/wal/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,14 @@ import (
"go.etcd.io/raft/v3/raftpb"
)

// WALVersion reports version requirements derived from a WAL's contents.
type WALVersion interface {
	// MinimalEtcdVersion returns minimal etcd version able to interpret WAL log.
	MinimalEtcdVersion() *semver.Version
}

// ReadWALVersion reads remaining entries from opened WAL and returns struct
// that implements schema.WAL interface.
func ReadWALVersion(w *WAL) (*walVersion, error) {
func ReadWALVersion(w *WAL) (WALVersion, error) {
_, _, ents, err := w.ReadAll()
if err != nil {
return nil, err
Expand Down
Loading