Stop using *_STORAGE_API_ENDPOINT environment variables (#759)
* Support reading GCS storage API endpoint from file

* Update charts, docs

* Remove `AZURE_STORAGE_API_ENDPOINT` and replace with `domain` field instead

* Fix tests; Address review comments from @unmarshall

* Address review comments from @renormalize
shreyas-s-rao authored Aug 16, 2024
1 parent 88ad0e1 commit 6428ff7
Showing 10 changed files with 98 additions and 93 deletions.
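For orientation, the shape of the new Azure credential secret can be sketched in Go. This is an illustrative snippet, not code from the commit; the struct mirrors `absCredentials` in `pkg/snapstore/abs_snapstore.go` below, and the sample account, key, and domain values are invented.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the absCredentials struct introduced in this commit: the endpoint
// override is no longer the AZURE_STORAGE_API_ENDPOINT environment variable
// but an optional "domain" field in the credential JSON.
type absCredentials struct {
	BucketName     string  `json:"bucketName"`
	StorageAccount string  `json:"storageAccount"`
	StorageKey     string  `json:"storageKey"`
	Domain         *string `json:"domain,omitempty"`
}

func main() {
	// Hypothetical secret contents; only "domain" is new, and it is optional.
	secret := []byte(`{
		"storageAccount": "examplestorageaccount",
		"storageKey": "example-storage-key",
		"domain": "blob.core.chinacloudapi.cn"
	}`)

	var creds absCredentials
	if err := json.Unmarshal(secret, &creds); err != nil {
		panic(err)
	}
	if creds.Domain != nil {
		fmt.Println("blob service domain override:", *creds.Domain)
	} else {
		fmt.Println("no override; the default global domain is used")
	}
}
```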
4 changes: 2 additions & 2 deletions chart/etcd-backup-restore/templates/etcd-backup-secret.yaml
@@ -20,8 +20,8 @@ data:
{{- if .Values.backup.abs.emulatorEnabled }}
emulatorEnabled: {{ .Values.backup.abs.emulatorEnabled | b64enc}}
{{- end }}
{{- if .Values.backup.abs.storageAPIEndpoint }}
storageAPIEndpoint: {{ .Values.backup.abs.storageAPIEndpoint | b64enc}}
{{- if .Values.backup.abs.domain }}
domain: {{ .Values.backup.abs.domain | b64enc}}
{{- end }}
{{- else if eq .Values.backup.storageProvider "GCS" }}
serviceaccount.json : {{ .Values.backup.gcs.serviceAccountJson | b64enc }}
16 changes: 0 additions & 16 deletions chart/etcd-backup-restore/templates/etcd-statefulset.yaml
@@ -199,25 +199,9 @@ spec:
key: "emulatorEnabled"
optional: true
{{- end }}
{{- if .Values.backup.abs.storageAPIEndpoint }}
- name: "AZURE_STORAGE_API_ENDPOINT"
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-etcd-backup
key: "storageAPIEndpoint"
optional: true
{{- end }}
{{- else if eq .Values.backup.storageProvider "GCS" }}
- name: "GOOGLE_APPLICATION_CREDENTIALS"
value: "/root/.gcp/serviceaccount.json"
{{- if .Values.backup.gcs.storageAPIEndpoint }}
- name: "GOOGLE_STORAGE_API_ENDPOINT"
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-etcd-backup
key: "storageAPIEndpoint"
optional: true
{{- end }}
{{- if .Values.backup.gcs.emulatorEnabled }}
- name: "GOOGLE_EMULATOR_ENABLED"
valueFrom:
2 changes: 1 addition & 1 deletion chart/etcd-backup-restore/values.yaml
@@ -97,8 +97,8 @@ backup:
# abs:
# storageAccount: storage-account-with-object-storage-privileges
# storageKey: storage-key-with-object-storage-privileges
# domain: non-default-domain-for-object-storage-service
# emulatorEnabled: boolean-flag-to-enable-e2e-tests-to-use-azure-emulator # optional
# storageAPIEndpoint: endpoint-override-for-storage-api # if emulatorEnabled is true
# swift:
# authURL: identity-server-url
# domainName: domain-name
5 changes: 3 additions & 2 deletions docs/deployment/getting_started.md
@@ -20,10 +20,11 @@ The procedure to provide credentials to access the cloud provider object store v
* For `Google Cloud Storage`:
1. The service account json file should be provided in the `~/.gcp` as a `service-account-file.json` file.
2. The service account json file should be provided, and the file path should be made available as environment variable `GOOGLE_APPLICATION_CREDENTIALS`.
3. If using a storage API [endpoint override](https://pkg.go.dev/cloud.google.com/go#hdr-Endpoint_Override), such as a [regional endpoint](https://cloud.google.com/storage/docs/regional-endpoints) or a local GCS emulator endpoint, then the endpoint must be made available via environment variable `GOOGLE_STORAGE_API_ENDPOINT`, in the format `http[s]://host[:port]/storage/v1/`.
3. If using a storage API [endpoint override](https://pkg.go.dev/cloud.google.com/go#hdr-Endpoint_Override), such as a [regional endpoint](https://cloud.google.com/storage/docs/regional-endpoints) or a local GCS emulator endpoint, then the endpoint must be made available via a file named `storageAPIEndpoint` residing in the `~/.gcp` directory.

* For `Azure Blob storage`:
1. The secret file should be provided, and the file path should be made available as an environment variable: `AZURE_APPLICATION_CREDENTIALS`.
1. The JSON secret file should be provided, and the file path should be made available as an environment variable: `AZURE_APPLICATION_CREDENTIALS`.
2. The Azure Blob Storage domain can be overridden by providing it via an optional field `domain` in the above-mentioned JSON secret file.

* For `Openstack Swift`:
1. The secret file should be provided, and the file path should be made available as an environment variable: `OPENSTACK_APPLICATION_CREDENTIALS`.
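As described in the GCS item above, the endpoint override now travels as a file sitting next to the service account JSON rather than as `GOOGLE_STORAGE_API_ENDPOINT`. A minimal sketch of materializing that file follows; the directory, file mode, and emulator endpoint here are assumptions, not values taken from the commit.

```go
package main

import (
	"os"
	"path/filepath"
)

// Sketch: write the optional endpoint override file described in the docs above.
// It must live in the same directory as the GCS service account JSON and be named
// "storageAPIEndpoint"; the docs give the format as http[s]://host[:port]/storage/v1/.
func main() {
	gcpDir := filepath.Join(os.Getenv("HOME"), ".gcp")  // assumed location, as in the docs
	endpoint := "http://localhost:8000/storage/v1/"     // e.g. a local GCS emulator (assumed value)

	if err := os.MkdirAll(gcpDir, 0o700); err != nil {
		panic(err)
	}
	if err := os.WriteFile(filepath.Join(gcpDir, "storageAPIEndpoint"), []byte(endpoint), 0o600); err != nil {
		panic(err)
	}
}
```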
@@ -8,4 +8,4 @@ data:
storageAccount: YWRtaW4= # admin
storageKey: YWRtaW4= # admin
# emulatorEnabled: dHJ1ZQ== # true (optional)
# storageAPIEndpoint: aHR0cDovL2F6dXJpdGUtc2VydmljZToxMDAwMA== # http://azurite-service:10000 (optional)
# domain: YmxvYi5jb3JlLmNoaW5hY2xvdWRhcGkuY24= # blob.core.chinacloudapi.cn (optional)
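The commented-out `domain` value in the example secret above is plain base64. A quick, illustrative check in Go reproduces it:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Encode the Azure China blob domain exactly as it appears in the example secret above.
	domain := "blob.core.chinacloudapi.cn"
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(domain)))
	// Output: YmxvYi5jb3JlLmNoaW5hY2xvdWRhcGkuY24=
}
```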
98 changes: 51 additions & 47 deletions pkg/snapstore/abs_snapstore.go
@@ -22,15 +22,14 @@ import (

"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/sirupsen/logrus"
"k8s.io/utils/pointer"

brtypes "github.com/gardener/etcd-backup-restore/pkg/types"
)

const (
absCredentialDirectory = "AZURE_APPLICATION_CREDENTIALS"
absCredentialJSONFile = "AZURE_APPLICATION_CREDENTIALS_JSON"
// AzuriteEndpoint is the environment variable which indicates the endpoint at which the Azurite emulator is hosted
AzuriteEndpoint = "AZURE_STORAGE_API_ENDPOINT"
)

// ABSSnapStore is an ABS backed snapstore.
@@ -44,29 +43,35 @@ type ABSSnapStore struct {
}

type absCredentials struct {
BucketName string `json:"bucketName"`
SecretKey string `json:"storageKey"`
StorageAccount string `json:"storageAccount"`
BucketName string `json:"bucketName"`
StorageAccount string `json:"storageAccount"`
StorageKey string `json:"storageKey"`
Domain *string `json:"domain,omitempty"`
}

// NewABSSnapStore creates a new ABSSnapStore using a shared configuration and a specified bucket
func NewABSSnapStore(config *brtypes.SnapstoreConfig) (*ABSSnapStore, error) {
storageAccount, storageKey, err := getCredentials(getEnvPrefixString(config.IsSource))
absCreds, err := getCredentials(getEnvPrefixString(config.IsSource))
if err != nil {
return nil, fmt.Errorf("failed to get credentials: %v", err)
return nil, fmt.Errorf("failed to get sharedKeyCredential: %v", err)
}

credentials, err := azblob.NewSharedKeyCredential(storageAccount, storageKey)
sharedKeyCredential, err := azblob.NewSharedKeyCredential(absCreds.StorageAccount, absCreds.StorageKey)
if err != nil {
return nil, fmt.Errorf("failed to create shared key credentials: %v", err)
return nil, fmt.Errorf("failed to create shared key sharedKeyCredential: %v", err)
}

pipeline := azblob.NewPipeline(credentials, azblob.PipelineOptions{
pipeline := azblob.NewPipeline(sharedKeyCredential, azblob.PipelineOptions{
Retry: azblob.RetryOptions{
TryTimeout: downloadTimeout,
}})

blobURL, err := ConstructBlobServiceURL(credentials)
domain := brtypes.AzureBlobStorageGlobalDomain
if absCreds.Domain != nil {
domain = *absCreds.Domain
}

blobURL, err := ConstructBlobServiceURL(absCreds.StorageAccount, domain)
if err != nil {
return nil, err
}
Expand All @@ -78,69 +83,62 @@ func NewABSSnapStore(config *brtypes.SnapstoreConfig) (*ABSSnapStore, error) {
}

// ConstructBlobServiceURL constructs the Blob Service URL based on the activation status of the Azurite Emulator.
// It checks the environment variables for emulator configuration and constructs the URL accordingly.
// The function expects two environment variables:
// It checks the environment variable for emulator configuration and constructs the URL accordingly.
// The function expects the following environment variable:
// - AZURE_EMULATOR_ENABLED: Indicates whether the Azurite Emulator is enabled (expects "true" or "false").
// - AZURE_STORAGE_API_ENDPOINT: Specifies the Azurite Emulator endpoint when the emulator is enabled.
func ConstructBlobServiceURL(credentials *azblob.SharedKeyCredential) (*url.URL, error) {
defaultURL, err := url.Parse(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName))
if err != nil {
return nil, fmt.Errorf("failed to parse default service URL: %w", err)
}
func ConstructBlobServiceURL(storageAccount, domain string) (*url.URL, error) {
scheme := "https"

emulatorEnabled, ok := os.LookupEnv(EnvAzureEmulatorEnabled)
if !ok {
return defaultURL, nil
}
isEmulator, err := strconv.ParseBool(emulatorEnabled)
if err != nil {
return nil, fmt.Errorf("invalid value for %s: %s, error: %w", EnvAzureEmulatorEnabled, emulatorEnabled, err)
}
if !isEmulator {
return defaultURL, nil
}
endpoint, ok := os.LookupEnv(AzuriteEndpoint)
if !ok {
return nil, fmt.Errorf("%s environment variable not set while %s is true", AzuriteEndpoint, EnvAzureEmulatorEnabled)
if ok {
isEmulator, err := strconv.ParseBool(emulatorEnabled)
if err != nil {
return nil, fmt.Errorf("invalid value for %s: %s, error: %w", EnvAzureEmulatorEnabled, emulatorEnabled, err)
}
if isEmulator {
// TODO: going forward, use Azurite with HTTPS (TLS) communication
scheme = "http"
}
}
// Application protocol (http or https) is determined by the user of the Azurite, not by this function.
return url.Parse(fmt.Sprintf("%s/%s", endpoint, credentials.AccountName()))

return url.Parse(fmt.Sprintf("%s://%s.%s", scheme, storageAccount, domain))
}

func getCredentials(prefixString string) (string, string, error) {
func getCredentials(prefixString string) (*absCredentials, error) {
if filename, isSet := os.LookupEnv(prefixString + absCredentialJSONFile); isSet {
credentials, err := readABSCredentialsJSON(filename)
if err != nil {
return "", "", fmt.Errorf("error getting credentials using %v file", filename)
return nil, fmt.Errorf("error getting credentials using %v file with error %w", filename, err)
}
return credentials.StorageAccount, credentials.SecretKey, nil
return credentials, nil
}

// TODO: @renormalize Remove this extra handling in v0.31.0
// Check if a JSON file is present in the directory, if it is present -> the JSON file must be used for credentials.
if dir, isSet := os.LookupEnv(prefixString + absCredentialDirectory); isSet {
jsonCredentialFile, err := findFileWithExtensionInDir(dir, ".json")
if err != nil {
return "", "", fmt.Errorf("error while finding a JSON credential file in %v directory with error: %w", dir, err)
return nil, fmt.Errorf("error while finding a JSON credential file in %v directory with error: %w", dir, err)
}
if jsonCredentialFile != "" {
credentials, err := readABSCredentialsJSON(jsonCredentialFile)
if err != nil {
return "", "", fmt.Errorf("error getting credentials using %v JSON file in a directory with error: %w", jsonCredentialFile, err)
return nil, fmt.Errorf("error getting credentials using %v JSON file in a directory with error: %w", jsonCredentialFile, err)
}
return credentials.StorageAccount, credentials.SecretKey, nil
return credentials, nil
}
// Non JSON credential files might exist in the credential directory, do not return
}

if dir, isSet := os.LookupEnv(prefixString + absCredentialDirectory); isSet {
credentials, err := readABSCredentialFiles(dir)
if err != nil {
return "", "", fmt.Errorf("error getting credentials from %v dir", dir)
return nil, fmt.Errorf("error getting credentials from %v directory with error %w", dir, err)
}
return credentials.StorageAccount, credentials.SecretKey, nil
return credentials, nil
}

return "", "", fmt.Errorf("unable to get credentials")
return nil, fmt.Errorf("unable to get credentials")
}

func readABSCredentialsJSON(filename string) (*absCredentials, error) {
@@ -172,17 +170,23 @@ func readABSCredentialFiles(dirname string) (*absCredentials, error) {

for _, file := range files {
if file.Name() == "storageAccount" {
data, err := os.ReadFile(dirname + "/storageAccount")
data, err := os.ReadFile(path.Join(dirname, file.Name()))
if err != nil {
return nil, err
}
absConfig.StorageAccount = string(data)
} else if file.Name() == "storageKey" {
data, err := os.ReadFile(dirname + "/storageKey")
data, err := os.ReadFile(path.Join(dirname, file.Name()))
if err != nil {
return nil, err
}
absConfig.StorageKey = string(data)
} else if file.Name() == "domain" {
data, err := os.ReadFile(path.Join(dirname, file.Name()))
if err != nil {
return nil, err
}
absConfig.SecretKey = string(data)
absConfig.Domain = pointer.String(string(data))
}
}

@@ -430,7 +434,7 @@ func GetABSCredentialsLastModifiedTime() (time.Time, error) {
}

func isABSConfigEmpty(config *absCredentials) error {
if len(config.SecretKey) != 0 && len(config.StorageAccount) != 0 {
if len(config.StorageKey) != 0 && len(config.StorageAccount) != 0 {
return nil
}
return fmt.Errorf("azure object storage credentials: storageKey or storageAccount is missing")
2 changes: 1 addition & 1 deletion pkg/snapstore/abs_snapstore_test.go
@@ -31,7 +31,7 @@ func newFakeABSSnapstore() brtypes.SnapStore {
newFakePolicyFactory(bucket, prefixV2, objectMap),
}
p := pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newFakePolicyFactory(bucket, prefixV2, objectMap)})
u, err := url.Parse(fmt.Sprintf("https://%s.%s", "dummyaccount", brtypes.AzureBlobStorageHostName))
u, err := url.Parse(fmt.Sprintf("https://%s.%s", "dummyaccount", brtypes.AzureBlobStorageGlobalDomain))
Expect(err).ShouldNot(HaveOccurred())
serviceURL := azblob.NewServiceURL(*u, p)
containerURL := serviceURL.NewContainerURL(bucket)
33 changes: 28 additions & 5 deletions pkg/snapstore/gcs_snapstore.go
@@ -30,6 +30,8 @@ const (
envStoreCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
envStorageAPIEndpoint = "GOOGLE_STORAGE_API_ENDPOINT"
envSourceStoreCredentials = "SOURCE_GOOGLE_APPLICATION_CREDENTIALS"

storageAPIEndpointFileName = "storageAPIEndpoint"
)

// GCSSnapStore is snapstore with GCS object store as backend.
@@ -62,15 +64,20 @@ func NewGCSSnapStore(config *brtypes.SnapstoreConfig) (*GCSSnapStore, error) {
emulatorConfig.enabled = isEmulatorEnabled()
var opts []option.ClientOption // no need to explicitly set store credentials here since the Google SDK picks it up from the standard environment variable

if _, ok := os.LookupEnv(envSourceStoreCredentials); !ok || emulatorConfig.enabled { // do not set endpoint override when copying backups between buckets, since the buckets may reside on different regions
endpoint := strings.TrimSpace(os.Getenv(envStorageAPIEndpoint))
if gcsApplicationCredentialsPath, isSet := os.LookupEnv(getEnvPrefixString(config.IsSource) + envStoreCredentials); isSet {
storageAPIEndpointFilePath := path.Join(path.Dir(gcsApplicationCredentialsPath), storageAPIEndpointFileName)
endpoint, err := getGCSStorageAPIEndpoint(storageAPIEndpointFilePath)
if err != nil {
return nil, fmt.Errorf("error getting storage API endpoint from %v", storageAPIEndpointFilePath)
}
if endpoint != "" {
opts = append(opts, option.WithEndpoint(endpoint))
if emulatorConfig.enabled {
emulatorConfig.endpoint = endpoint
}
}
}

var chunkDirSuffix string
if emulatorConfig.enabled {
err := emulatorConfig.configureClient(opts)
@@ -96,6 +103,22 @@ func NewGCSSnapStore(config *brtypes.SnapstoreConfig) (*GCSSnapStore, error) {
return NewGCSSnapStoreFromClient(config.Container, config.Prefix, config.TempDir, config.MaxParallelChunkUploads, config.MinChunkSize, chunkDirSuffix, gcsClient), nil
}

func getGCSStorageAPIEndpoint(path string) (string, error) {
if _, err := os.Stat(path); err == nil {
data, err := os.ReadFile(path)
if err != nil {
return "", err
}
if len(data) == 0 {
return "", fmt.Errorf("file %s is empty", path)
}
return strings.TrimSpace(string(data)), nil
}

// support falling back to environment variable `GOOGLE_STORAGE_API_ENDPOINT`
return strings.TrimSpace(os.Getenv(envStorageAPIEndpoint)), nil
}

// NewGCSSnapStoreFromClient create new GCSSnapStore from shared configuration with specified bucket.
func NewGCSSnapStoreFromClient(bucket, prefix, tempDir string, maxParallelChunkUploads uint, minChunkSize int64, chunkDirSuffix string, cli stiface.Client) *GCSSnapStore {
return &GCSSnapStore{
@@ -306,11 +329,11 @@ func (s *GCSSnapStore) Delete(snap brtypes.Snapshot) error {

// GetGCSCredentialsLastModifiedTime returns the latest modification timestamp of the GCS credential file
func GetGCSCredentialsLastModifiedTime() (time.Time, error) {
if filename, isSet := os.LookupEnv(envStoreCredentials); isSet {
credentialFiles := []string{filename}
if credentialsFilePath, isSet := os.LookupEnv(envStoreCredentials); isSet {
credentialFiles := []string{credentialsFilePath}
gcsTimeStamp, err := getLatestCredentialsModifiedTime(credentialFiles)
if err != nil {
return time.Time{}, fmt.Errorf("failed to fetch file information of the GCS JSON credential file %v with error: %v", filename, err)
return time.Time{}, fmt.Errorf("failed to fetch file information of the GCS JSON credential dir %v with error: %v", path.Dir(credentialsFilePath), err)
}
return gcsTimeStamp, nil
}
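Since `getGCSStorageAPIEndpoint` is unexported, the lookup order it implements can be illustrated with an equivalent standalone sketch; the credentials path and endpoint values come from the environment, nothing here is hard-coded from the commit.

```go
package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// Sketch of the endpoint resolution added above: prefer a "storageAPIEndpoint" file
// sitting next to the service account JSON, then fall back to the
// GOOGLE_STORAGE_API_ENDPOINT environment variable that this commit deprecates.
func main() {
	credsPath := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") // e.g. /root/.gcp/serviceaccount.json
	endpointFile := path.Join(path.Dir(credsPath), "storageAPIEndpoint")

	if data, err := os.ReadFile(endpointFile); err == nil && len(data) > 0 {
		fmt.Println("endpoint override (file):", strings.TrimSpace(string(data)))
		return
	}
	fmt.Println("endpoint override (env fallback):", strings.TrimSpace(os.Getenv("GOOGLE_STORAGE_API_ENDPOINT")))
}
```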
25 changes: 9 additions & 16 deletions pkg/snapstore/snapstore_test.go
@@ -510,6 +510,7 @@ var _ = Describe("Dynamic access credential rotation test for each provider", fu

var _ = Describe("Blob Service URL construction for Azure", func() {
var credentials *azblob.SharedKeyCredential
domain := "test.domain"
BeforeEach(func() {
var err error
// test strings
@@ -519,39 +520,31 @@ var _ = Describe("Blob Service URL construction for Azure", func() {
})
Context(fmt.Sprintf("when the environment variable %q is not set", EnvAzureEmulatorEnabled), func() {
It("should return the default blob service URL", func() {
blobServiceURL, err := ConstructBlobServiceURL(credentials)
blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain)
Expect(err).ShouldNot(HaveOccurred())
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName)))
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), domain)))
})
})
Context(fmt.Sprintf("when the environment variable %q is set", EnvAzureEmulatorEnabled), func() {
Context("to values which are not \"true\"", func() {
It("should error when the environment variable is not \"true\" or \"false\"", func() {
GinkgoT().Setenv(EnvAzureEmulatorEnabled, "")
_, err := ConstructBlobServiceURL(credentials)
_, err := ConstructBlobServiceURL(credentials.AccountName(), domain)
Expect(err).Should(HaveOccurred())
})
It("should return the default blob service URL when the environment variable is set to \"false\"", func() {
GinkgoT().Setenv(EnvAzureEmulatorEnabled, "false")
blobServiceURL, err := ConstructBlobServiceURL(credentials)
blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain)
Expect(err).ShouldNot(HaveOccurred())
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName)))
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), domain)))
})
})
Context("to \"true\"", func() {
const endpoint string = "http://localhost:12345"
BeforeEach(func() {
It(fmt.Sprintf("should return the Azurite blob service URL with HTTP scheme"), func() {
GinkgoT().Setenv(EnvAzureEmulatorEnabled, "true")
})
It(fmt.Sprintf("should error when the %q environment variable is not set", AzuriteEndpoint), func() {
_, err := ConstructBlobServiceURL(credentials)
Expect(err).Should(HaveOccurred())
})
It(fmt.Sprintf("should return the Azurite blob service URL when the %q environment variable is set to %q", AzuriteEndpoint, endpoint), func() {
GinkgoT().Setenv(AzuriteEndpoint, endpoint)
blobServiceURL, err := ConstructBlobServiceURL(credentials)
blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain)
Expect(err).ShouldNot(HaveOccurred())
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("%s/%s", endpoint, credentials.AccountName())))
Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("http://%s.%s", credentials.AccountName(), domain)))
})
})
})