1
0
Fork 0

Allow configurable S3 path style for auto backups

AWS has deprecated the path-style URLs but it's required for some
S3-like alternatives such as Minio.
master
Jason Tackaberry 9 months ago
parent c315c0ecf6
commit e60a0b54fc
No known key found for this signature in database
GPG Key ID: 47A5F9BF12496033

@@ -39,7 +39,7 @@ func DownloadFile(ctx context.Context, cfgPath string) (path string, errOK bool,
 		return "", false, fmt.Errorf("failed to parse auto-restore file: %s", err.Error())
 	}
 	sc := aws.NewS3Client(s3cfg.Endpoint, s3cfg.Region, s3cfg.AccessKeyID, s3cfg.SecretAccessKey,
-		s3cfg.Bucket, s3cfg.Path)
+		s3cfg.Bucket, s3cfg.Path, s3cfg.ForcePathStyle)
 	d := NewDownloader(sc)

 	// Create a temporary file to download to.

@@ -20,16 +20,18 @@ type S3Config struct {
 	SecretAccessKey string `json:"secret_access_key"`
 	Bucket          string `json:"bucket"`
 	Path            string `json:"path"`
+	ForcePathStyle  bool   `json:"force_path_style"`
 }

 // S3Client is a client for uploading data to S3.
 type S3Client struct {
 	endpoint       string
 	region         string
 	accessKey      string
 	secretKey      string
 	bucket         string
 	key            string
+	forcePathStyle bool

 	// These fields are used for testing via dependency injection.
 	uploader uploader
@@ -37,14 +39,15 @@ type S3Client struct {
 }

 // NewS3Client returns an instance of an S3Client.
-func NewS3Client(endpoint, region, accessKey, secretKey, bucket, key string) *S3Client {
+func NewS3Client(endpoint, region, accessKey, secretKey, bucket, key string, forcePathStyle bool) *S3Client {
 	return &S3Client{
 		endpoint:  endpoint,
 		region:    region,
 		accessKey: accessKey,
 		secretKey: secretKey,
 		bucket:    bucket,
 		key:       key,
+		forcePathStyle: forcePathStyle,
 	}
 }
@@ -108,9 +111,10 @@ func (s *S3Client) Download(ctx context.Context, writer io.WriterAt) error {
 func (s *S3Client) createSession() (*session.Session, error) {
 	sess, err := session.NewSession(&aws.Config{
 		Endpoint:    aws.String(s.endpoint),
 		Region:      aws.String(s.region),
 		Credentials: credentials.NewStaticCredentials(s.accessKey, s.secretKey, ""),
+		S3ForcePathStyle: aws.Bool(s.forcePathStyle),
 	})
 	if err != nil {
 		return nil, fmt.Errorf("failed to create S3 session: %w", err)

@@ -246,7 +246,7 @@ func startAutoBackups(ctx context.Context, cfg *Config, str *store.Store) (*back
 	}
 	provider := store.NewProvider(str, false)
 	sc := aws.NewS3Client(s3cfg.Endpoint, s3cfg.Region, s3cfg.AccessKeyID, s3cfg.SecretAccessKey,
-		s3cfg.Bucket, s3cfg.Path)
+		s3cfg.Bucket, s3cfg.Path, s3cfg.ForcePathStyle)
 	u := backup.NewUploader(sc, provider, time.Duration(uCfg.Interval), !uCfg.NoCompress)
 	u.Start(ctx, nil)
 	return u, nil

Loading…
Cancel
Save