
Commit 558eee8: Go fmt

1 parent cbaa374 commit 558eee8

37 files changed: +565 -568 lines
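
Everything below is mechanical formatter output; the exact invocation is not recorded in the commit, but gofmt -w . or go fmt ./... both produce it. Two formatter rules account for almost all of the churn: consecutive struct fields and composite-literal keys are column-aligned, and binary operators in standalone expressions get spaces around them. A minimal illustration of the alignment rule (sample type, not from this repo):

    package main

    import "fmt"

    // gofmt pads each field name in a contiguous block so the types line
    // up under the widest name; a blank line or a standalone comment line
    // starts a new alignment block.
    type Item struct {
        Key          *string
        LastModified *string
    }

    func main() {
        k, m := "obj1", "2024-01-01"
        fmt.Println(Item{Key: &k, LastModified: &m})
    }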

core/backend.go (+11 -11)
@@ -26,7 +26,7 @@ import (
 )
 
 type Capabilities struct {
-    MaxMultipartSize    uint64
+    MaxMultipartSize uint64
     // indicates that the blob store has native support for directories
     DirBlob bool
    Name    string
@@ -43,7 +43,7 @@ type BlobItemOutput struct {
     Size         uint64
     StorageClass *string
     // may be nil in list responses for backends that don't return metadata in listings
-    Metadata     map[string]*string
+    Metadata map[string]*string
 }
 
 type HeadBlobOutput struct {
@@ -194,7 +194,7 @@ type MultipartBlobAddInput struct {
 
 type MultipartBlobAddOutput struct {
     RequestId string
-    PartId *string
+    PartId    *string
 }
 
 type MultipartBlobCopyInput struct {
@@ -207,7 +207,7 @@ type MultipartBlobCopyInput struct {
 
 type MultipartBlobCopyOutput struct {
     RequestId string
-    PartId *string
+    PartId    *string
 }
 
 type MultipartBlobCommitOutput struct {
@@ -243,13 +243,13 @@ type MakeBucketOutput struct {
     RequestId string
 }
 
-/// Implementations of all the functions here are expected to be
-/// concurrency-safe, except for
-///
-/// Init() is called exactly once before any other functions are
-/// called.
-///
-/// Capabilities()/Bucket() are expected to be const
+// Implementations of all the functions here are expected to be
+// concurrency-safe, except for:
+//
+// Init() is called exactly once before any other functions are
+// called.
+//
+// Capabilities()/Bucket() are expected to be const
 type StorageBackend interface {
     Init(key string) error
     Capabilities() *Capabilities
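
Besides whitespace, this hunk normalizes the triple-slash comments (a Rust/C# habit; Go doc comments use plain //) and gives the dangling "except for" its colon. The contract itself matters for every backend: only Init() may assume single-threaded use. A hypothetical sketch, not geesefs code, of one way a caller can honor that contract with sync.Once:

    package main

    import (
        "fmt"
        "sync"
    )

    // Trimmed-down copy of the interface for illustration.
    type StorageBackend interface {
        Init(key string) error
    }

    type dummyBackend struct{}

    func (dummyBackend) Init(key string) error { return nil }

    // initOnce guards a backend so Init runs exactly once even if many
    // goroutines race to use it, matching the documented contract.
    type initOnce struct {
        StorageBackend
        once sync.Once
        err  error
    }

    func (b *initOnce) Ready(key string) error {
        b.once.Do(func() { b.err = b.StorageBackend.Init(key) })
        return b.err
    }

    func main() {
        b := &initOnce{StorageBackend: dummyBackend{}}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() { defer wg.Done(); fmt.Println(b.Ready("creds")) }()
        }
        wg.Wait()
    }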

core/backend_adlv1.go (+2 -2)
@@ -151,8 +151,8 @@ func NewADLv1(bucket string, flags *cfg.FlagStorage, config *cfg.ADLv1Config) (*
         bucket: bucket,
         cap: Capabilities{
             //NoParallelMultipart: true,
-            DirBlob:             true,
-            Name:                "adl",
+            DirBlob: true,
+            Name:    "adl",
             // ADLv1 fails with 404 if we upload data
             // larger than 30000000 bytes (28.6MB) (28MB
             // also failed in at one point, but as of

core/backend_azblob.go (+3 -3)
@@ -221,7 +221,7 @@ func (b *AZBlob) refreshToken() (*azblob.ContainerURL, error) {
     // our token totally expired, renew inline before using it
     b.mu.Unlock()
     b.tokenRenewGate <- 1
-    defer func() { <- b.tokenRenewGate } ()
+    defer func() { <-b.tokenRenewGate }()
 
     b.mu.Lock()
     // check again, because in the mean time maybe it's renewed
@@ -247,7 +247,7 @@ func (b *AZBlob) refreshToken() (*azblob.ContainerURL, error) {
         if err != nil {
             azbLog.Errorf("Unable to refresh token: %v", err)
         }
-        <- b.tokenRenewGate
+        <-b.tokenRenewGate
     }()
 
     // if we cannot renew token, treat it as a
@@ -664,7 +664,7 @@ func (b *AZBlob) DeleteBlobs(param *DeleteBlobsInput) (ret *DeleteBlobsOutput, d
 
         go func(key string) {
             defer func() {
-                <- SmallActionsGate
+                <-SmallActionsGate
                 wg.Done()
             }()
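
tokenRenewGate and SmallActionsGate follow Go's buffered-channel-as-semaphore idiom: a send acquires a slot, a receive releases it, and gofmt writes the receive operator with no space (<-ch), which is all these hunks change. A standalone sketch of the pattern; the capacity and names here are illustrative, not the values geesefs uses:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        // A buffered channel of capacity N lets at most N goroutines
        // hold the "semaphore" at once: send acquires, receive releases.
        gate := make(chan int, 2)

        var wg sync.WaitGroup
        for i := 0; i < 5; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                gate <- 1                 // acquire a slot
                defer func() { <-gate }() // release it, as in the diff above
                fmt.Println("worker", id, "in critical section")
            }(i)
        }
        wg.Wait()
    }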

core/backend_gcs3.go (+5 -5)
@@ -30,7 +30,7 @@ import (
 // GCS variant of S3
 type GCS3 struct {
     *S3Backend
-    gcs *storage.Client
+    gcs          *storage.Client
     jsonCredFile string
 }

@@ -102,12 +102,12 @@ func (s *GCS3) ListBlobs(param *ListBlobsInput) (*ListBlobsOutput, error) {
             })
         } else {
             items = append(items, BlobItemOutput{
-                Key: &attrs.Name,
-                ETag: &attrs.Etag,
+                Key:          &attrs.Name,
+                ETag:         &attrs.Etag,
                 LastModified: &attrs.Updated,
-                Size: uint64(attrs.Size),
+                Size:         uint64(attrs.Size),
                 StorageClass: &attrs.StorageClass,
-                Metadata: PMetadata(attrs.Metadata),
+                Metadata:     PMetadata(attrs.Metadata),
             })
         }
         n++

core/backend_s3.go (+14 -14)
@@ -54,20 +54,20 @@ type S3Backend struct {
     gcs      bool
     v2Signer bool
 
-    iam bool
-    iamToken atomic.Value
+    iam                bool
+    iamToken           atomic.Value
     iamTokenExpiration time.Time
-    iamRefreshTimer *time.Timer
+    iamRefreshTimer    *time.Timer
 }
 
 func NewS3(bucket string, flags *cfg.FlagStorage, config *cfg.S3Config) (*S3Backend, error) {
     if config.MultipartCopyThreshold == 0 {
-        config.MultipartCopyThreshold = 128*1024*1024
+        config.MultipartCopyThreshold = 128 * 1024 * 1024
     }
 
     if config.ProjectId != "" {
         log.Infof("Using Ceph multitenancy format bucket naming: %s", bucket)
-        bucket = config.ProjectId+":"+bucket
+        bucket = config.ProjectId + ":" + bucket
     }
 
     awsConfig, err := config.ToAwsConfig(flags)
@@ -105,15 +105,15 @@ func NewS3(bucket string, flags *cfg.FlagStorage, config *cfg.S3Config) (*S3Back
 }
 
 type IMDSv1Response struct {
-    Code string
-    Token string
+    Code       string
+    Token      string
     Expiration time.Time
 }
 
 type GCPCredResponse struct {
     AccessToken string `json:"access_token"`
-    TokenType string `json:"token_type"`
-    ExpiresIn int `json:"expires_in"`
+    TokenType   string `json:"token_type"`
+    ExpiresIn   int    `json:"expires_in"`
 }
 
 func (s *S3Backend) TryIAM() (err error) {
@@ -366,7 +366,7 @@ func (s *S3Backend) testBucket(key string) (err error) {
         s.flags.ReadRetryAttempts = 5
     }
     err = ReadBackoff(s.flags, func(attempt int) error {
-        _, err := s.HeadBlob(&HeadBlobInput{Key: key});
+        _, err := s.HeadBlob(&HeadBlobInput{Key: key})
         return err
     })
     if err != nil {
@@ -699,7 +699,7 @@ func (s *S3Backend) mpuCopyParts(size int64, from string, to string, mpuId strin
     wg.SetLimit(s.flags.MaxParallelParts)
 
     var startOffset int64
-    var partIdx     int
+    var partIdx int
     for _, cfg := range partSizes {
         for i := 0; i < int(cfg.PartCount) && startOffset < size; i++ {
             endOffset := MinInt64(startOffset+int64(cfg.PartSize), size)
@@ -1103,7 +1103,7 @@ func (s *S3Backend) MultipartBlobAdd(param *MultipartBlobAddInput) (*MultipartBl
 
     return &MultipartBlobAddOutput{
         RequestId: s.getRequestId(req),
-        PartId: resp.ETag,
+        PartId:    resp.ETag,
     }, nil
 }
@@ -1112,7 +1112,7 @@ func (s *S3Backend) MultipartBlobCopy(param *MultipartBlobCopyInput) (*Multipart
         Bucket:     &s.bucket,
         Key:        param.Commit.Key,
         PartNumber: aws.Int64(int64(param.PartNumber)),
-        CopySource: aws.String(pathEscape(s.bucket+"/"+param.CopySource)),
+        CopySource: aws.String(pathEscape(s.bucket + "/" + param.CopySource)),
         UploadId:   param.Commit.UploadId,
     }
     if param.Size != 0 {
@@ -1134,7 +1134,7 @@ func (s *S3Backend) MultipartBlobCopy(param *MultipartBlobCopyInput) (*Multipart
 
     return &MultipartBlobCopyOutput{
         RequestId: s.getRequestId(req),
-        PartId: resp.CopyPartResult.ETag,
+        PartId:    resp.CopyPartResult.ETag,
     }, nil
 }
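
The GCPCredResponse hunk above is pure alignment, but the struct tags it aligns are what map snake_case token JSON onto exported Go fields. A self-contained sketch of that decoding; the payload is shaped like a metadata-server token response but is illustrative, not captured from one:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Same shape as the GCPCredResponse in the diff; the json tags tell
    // Unmarshal which snake_case keys fill which fields.
    type GCPCredResponse struct {
        AccessToken string `json:"access_token"`
        TokenType   string `json:"token_type"`
        ExpiresIn   int    `json:"expires_in"`
    }

    func main() {
        raw := []byte(`{"access_token":"ya29.xxx","token_type":"Bearer","expires_in":3599}`)
        var cred GCPCredResponse
        if err := json.Unmarshal(raw, &cred); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cred)
    }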

core/backend_test.go (+6 -6)
@@ -18,13 +18,13 @@ package core
 
 type TestBackend struct {
     StorageBackend
-    ListBlobsFunc func(param *ListBlobsInput) (*ListBlobsOutput, error)
-    HeadBlobFunc func(param *HeadBlobInput) (*HeadBlobOutput, error)
-    MultipartBlobAddFunc func(param *MultipartBlobAddInput) (*MultipartBlobAddOutput, error)
-    MultipartBlobCopyFunc func(param *MultipartBlobCopyInput) (*MultipartBlobCopyOutput, error)
+    ListBlobsFunc           func(param *ListBlobsInput) (*ListBlobsOutput, error)
+    HeadBlobFunc            func(param *HeadBlobInput) (*HeadBlobOutput, error)
+    MultipartBlobAddFunc    func(param *MultipartBlobAddInput) (*MultipartBlobAddOutput, error)
+    MultipartBlobCopyFunc   func(param *MultipartBlobCopyInput) (*MultipartBlobCopyOutput, error)
     MultipartBlobCommitFunc func(param *MultipartBlobCommitInput) (*MultipartBlobCommitOutput, error)
-    capabilities *Capabilities
-    err error
+    capabilities            *Capabilities
+    err                     error
 }
 
 func (s *TestBackend) Init(bucket string) error {

core/buffer_list.go (+14 -14)
@@ -78,8 +78,8 @@ type BufferList struct {
 
 type FileBuffer struct {
     queueId uint64
-    offset uint64
-    length uint64
+    offset  uint64
+    length  uint64
     // Chunk state: 1 = clean. 2 = dirty. 3 = part flushed, but not finalized
     // 4 = flushed, not finalized, but removed from memory
     state BufferState
@@ -194,15 +194,15 @@ func (l *BufferList) EvictFromMemory(buf *FileBuffer) (allocated int64, deleted
             prev.onDisk == buf.onDisk {
             l.unqueue(buf)
             l.unqueue(prev)
-            l.at.Delete(prev.offset+prev.length)
+            l.at.Delete(prev.offset + prev.length)
             buf.length += prev.length
             buf.offset = prev.offset
             l.queue(buf)
             deleted = true
         }
     } else if buf.state == BUF_CLEAN {
         l.unqueue(buf)
-        l.at.Delete(buf.offset+buf.length)
+        l.at.Delete(buf.offset + buf.length)
         deleted = true
     } else if buf.state == BUF_FLUSHED_FULL {
         // A flushed buffer can be removed at a cost of finalizing multipart upload
@@ -220,7 +220,7 @@ func (l *BufferList) EvictFromMemory(buf *FileBuffer) (allocated int64, deleted
     })
     if prev != nil && prev.offset+prev.length == buf.offset && prev.state == buf.state {
         l.unqueue(prev)
-        l.at.Delete(prev.offset+prev.length)
+        l.at.Delete(prev.offset + prev.length)
         buf.length += prev.length
         buf.offset = prev.offset
         deleted = true
@@ -259,7 +259,7 @@ func (l *BufferList) unqueue(b *FileBuffer) {
     }
     if b.state == BUF_DIRTY {
         sp := l.helpers.PartNum(b.offset)
-        ep := l.helpers.PartNum(b.offset+b.length-1)
+        ep := l.helpers.PartNum(b.offset + b.length - 1)
         for i := sp; i < ep+1; i++ {
             p := l.dirtyParts[i]
             if p == nil || p.refcnt == 0 {
@@ -303,7 +303,7 @@ func (l *BufferList) queue(b *FileBuffer) {
         l.dirtyParts = make(map[uint64]*dirtyPart)
     }
     sp := l.helpers.PartNum(b.offset)
-    ep := l.helpers.PartNum(b.offset+b.length-1)
+    ep := l.helpers.PartNum(b.offset + b.length - 1)
     for i := sp; i <= ep; i++ {
         l.referenceDirtyPart(i)
     }
@@ -324,8 +324,8 @@ func (l *BufferList) requeueSplit(left *FileBuffer) {
         l.dirtyParts = make(map[uint64]*dirtyPart)
     }
     // most refcounts don't change - except if splitting not at part boundary
-    lbound := l.helpers.PartNum(left.offset+left.length-1)
-    rbound := l.helpers.PartNum(left.offset+left.length)
+    lbound := l.helpers.PartNum(left.offset + left.length - 1)
+    rbound := l.helpers.PartNum(left.offset + left.length)
     if lbound == rbound {
         l.referenceDirtyPart(lbound)
     }
@@ -377,7 +377,7 @@ func (l *BufferList) delete(b *FileBuffer) (allocated int64) {
         b.ptr = nil
         b.data = nil
     }
-    l.at.Delete(b.offset+b.length)
+    l.at.Delete(b.offset + b.length)
     l.unqueue(b)
     return
 }
@@ -801,14 +801,14 @@ func mergeRA(rr []Range, readAhead uint64, readMerge uint64) []Range {
             rr[prev].End = rr[i].End
         } else {
             prev++
-            sz := rr[i].End-rr[i].Start
+            sz := rr[i].End - rr[i].Start
             if sz < readAhead {
                 sz = readAhead
             }
-            rr[prev] = Range{Start: rr[i].Start, End: rr[i].Start+sz}
+            rr[prev] = Range{Start: rr[i].Start, End: rr[i].Start + sz}
         }
     }
-    return rr[0:prev+1]
+    return rr[0 : prev+1]
 }
 
 func splitRA(rr []Range, maxPart uint64) []Range {
@@ -821,7 +821,7 @@ func splitRA(rr []Range, maxPart uint64) []Range {
             split = true
         }
         for off := rr[i].Start; off < rr[i].End; off += maxPart {
-            res = append(res, Range{Start: off, End: off+maxPart})
+            res = append(res, Range{Start: off, End: off + maxPart})
         }
         res[len(res)-1].End = rr[i].End
     } else if split {
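
The buffer_list.go hunks show that gofmt's operator spacing is a heuristic, not a blanket rule: a chain like b.offset + b.length - 1 standing alone as an argument gets spaces, yet the untouched context line prev.offset+prev.length == buf.offset stays tight because the + binds more tightly than the neighboring ==, and a slice colon becomes spaced (rr[0 : prev+1]) once a bound is itself an expression. A small gofmt-clean example of all three cases, with invented variable names:

    package main

    import "fmt"

    func main() {
        offset, length := uint64(4096), uint64(1024)
        prev := 3

        // Standalone operator chain: gofmt inserts spaces.
        end := offset + length - 1

        // Mixed precedence: '+' stays tight beside the comparison.
        if offset+length == 5120 {
            fmt.Println("buffers are adjacent")
        }

        // Slice colon is spaced when a bound is an expression.
        rr := []uint64{1, 2, 3, 4, 5}
        tail := rr[0 : prev+1]
        fmt.Println(end, tail)
    }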

core/buffer_list_test.go (+2 -2)
@@ -119,7 +119,7 @@ func (s *BufferListTest) TestGetHolesEvicted(t *C) {
     t.Assert(flcl, Equals, true)
     // Now check previous part - it should have a hole, but no FL_CLEARED
     holes, loading, flcl = l.GetHoles(5*1024, 5*1024)
-    t.Assert(holes, DeepEquals, []Range{{8*1024, 10*1024}})
+    t.Assert(holes, DeepEquals, []Range{{8 * 1024, 10 * 1024}})
     t.Assert(loading, Equals, false)
     t.Assert(flcl, Equals, false)
 }
@@ -162,7 +162,7 @@ func (s *BufferListTest) TestSplitDirtyQueue(t *C) {
     // Now check dirty list - it should be empty
    // With incorrect refcounting it would either be non-empty or the code would panic()
     numDirty := 0
-    l.IterateDirtyParts(func(partNum uint64) bool { numDirty++; return true; })
+    l.IterateDirtyParts(func(partNum uint64) bool { numDirty++; return true })
     t.Assert(numDirty, Equals, 0)
 }
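
This last hunk is the one-line-function rule: statements written on a single line need semicolon separators, but the final semicolon before the closing brace is redundant and gofmt deletes it. In sketch form; the callback shape mirrors IterateDirtyParts, while the visitParts helper is invented for the example:

    package main

    import "fmt"

    // visitParts mimics the shape of IterateDirtyParts: it calls f for
    // each part number until f returns false.
    func visitParts(f func(partNum uint64) bool) {
        for i := uint64(0); i < 3; i++ {
            if !f(i) {
                return
            }
        }
    }

    func main() {
        numDirty := 0
        // Semicolons separate the statements; gofmt drops the one before '}'.
        visitParts(func(partNum uint64) bool { numDirty++; return true })
        fmt.Println(numDirty) // 3
    }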
