-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathhttputilx.go
More file actions
274 lines (241 loc) · 7.63 KB
/
httputilx.go
File metadata and controls
274 lines (241 loc) · 7.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
// Package httputilx provides HTTP utility functions.
package httputilx // import "github.com/teamwork/utils/v2/httputilx"
import (
"bytes"
"fmt"
"io"
"log/slog"
"net/http"
"net/http/httputil"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/teamwork/utils/v2/ioutilx"
)
// DumpBody reads the body of a HTTP request without consuming it, so it can be
// read again later.
// It will read at most maxSize of bytes. Use -1 to read everything.
//
// It's based on httputil.DumpRequest.
//
// Copyright 2009 The Go Authors. All rights reserved. Use of this source code
// is governed by a BSD-style license that can be found in the LICENSE file:
// https://golang.org/LICENSE
func DumpBody(r *http.Request, maxSize int64) ([]byte, error) {
	if r.Body == nil {
		return nil, nil
	}

	// Split the body in two: a replacement reader to put back on the request,
	// and a reader for us to drain into the dump buffer.
	save, body, err := ioutilx.DumpReader(r.Body)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	var dest io.Writer = &buf

	// A chunked transfer encoding gets re-encoded as chunks, so the dump
	// matches the wire format.
	isChunked := len(r.TransferEncoding) > 0 && r.TransferEncoding[0] == "chunked"
	if isChunked {
		dest = httputil.NewChunkedWriter(dest)
	}

	if maxSize < 0 {
		_, err = io.Copy(dest, body)
	} else if _, err = io.CopyN(dest, body, maxSize); err == io.EOF {
		// CopyN reports io.EOF when the body is shorter than maxSize; that is
		// fine here, we simply dumped everything there was.
		err = nil
	}
	if err != nil {
		return nil, err
	}

	if isChunked {
		_ = dest.(io.Closer).Close()
		_, _ = io.WriteString(&buf, "\r\n")
	}

	r.Body = save
	return buf.Bytes(), nil
}
// ErrNotOK is used when the status code is not 200 OK.
type ErrNotOK struct {
	// URL is the URL that was requested.
	URL string
	// Err describes the failure; callers typically fill it with the HTTP
	// status of the response.
	Err string
}

// Error implements the error interface.
func (e ErrNotOK) Error() string {
	return "code " + e.Err + " while downloading " + e.URL
}
// Fetch the contents of an HTTP URL.
//
// A non-200 status code returns both the body that was read and an ErrNotOK
// describing the status.
//
// This is not intended to cover all possible use cases for fetching files,
// only the most common ones. Use the net/http package for more advanced usage.
func Fetch(url string) ([]byte, error) {
	client := http.Client{Timeout: 60 * time.Second}

	response, err := client.Get(url)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot download %v", url)
	}
	defer response.Body.Close() // nolint: errcheck

	// TODO: Maybe add sanity check to bail out of the Content-Length is very
	// large?
	data, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot read body of %v", url)
	}

	if response.StatusCode != http.StatusOK {
		// Response.Status already includes the numeric code (e.g.
		// "404 Not Found"), so don't prepend StatusCode again; doing so
		// produced duplicated output like "404 404 Not Found".
		return data, ErrNotOK{
			URL: url,
			Err: response.Status,
		}
	}

	return data, nil
}
// Save an HTTP URL to the directory dir with the filename. The filename can be
// generated from the URL if empty.
//
// It will return the full path to the save file. Note that it may create both a
// file *and* return an error (e.g. in cases of non-200 status codes).
//
// This is not intended to cover all possible use cases for fetching files,
// only the most common ones. Use the net/http package for more advanced usage.
func Save(url string, dir string, filename string) (string, error) {
	// Use last path of url if filename is empty.
	// NOTE(review): this keeps any query string in the derived filename
	// (e.g. "file.zip?token=x") — confirm whether callers rely on that.
	if filename == "" {
		tokens := strings.Split(url, "/")
		filename = tokens[len(tokens)-1]
	}
	// filepath.Join builds an OS-correct, cleaned path rather than relying on
	// manual "/" concatenation.
	path := filepath.Join(dir, filename)

	client := http.Client{Timeout: 60 * time.Second}
	response, err := client.Get(url)
	if err != nil {
		return "", errors.Wrapf(err, "cannot download %v", url)
	}
	defer response.Body.Close() // nolint: errcheck

	output, err := os.Create(path)
	if err != nil {
		return "", errors.Wrapf(err, "cannot create %v", path)
	}
	// Safety net for early returns; the explicit Close below does the real
	// error checking. Closing twice is harmless for *os.File.
	defer output.Close() // nolint: errcheck

	_, err = io.Copy(output, response.Body)
	if err != nil {
		return path, errors.Wrapf(err, "cannot read body of %v in to %v", url, path)
	}

	// Close explicitly and check the error: write failures (e.g. a full disk)
	// can surface only at Close time, and the deferred close drops them.
	if err := output.Close(); err != nil {
		return path, errors.Wrapf(err, "cannot write %v", path)
	}

	if response.StatusCode != http.StatusOK {
		// Response.Status already includes the numeric code (e.g.
		// "404 Not Found"), so don't prepend StatusCode again; doing so
		// produced duplicated output like "404 404 Not Found".
		return path, ErrNotOK{
			URL: url,
			Err: response.Status,
		}
	}

	return path, nil
}
// ExponentialBackoffOptions contains options for the exponential backoff retry
// mechanism.
type ExponentialBackoffOptions struct {
client *http.Client
maxRetries int
initialBackoff time.Duration
maxBackoff time.Duration
backoffMultiplier float64
shouldRetry func(resp *http.Response, err error) bool
logger *slog.Logger
}
// ExponentialBackoffOption is a function that configures
// ExponentialBackoffOptions.
type ExponentialBackoffOption func(*ExponentialBackoffOptions)
// ExponentialBackoffWithClient sets the HTTP client to be used when sending the
// API requests. By default, http.DefaultClient is used.
func ExponentialBackoffWithClient(client *http.Client) ExponentialBackoffOption {
return func(o *ExponentialBackoffOptions) {
o.client = client
}
}
// ExponentialBackoffWithConfig sets the configuration for the exponential
// backoff retry mechanism. By default, it will retry up to 3 times, starting
// with a 100ms backoff, doubling each time up to a maximum of 5s.
func ExponentialBackoffWithConfig(
maxRetries int,
initialBackoff, maxBackoff time.Duration,
backoffMultiplier float64,
) ExponentialBackoffOption {
return func(o *ExponentialBackoffOptions) {
o.maxRetries = maxRetries
o.initialBackoff = initialBackoff
o.maxBackoff = maxBackoff
o.backoffMultiplier = backoffMultiplier
}
}
// ExponentialBackoffWithShouldRetry sets the function to determine whether a
// request should be retried based on the response and error. By default, it
// retries on any error, as well as on HTTP 5xx and 429 status codes.
func ExponentialBackoffWithShouldRetry(
shouldRetry func(resp *http.Response, err error) bool,
) ExponentialBackoffOption {
return func(o *ExponentialBackoffOptions) {
o.shouldRetry = shouldRetry
}
}
// ExponentialBackoffWithLogger sets the logger to be used for logging retry
// attempts. By default, a no-op logger is used.
func ExponentialBackoffWithLogger(logger *slog.Logger) ExponentialBackoffOption {
return func(o *ExponentialBackoffOptions) {
o.logger = logger
}
}
// DoExponentialBackoff will send an API request using exponential backoff until
// it either succeeds or the maximum number of retries is reached.
func DoExponentialBackoff(req *http.Request, options ...ExponentialBackoffOption) (*http.Response, error) {
o := ExponentialBackoffOptions{
client: http.DefaultClient,
maxRetries: 3,
initialBackoff: 100 * time.Millisecond,
maxBackoff: 5 * time.Second,
backoffMultiplier: 2.0,
shouldRetry: func(resp *http.Response, err error) bool {
if err != nil {
return true
}
if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode >= 500 {
return true
}
return false
},
logger: slog.New(slog.DiscardHandler),
}
for _, option := range options {
option(&o)
}
backoff := o.initialBackoff
for attempt := 0; attempt <= o.maxRetries; attempt++ {
reqClone := req.Clone(req.Context())
if req.Body != nil {
if seeker, ok := req.Body.(interface {
Seek(int64, int) (int64, error)
}); ok {
_, _ = seeker.Seek(0, 0)
}
reqClone.Body = req.Body
}
resp, err := o.client.Do(reqClone)
if !o.shouldRetry(resp, err) || attempt >= o.maxRetries {
return resp, err
}
logArgs := []any{
slog.Int("attempt", attempt+1),
slog.Duration("backoff", backoff),
}
if err != nil {
logArgs = append(logArgs, slog.String("error", err.Error()))
}
if resp != nil {
if err := resp.Body.Close(); err != nil {
o.logger.Error("failed to close response body",
slog.Int("attempt", attempt+1),
slog.String("error", err.Error()),
)
}
logArgs = append(logArgs, slog.Int("status_code", resp.StatusCode))
}
o.logger.Debug("request failed", logArgs...)
time.Sleep(backoff)
backoff = min(time.Duration(float64(backoff)*o.backoffMultiplier), o.maxBackoff)
}
return nil, fmt.Errorf("request failed after %d attempts", o.maxRetries+1)
}