Commit 4a43319

Retry-After delay shouldn't exceed MaxRetryDelay (Azure#19118)
* Retry-After delay shouldn't exceed MaxRetryDelay

  Don't retry when Retry-After has a delay greater than the configured MaxRetryDelay value.

* add test for the retry path
1 parent d8f9b99 commit 4a43319
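For context, a minimal sketch of how a caller might cap retry delays when building an azcore pipeline, assuming the public `runtime.NewPipeline` and `policy.RetryOptions` APIs; the module name, version, and option values below are placeholders, not part of this commit.

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func newPipelineWithCappedRetries() runtime.Pipeline {
	// Cap the delay the retry policy is willing to honor between tries.
	// With this change, a Retry-After value larger than MaxRetryDelay causes
	// the throttled response to be returned instead of waiting and retrying.
	opts := policy.ClientOptions{
		Retry: policy.RetryOptions{
			MaxRetries:    3, // placeholder values
			RetryDelay:    2 * time.Second,
			MaxRetryDelay: 30 * time.Second,
		},
	}
	return runtime.NewPipeline("example-module", "v0.1.0", runtime.PipelineOptions{}, &opts)
}
```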

File tree

3 files changed: +41 -3 lines

sdk/azcore/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Lines changed: 1 addition & 0 deletions

```diff
@@ -7,6 +7,7 @@
 ### Breaking Changes
 
 ### Bugs Fixed
+* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`.
 
 ### Other Changes
 
```
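A hypothetical caller-side check to illustrate the changelog entry, assuming the pipeline now returns the throttled response (headers intact) when the `Retry-After` delay exceeds `RetryOptions.MaxRetryDelay`; the function and its handling are illustrative, not part of the SDK.

```go
package example

import (
	"log"
	"net/http"
)

// handleThrottle shows how a caller could inspect a 429 that the retry
// policy declined to retry; the Retry-After header is still present.
func handleThrottle(resp *http.Response) {
	if resp.StatusCode == http.StatusTooManyRequests {
		// surface or honor the service's hint as appropriate for the caller
		log.Printf("throttled; service requested a delay of %q", resp.Header.Get("Retry-After"))
	}
}
```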
sdk/azcore/runtime/policy_retry.go

Lines changed: 8 additions & 3 deletions
```diff
@@ -168,14 +168,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
 			return
 		}
 
-		// drain before retrying so nothing is leaked
-		Drain(resp)
-
 		// use the delay from retry-after if available
 		delay := shared.RetryAfter(resp)
 		if delay <= 0 {
 			delay = calcDelay(options, try)
+		} else if delay > options.MaxRetryDelay {
+			// the retry-after delay exceeds the cap so don't retry
+			log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay)
+			return
+		}
+
+		// drain before retrying so nothing is leaked
+		Drain(resp)
+
 		log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay)
 		select {
 		case <-time.After(delay):
```
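For context on the value compared against `MaxRetryDelay`, a minimal sketch of converting a `Retry-After` header into a `time.Duration`, assuming the RFC 7231 delta-seconds and HTTP-date forms; the module's internal `shared.RetryAfter` helper is the actual implementation and may differ in detail.

```go
package example

import (
	"net/http"
	"strconv"
	"time"
)

// retryAfterDelay is an illustrative stand-in for the internal
// shared.RetryAfter helper used in the policy above; it is not SDK code.
func retryAfterDelay(resp *http.Response) time.Duration {
	ra := resp.Header.Get("Retry-After")
	if ra == "" {
		return 0
	}
	// delta-seconds form, e.g. "300"
	if secs, err := strconv.Atoi(ra); err == nil {
		return time.Duration(secs) * time.Second
	}
	// HTTP-date form, e.g. "Fri, 01 Jul 2022 00:00:00 GMT"
	if t, err := time.Parse(http.TimeFormat, ra); err == nil {
		return time.Until(t)
	}
	return 0
}
```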

sdk/azcore/runtime/policy_retry_test.go

Lines changed: 32 additions & 0 deletions
```diff
@@ -672,6 +672,38 @@ func TestRetryPolicySuccessWithPerTryTimeoutNoRetryWithBodyDownload(t *testing.T
 	require.Equal(t, largeBody, body)
 }
 
+func TestPipelineNoRetryOn429(t *testing.T) {
+	srv, close := mock.NewServer()
+	defer close()
+	// initial response is throttling with a long retry-after delay, it should not trigger a retry
+	srv.AppendResponse(mock.WithStatusCode(http.StatusTooManyRequests), mock.WithHeader(shared.HeaderRetryAfter, "300"))
+	perRetryPolicy := countingPolicy{}
+	req, err := NewRequest(context.Background(), http.MethodGet, srv.URL())
+	require.NoError(t, err)
+	pl := exported.NewPipeline(srv, NewRetryPolicy(nil), &perRetryPolicy)
+	resp, err := pl.Do(req)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
+	require.Equal(t, 1, perRetryPolicy.count)
+}
+
+func TestPipelineRetryOn429(t *testing.T) {
+	srv, close := mock.NewServer()
+	defer close()
+	srv.AppendResponse(mock.WithStatusCode(http.StatusTooManyRequests), mock.WithHeader(shared.HeaderRetryAfter, "1"))
+	srv.AppendResponse(mock.WithStatusCode(http.StatusTooManyRequests), mock.WithHeader(shared.HeaderRetryAfter, "1"))
+	srv.AppendResponse(mock.WithStatusCode(http.StatusOK))
+	perRetryPolicy := countingPolicy{}
+	req, err := NewRequest(context.Background(), http.MethodGet, srv.URL())
+	require.NoError(t, err)
+	opt := testRetryOptions()
+	pl := exported.NewPipeline(srv, NewRetryPolicy(opt), &perRetryPolicy)
+	resp, err := pl.Do(req)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, resp.StatusCode)
+	require.Equal(t, 3, perRetryPolicy.count)
+}
+
 func newRewindTrackingBody(s string) *rewindTrackingBody {
 	// there are two rewinds that happen before rewinding for a retry
 	// 1. to get the body's size in SetBody()
```
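Both tests assert on a `countingPolicy` helper defined elsewhere in the test file. A minimal sketch of what such a per-try counting policy could look like, assuming the azcore `policy.Policy` interface; the real helper may be implemented differently.

```go
package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// countingPolicy is a hypothetical reconstruction for illustration; the
// actual helper lives elsewhere in policy_retry_test.go.
type countingPolicy struct {
	count int
}

// Do increments the counter once per try that reaches this point in the
// pipeline, then forwards the request to the next policy (the transport
// in these tests).
func (p *countingPolicy) Do(req *policy.Request) (*http.Response, error) {
	p.count++
	return req.Next()
}
```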
