
Retry the read_urlencoded_body_too_large test if the timeout triggers

The failure is caused by the timeout being only 1s after the period.
When the CI environment is overloaded, the timeout will
sometimes trigger. We retry, knowing that the
timetrap will catch us if we retry too many times.
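For context, the fix relies on open-ended recursion rather than a retry counter: the test helper simply calls itself again on the spurious outcomes, and the suite-level common_test timetrap is what ultimately aborts the test case if the expected 413 never arrives. A minimal, self-contained sketch of that pattern follows; the module and function names (retry_sketch, flaky_request/0) are made up for illustration and are not part of the Cowboy test suite.

	%% Illustrative sketch only: keep retrying a flaky operation until the
	%% expected result shows up, the way the patched test recurses on a 408
	%% response or a closed stream. flaky_request/0 stands in for the real
	%% gun request/await sequence.
	-module(retry_sketch).
	-export([retry_until_413/0]).

	retry_until_413() ->
		case flaky_request() of
			{response, _, 413, _} ->
				ok;
			%% Wrong outcome, likely an overloaded environment: try again.
			%% There is deliberately no retry counter; in a common_test
			%% suite the timetrap aborts the case if this never succeeds.
			{response, _, 408, _} ->
				retry_until_413();
			{error, {stream_error, {closed, {error, closed}}}} ->
				retry_until_413()
		end.

	%% Placeholder for the real request; it just simulates the three
	%% outcomes the test can observe.
	flaky_request() ->
		case rand:uniform(3) of
			1 -> {response, nofin, 413, []};
			2 -> {response, nofin, 408, []};
			3 -> {error, {stream_error, {closed, {error, closed}}}}
		end.

Calling retry_sketch:retry_until_413() returns ok once the simulated request finally yields 413; in the real test the same recursion is bounded by the timetrap rather than by an explicit retry limit.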
Loïc Hoguin 2024-01-18 11:13:51 +01:00
parent ecf3d43613
commit 992ee6241d


@@ -591,8 +591,20 @@ do_read_urlencoded_body_too_large(Path, Body, Config) ->
 		{<<"content-length">>, integer_to_binary(iolist_size(Body))}
 	]),
 	gun:data(ConnPid, Ref, fin, Body),
-	{response, _, 413, _} = gun:await(ConnPid, Ref, infinity),
-	gun:close(ConnPid).
+	Response = gun:await(ConnPid, Ref, infinity),
+	gun:close(ConnPid),
+	case Response of
+		{response, _, 413, _} ->
+			ok;
+		%% We got the wrong crash, likely because the environment
+		%% was overloaded and the timeout triggered. Try again.
+		{response, _, 408, _} ->
+			do_read_urlencoded_body_too_large(Path, Body, Config);
+		%% Timing issues make it possible for the connection to be
+		%% closed before the data went through. We retry.
+		{error, {stream_error, {closed, {error,closed}}}} ->
+			do_read_urlencoded_body_too_large(Path, Body, Config)
+	end.
 
 read_urlencoded_body_too_long(Config) ->
 	doc("application/x-www-form-urlencoded request body sent too slow. "