2014-02-06 19:36:25 +01:00
|
|
|
%% Copyright (c) 2011-2014, Loïc Hoguin <essen@ninenines.eu>
|
2011-03-29 13:49:48 +02:00
|
|
|
%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
|
2011-03-07 22:59:22 +01:00
|
|
|
%%
|
|
|
|
%% Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
%% purpose with or without fee is hereby granted, provided that the above
|
|
|
|
%% copyright notice and this permission notice appear in all copies.
|
|
|
|
%%
|
|
|
|
%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
|
|
%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
|
|
%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
|
|
|
2012-08-27 13:28:57 +02:00
|
|
|
-module(cowboy_req).
|
2011-03-18 22:38:26 +01:00
|
|
|
|
2012-08-27 12:16:07 +02:00
|
|
|
%% Request API.
|
2013-05-15 15:17:33 +02:00
|
|
|
-export([new/14]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([method/1]).
|
|
|
|
-export([version/1]).
|
|
|
|
-export([peer/1]).
|
|
|
|
-export([host/1]).
|
|
|
|
-export([host_info/1]).
|
|
|
|
-export([port/1]).
|
|
|
|
-export([path/1]).
|
|
|
|
-export([path_info/1]).
|
2012-09-17 11:46:45 +02:00
|
|
|
-export([qs/1]).
|
2014-09-23 16:43:29 +03:00
|
|
|
-export([parse_qs/1]).
|
|
|
|
-export([match_qs/2]).
|
2012-09-16 01:13:44 +02:00
|
|
|
-export([host_url/1]).
|
2012-09-15 22:03:00 +02:00
|
|
|
-export([url/1]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([binding/2]).
|
|
|
|
-export([binding/3]).
|
|
|
|
-export([bindings/1]).
|
|
|
|
-export([header/2]).
|
|
|
|
-export([header/3]).
|
|
|
|
-export([headers/1]).
|
|
|
|
-export([parse_header/2]).
|
|
|
|
-export([parse_header/3]).
|
2014-09-23 16:43:29 +03:00
|
|
|
-export([parse_cookies/1]).
|
|
|
|
-export([match_cookies/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([meta/2]).
|
|
|
|
-export([meta/3]).
|
2012-09-15 22:51:37 +02:00
|
|
|
-export([set_meta/3]).
|
2012-08-27 12:16:07 +02:00
|
|
|
|
|
|
|
%% Request body API.
|
|
|
|
-export([has_body/1]).
|
|
|
|
-export([body_length/1]).
|
|
|
|
-export([body/1]).
|
2013-03-06 08:50:45 -05:00
|
|
|
-export([body/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([body_qs/1]).
|
2013-03-06 08:50:45 -05:00
|
|
|
-export([body_qs/2]).
|
2014-02-06 19:36:25 +01:00
|
|
|
|
|
|
|
%% Multipart API.
|
|
|
|
-export([part/1]).
|
2014-08-12 00:51:46 -07:00
|
|
|
-export([part/2]).
|
2014-02-06 19:36:25 +01:00
|
|
|
-export([part_body/1]).
|
|
|
|
-export([part_body/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
|
|
|
|
%% Response API.
|
|
|
|
-export([set_resp_cookie/4]).
|
|
|
|
-export([set_resp_header/3]).
|
|
|
|
-export([set_resp_body/2]).
|
2013-01-05 23:35:30 +01:00
|
|
|
-export([set_resp_body_fun/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([set_resp_body_fun/3]).
|
|
|
|
-export([has_resp_header/2]).
|
|
|
|
-export([has_resp_body/1]).
|
2012-09-16 01:55:40 +02:00
|
|
|
-export([delete_resp_header/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
-export([reply/2]).
|
|
|
|
-export([reply/3]).
|
|
|
|
-export([reply/4]).
|
|
|
|
-export([chunked_reply/2]).
|
|
|
|
-export([chunked_reply/3]).
|
|
|
|
-export([chunk/2]).
|
|
|
|
-export([upgrade_reply/3]).
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
-export([continue/1]).
|
2013-08-24 11:20:14 +02:00
|
|
|
-export([maybe_reply/2]).
|
2012-09-15 22:19:39 +02:00
|
|
|
-export([ensure_response/2]).
|
2012-08-27 12:16:07 +02:00
|
|
|
|
2012-09-16 16:04:16 +02:00
|
|
|
%% Private setter/getter API.
|
2013-02-11 09:03:13 +01:00
|
|
|
-export([append_buffer/2]).
|
2012-09-29 11:08:59 +02:00
|
|
|
-export([get/2]).
|
|
|
|
-export([set/2]).
|
2012-09-17 00:39:29 +02:00
|
|
|
-export([set_bindings/4]).
|
2012-09-16 13:57:27 +02:00
|
|
|
-export([lock/1]).
|
2012-09-15 20:33:57 +02:00
|
|
|
-export([to_list/1]).
|
2011-06-01 12:49:03 +02:00
|
|
|
|
2013-10-23 11:21:31 +02:00
|
|
|
%% Cookie attributes (domain, path, max-age, secure...) as defined by cowlib.
-type cookie_opts() :: cow_cookie:cookie_opts().
-export_type([cookie_opts/0]).

%% Decodes a fully-buffered content-encoded body (e.g. identity, gzip).
-type content_decode_fun() :: fun((binary()) -> binary()).
%% Incrementally decodes a transfer-encoded body (e.g. chunked);
%% the second argument carries the decoder's private state.
-type transfer_decode_fun() :: fun((binary(), any())
	-> cow_http_te:decode_ret()).

%% Options accepted by the body reading functions (body/2, body_qs/2,
%% part/2, part_body/2). See the body functions for the defaults.
-type body_opts() :: [{continue, boolean()}
	| {length, non_neg_integer()}
	| {read_length, non_neg_integer()}
	| {read_timeout, timeout()}
	| {transfer_decode, transfer_decode_fun(), any()}
	| {content_decode, content_decode_fun()}].
-export_type([body_opts/0]).

%% Fun that sends a response body given the socket and transport module.
-type resp_body_fun() :: fun((any(), module()) -> ok).
%% Fun used to send one chunk of a chunked response.
-type send_chunk_fun() :: fun((iodata()) -> ok).
%% Fun driving a chunked response; it receives a send_chunk_fun().
-type resp_chunked_fun() :: fun((send_chunk_fun()) -> ok).
|
2012-09-26 10:08:43 +02:00
|
|
|
|
2012-09-17 01:11:44 +02:00
|
|
|
%% Request state threaded through every function in this module.
%% Opaque to callers; all access goes through the exported getters/setters.
-record(http_req, {
	%% Transport.
	socket = undefined :: any(),
	transport = undefined :: undefined | module(),
	%% Whether the connection is kept alive after this request.
	connection = keepalive :: keepalive | close,

	%% Request.
	pid = undefined :: pid(),
	method = <<"GET">> :: binary(),
	version = 'HTTP/1.1' :: cowboy:http_version(),
	peer = undefined :: undefined | {inet:ip_address(), inet:port_number()},
	host = undefined :: undefined | binary(),
	%% Extra host segments captured by a '...' route pattern.
	host_info = undefined :: undefined | cowboy_router:tokens(),
	port = undefined :: undefined | inet:port_number(),
	path = undefined :: binary(),
	%% Extra path segments captured by a '...' route pattern.
	path_info = undefined :: undefined | cowboy_router:tokens(),
	qs = undefined :: binary(),
	%% Named route bindings from cowboy_router.
	bindings = undefined :: undefined | cowboy_router:bindings(),
	headers = [] :: cowboy:http_headers(),
	meta = [] :: [{atom(), any()}],

	%% Request body.
	body_state = waiting :: waiting | done | {stream, non_neg_integer(),
		transfer_decode_fun(), any(), content_decode_fun()},
	%% Data received from the socket but not yet consumed.
	buffer = <<>> :: binary(),
	%% {Boundary, Buffer} while reading a multipart body.
	multipart = undefined :: undefined | {binary(), binary()},

	%% Response.
	resp_compress = false :: boolean(),
	resp_state = waiting :: locked | waiting | waiting_stream
		| chunks | stream | done,
	resp_headers = [] :: cowboy:http_headers(),
	resp_body = <<>> :: iodata() | resp_body_fun()
		| {non_neg_integer(), resp_body_fun()}
		| {chunked, resp_chunked_fun()},

	%% Functions.
	%% Hook called just before the response is sent.
	onresponse = undefined :: undefined | already_called
		| cowboy:onresponse_fun()
}).

-opaque req() :: #http_req{}.
-export_type([req/0]).
|
|
|
|
|
2011-03-18 22:38:26 +01:00
|
|
|
%% Request API.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2013-05-30 20:21:01 +02:00
|
|
|
%% Build a fresh request object from the parsed request line, headers
%% and connection information. Decides upfront whether the connection
%% can be kept alive, from CanKeepalive, the connection header and the
%% HTTP version (HTTP/1.0 defaults to close, HTTP/1.1 to keepalive).
-spec new(any(), module(),
	undefined | {inet:ip_address(), inet:port_number()},
	binary(), binary(), binary(),
	cowboy:http_version(), cowboy:http_headers(), binary(),
	inet:port_number() | undefined, binary(), boolean(), boolean(),
	undefined | cowboy:onresponse_fun())
	-> req().
new(Socket, Transport, Peer, Method, Path, Query,
		Version, Headers, Host, Port, Buffer, CanKeepalive,
		Compress, OnResponse) ->
	Req = #http_req{socket=Socket, transport=Transport, pid=self(), peer=Peer,
		method=Method, path=Path, qs=Query, version=Version,
		headers=Headers, host=Host, port=Port, buffer=Buffer,
		resp_compress=Compress, onresponse=OnResponse},
	if
		not CanKeepalive ->
			%% The protocol forbids keepalive for this request.
			Req#http_req{connection=close};
		true ->
			case parse_header(<<"connection">>, Req) of
				undefined ->
					%% No connection header: fall back on the version default.
					case Version of
						'HTTP/1.1' -> Req; %% keepalive
						'HTTP/1.0' -> Req#http_req{connection=close}
					end;
				Tokens ->
					Req#http_req{connection=connection_to_atom(Tokens)}
			end
	end.
|
2012-09-16 15:51:15 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the request method as a binary, e.g. <<"GET">>.
-spec method(req()) -> binary().
method(#http_req{method=Method}) ->
	Method.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the HTTP version used for this request.
-spec version(req()) -> cowboy:http_version().
version(#http_req{version=Version}) ->
	Version.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-10-01 09:55:51 -04:00
|
|
|
%% Return the peer address and port, or undefined when unknown.
-spec peer(req()) -> {inet:ip_address(), inet:port_number()} | undefined.
peer(#http_req{peer=Peer}) ->
	Peer.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the requested host as a binary.
-spec host(req()) -> binary().
host(#http_req{host=Host}) ->
	Host.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the extra host tokens matched by a '...' route, if any.
-spec host_info(req()) -> cowboy_router:tokens() | undefined.
host_info(#http_req{host_info=HostInfo}) ->
	HostInfo.
|
2011-05-09 14:31:06 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the port the request was received on.
-spec port(req()) -> inet:port_number().
port(#http_req{port=Port}) ->
	Port.
|
2011-05-04 12:05:57 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the request path as a binary.
-spec path(req()) -> binary().
path(#http_req{path=Path}) ->
	Path.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the extra path tokens matched by a '...' route, if any.
-spec path_info(req()) -> cowboy_router:tokens() | undefined.
path_info(#http_req{path_info=PathInfo}) ->
	PathInfo.
|
2011-05-09 14:31:06 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the raw query string as a binary.
-spec qs(req()) -> binary().
qs(#http_req{qs=Qs}) ->
	Qs.
|
|
|
|
|
|
|
|
%% Parse the query string into a list of key/value pairs.
%% Keys that appear without a value (e.g. ?flag) map to the atom true.
-spec parse_qs(req()) -> [{binary(), binary() | true}].
parse_qs(Req) ->
	cow_qs:parse_qs(Req#http_req.qs).
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-10-04 13:21:16 +03:00
|
|
|
%% Match the query string against a list of field specifications,
%% returning the constrained values as a map.
-spec match_qs(cowboy:fields(), req()) -> map().
match_qs(Fields, Req) ->
	Map = kvlist_to_map(Fields, parse_qs(Req)),
	filter(Fields, Map).
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2012-09-16 01:13:44 +02:00
|
|
|
%% The URL includes the scheme, host and port only.
%% Returns undefined when the port is unknown (e.g. proxied requests).
-spec host_url(req()) -> undefined | binary().
host_url(#http_req{port=undefined}) ->
	undefined;
host_url(#http_req{transport=Transport, host=Host, port=Port}) ->
	TName = Transport:name(),
	%% "https" for TLS transports, "http" otherwise.
	Secure = if
		TName =:= ssl -> <<"s">>;
		true -> <<>>
	end,
	%% Omit the port only when it is the scheme's default.
	PortBin = if
		TName =:= ssl, Port =:= 443 -> <<>>;
		TName =:= tcp, Port =:= 80 -> <<>>;
		true -> << ":", (integer_to_binary(Port))/binary >>
	end,
	<< "http", Secure/binary, "://", Host/binary, PortBin/binary >>.
|
2012-09-16 01:13:44 +02:00
|
|
|
|
2013-05-15 15:17:33 +02:00
|
|
|
%% The URL includes the scheme, host, port, path and query string.
%% Returns undefined when host_url/1 cannot determine the base URL.
-spec url(req()) -> undefined | binary().
url(Req=#http_req{}) ->
	url(Req, host_url(Req)).

url(_, undefined) ->
	undefined;
url(#http_req{path=Path, qs=QS}, HostURL) ->
	%% Only add the "?" separator when there is a query string.
	Query = case QS of
		<<>> -> <<>>;
		_ -> << "?", QS/binary >>
	end,
	<< HostURL/binary, Path/binary, Query/binary >>.
|
2012-09-15 22:03:00 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the route binding for Name, or undefined when unbound.
-spec binding(atom(), req()) -> any() | undefined.
binding(Name, Req) ->
	binding(Name, Req, undefined).
|
2011-03-29 13:49:48 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the route binding for Name, or Default when unbound.
-spec binding(atom(), req(), Default) -> any() | Default when Default::any().
binding(Name, #http_req{bindings=Bindings}, Default) when is_atom(Name) ->
	case lists:keyfind(Name, 1, Bindings) of
		false -> Default;
		{_, Value} -> Value
	end.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return all route bindings as a key/value list.
-spec bindings(req()) -> [{atom(), any()}].
bindings(#http_req{bindings=Bindings}) ->
	Bindings.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the raw value of the given header, or undefined when absent.
%% Header names are expected lowercase.
-spec header(binary(), req()) -> binary() | undefined.
header(Name, Req) ->
	header(Name, Req, undefined).
|
2011-03-29 13:49:48 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the raw value of the given header, or Default when absent.
-spec header(binary(), req(), Default) -> binary() | Default when Default::any().
header(Name, #http_req{headers=Headers}, Default) ->
	case lists:keyfind(Name, 1, Headers) of
		false -> Default;
		{_, Value} -> Value
	end.
|
2011-03-07 22:59:22 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return all request headers as received (lowercase names).
-spec headers(req()) -> cowboy:http_headers().
headers(#http_req{headers=Headers}) ->
	Headers.
|
|
|
|
|
|
|
|
%% Parse the given header and return a structured representation.
%% A few headers have a meaningful value when absent and get a
%% non-undefined default; all other headers default to undefined.
%% The name-to-parser mapping itself lives in parse_header_fun/1,
%% which parse_header/3 consults, so it is not duplicated here.
-spec parse_header(binary(), Req) -> any() when Req::req().
parse_header(Name = <<"content-length">>, Req) ->
	%% A missing content-length means an empty body.
	parse_header(Name, Req, 0);
parse_header(Name = <<"cookie">>, Req) ->
	%% No cookie header parses as an empty cookie list.
	parse_header(Name, Req, []);
parse_header(Name = <<"transfer-encoding">>, Req) ->
	%% The default transfer coding is identity.
	parse_header(Name, Req, [<<"identity">>]);
parse_header(Name, Req) ->
	parse_header(Name, Req, undefined).
|
|
|
|
|
|
|
|
%% Parse the given header, returning Default when the header is absent.
%% Crashes with function_clause for headers with no known parser.
-spec parse_header(binary(), Req, any()) -> any() when Req::req().
parse_header(Name, Req, Default) ->
	ParseFun = parse_header_fun(Name),
	parse_header(Name, Req, Default, ParseFun).
|
|
|
|
|
|
|
|
%% Map a lowercase header name to its cowlib parsing fun.
%% Headers not listed here cannot be parsed and crash with
%% function_clause, which surfaces the unsupported name to the caller.
parse_header_fun(<<"accept">>) -> fun cow_http_hd:parse_accept/1;
parse_header_fun(<<"accept-charset">>) -> fun cow_http_hd:parse_accept_charset/1;
parse_header_fun(<<"accept-encoding">>) -> fun cow_http_hd:parse_accept_encoding/1;
parse_header_fun(<<"accept-language">>) -> fun cow_http_hd:parse_accept_language/1;
parse_header_fun(<<"authorization">>) -> fun cow_http_hd:parse_authorization/1;
parse_header_fun(<<"connection">>) -> fun cow_http_hd:parse_connection/1;
parse_header_fun(<<"content-length">>) -> fun cow_http_hd:parse_content_length/1;
parse_header_fun(<<"content-type">>) -> fun cow_http_hd:parse_content_type/1;
parse_header_fun(<<"cookie">>) -> fun cow_cookie:parse_cookie/1;
parse_header_fun(<<"expect">>) -> fun cow_http_hd:parse_expect/1;
parse_header_fun(<<"if-match">>) -> fun cow_http_hd:parse_if_match/1;
parse_header_fun(<<"if-modified-since">>) -> fun cow_http_hd:parse_if_modified_since/1;
parse_header_fun(<<"if-none-match">>) -> fun cow_http_hd:parse_if_none_match/1;
parse_header_fun(<<"if-unmodified-since">>) -> fun cow_http_hd:parse_if_unmodified_since/1;
parse_header_fun(<<"range">>) -> fun cow_http_hd:parse_range/1;
parse_header_fun(<<"sec-websocket-extensions">>) -> fun cow_http_hd:parse_sec_websocket_extensions/1;
parse_header_fun(<<"sec-websocket-protocol">>) -> fun cow_http_hd:parse_sec_websocket_protocol_req/1;
parse_header_fun(<<"transfer-encoding">>) -> fun cow_http_hd:parse_transfer_encoding/1;
parse_header_fun(<<"upgrade">>) -> fun cow_http_hd:parse_upgrade/1;
parse_header_fun(<<"x-forwarded-for">>) -> fun cow_http_hd:parse_x_forwarded_for/1.
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Look the header up and run ParseFun over its raw value,
%% or return Default when the header is absent.
parse_header(Name, Req, Default, ParseFun) ->
	Raw = header(Name, Req),
	if
		Raw =:= undefined -> Default;
		true -> ParseFun(Raw)
	end.
|
2011-10-05 03:17:13 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Parse the cookie header into a list of name/value pairs.
%% Returns [] when no cookie header was sent.
-spec parse_cookies(req()) -> [{binary(), binary()}].
parse_cookies(Req) ->
	parse_header(<<"cookie">>, Req).
|
2011-07-08 13:41:30 -05:00
|
|
|
|
2014-10-04 13:21:16 +03:00
|
|
|
%% Match the request cookies against a list of field specifications,
%% returning the constrained values as a map.
-spec match_cookies(cowboy:fields(), req()) -> map().
match_cookies(Fields, Req) ->
	Cookies = parse_cookies(Req),
	filter(Fields, kvlist_to_map(Fields, Cookies)).
|
2011-07-08 13:41:30 -05:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the metadata value for Name, or undefined when unset.
-spec meta(atom(), req()) -> any() | undefined.
meta(Name, Req) ->
	meta(Name, Req, undefined).
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Return the metadata value for Name, or Default when unset.
-spec meta(atom(), req(), any()) -> any().
meta(Name, #http_req{meta=Meta}, Default) ->
	case lists:keyfind(Name, 1, Meta) of
		false -> Default;
		{_, Value} -> Value
	end.
|
|
|
|
|
2012-09-15 22:51:37 +02:00
|
|
|
%% Store a metadata value under Name, replacing any previous value.
-spec set_meta(atom(), any(), Req) -> Req when Req::req().
set_meta(Name, Value, Req=#http_req{meta=Meta}) ->
	Meta2 = lists:keystore(Name, 1, Meta, {Name, Value}),
	Req#http_req{meta=Meta2}.
|
2012-09-15 22:51:37 +02:00
|
|
|
|
2011-03-21 17:26:00 +01:00
|
|
|
%% Request Body API.
|
|
|
|
|
2013-04-11 14:22:16 +02:00
|
|
|
%% Return whether the request carries a body, judging from the raw
%% content-length and transfer-encoding headers.
-spec has_body(req()) -> boolean().
has_body(#http_req{headers=Headers}) ->
	case lists:keyfind(<<"content-length">>, 1, Headers) of
		{_, <<"0">>} ->
			%% An explicit zero length means no body.
			false;
		{_, _} ->
			true;
		false ->
			%% No content-length: a body is present only when
			%% a transfer-encoding header was sent.
			lists:keymember(<<"transfer-encoding">>, 1, Headers)
	end.
|
2012-03-29 01:14:44 +02:00
|
|
|
|
|
|
|
%% The length may not be known if Transfer-Encoding is not identity,
|
|
|
|
%% and the body hasn't been read at the time of the call.
|
2014-09-23 16:43:29 +03:00
|
|
|
%% The length may not be known if Transfer-Encoding is not identity,
%% and the body hasn't been read at the time of the call.
-spec body_length(req()) -> undefined | non_neg_integer().
body_length(Req) ->
	case parse_header(<<"transfer-encoding">>, Req) of
		[<<"identity">>] -> parse_header(<<"content-length">>, Req);
		_ -> undefined
	end.
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Read the request body using the default options.
%% Equivalent to body(Req, []); returns {ok, Data, Req} when the
%% body has been read in full, {more, Data, Req} otherwise.
-spec body(Req) -> {ok, binary(), Req} | {more, binary(), Req} when Req::req().
body(Req) ->
	body(Req, []).
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
-spec body(Req, body_opts()) -> {ok, binary(), Req} | {more, binary(), Req} when Req::req().
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
body(Req=#http_req{body_state=waiting}, Opts) ->
|
|
|
|
%% Send a 100 continue if needed (enabled by default).
|
2014-09-23 16:43:29 +03:00
|
|
|
case lists:keyfind(continue, 1, Opts) of
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
{_, false} ->
|
2014-09-23 16:43:29 +03:00
|
|
|
ok;
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
_ ->
|
2014-09-23 16:43:29 +03:00
|
|
|
ExpectHeader = parse_header(<<"expect">>, Req),
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
ok = case ExpectHeader of
|
2015-05-29 15:07:22 -07:00
|
|
|
continue -> continue(Req);
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
_ -> ok
|
2014-09-23 16:43:29 +03:00
|
|
|
end
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
end,
|
|
|
|
%% Initialize body streaming state.
|
|
|
|
CFun = case lists:keyfind(content_decode, 1, Opts) of
|
|
|
|
false ->
|
2015-02-04 16:18:28 +01:00
|
|
|
fun body_content_decode_identity/1;
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
{_, CFun0} ->
|
|
|
|
CFun0
|
|
|
|
end,
|
|
|
|
case lists:keyfind(transfer_decode, 1, Opts) of
|
|
|
|
false ->
|
2014-09-23 16:43:29 +03:00
|
|
|
case parse_header(<<"transfer-encoding">>, Req) of
|
|
|
|
[<<"chunked">>] ->
|
|
|
|
body(Req#http_req{body_state={stream, 0,
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
fun cow_http_te:stream_chunked/2, {0, 0}, CFun}}, Opts);
|
2014-09-23 16:43:29 +03:00
|
|
|
[<<"identity">>] ->
|
|
|
|
case body_length(Req) of
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
0 ->
|
2014-09-23 16:43:29 +03:00
|
|
|
{ok, <<>>, Req#http_req{body_state=done}};
|
|
|
|
Len ->
|
|
|
|
body(Req#http_req{body_state={stream, Len,
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
fun cow_http_te:stream_identity/2, {0, Len},
|
|
|
|
CFun}}, Opts)
|
|
|
|
end
|
|
|
|
end;
|
|
|
|
{_, TFun, TState} ->
|
2014-09-23 16:43:29 +03:00
|
|
|
body(Req#http_req{body_state={stream, 0,
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
TFun, TState, CFun}}, Opts)
|
|
|
|
end;
|
|
|
|
%% Nothing left to read: the whole body has already been consumed.
body(Req=#http_req{body_state=done}, _) ->
	{ok, <<>>, Req};
%% Streaming state already initialized: resolve the read options and
%% enter the accumulation loop.
%%
%% Defaults: 8MB soft limit on the returned chunk (length), 1MB per
%% socket read (read_length), 15s per socket read (read_timeout).
body(Req, Opts) ->
	%% Look up an option tuple, falling back to its default value.
	GetOpt = fun(Key, Default) ->
		case lists:keyfind(Key, 1, Opts) of
			false -> Default;
			{_, Value} -> Value
		end
	end,
	MaxChunkLen = GetOpt(length, 8000000),
	SockReadLen = GetOpt(read_length, 1000000),
	SockReadTimeout = GetOpt(read_timeout, 15000),
	body_loop(Req, SockReadTimeout, SockReadLen, MaxChunkLen, <<>>).
|
|
|
|
|
2015-02-04 16:18:28 +01:00
|
|
|
%% Default identity function for content decoding.
|
|
|
|
%% @todo Move into cowlib when more content decode functions get implemented.
|
|
|
|
%% Pass the transferred data through unchanged (no content encoding).
body_content_decode_identity(Data) ->
	Data.
|
|
|
|
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%% Accumulate decoded body data until either the body ends ({ok, ...})
%% or the soft limit ChunkLength is reached ({more, ...}).
%%
%% When the buffer is empty we must hit the socket first; otherwise we
%% can decode what is already buffered. NOTE(review): assumes Length in
%% body_state is the number of bytes still expected from the socket —
%% confirm against body_decode.
body_loop(Req=#http_req{buffer=Buffer, body_state={stream, Length, _, _, _}},
		ReadTimeout, ReadLength, ChunkLength, Acc) ->
	{Tag, Data, Req2} = case Buffer of
		<<>> ->
			%% Never request more than the remaining expected length.
			body_recv(Req, ReadTimeout, min(Length, ReadLength));
		_ ->
			body_decode(Req, ReadTimeout)
	end,
	Acc2 = << Acc/binary, Data/binary >>,
	case Tag of
		ok ->
			%% Body fully read.
			{ok, Acc2, Req2};
		more when byte_size(Acc2) >= ChunkLength ->
			%% Soft limit reached; hand back what we have so far.
			{more, Acc2, Req2};
		more ->
			body_loop(Req2, ReadTimeout, ReadLength, ChunkLength, Acc2)
	end.
|
|
|
|
|
|
|
|
%% Pull more data from the socket into the request buffer, then decode.
%% The {ok, Data} match is deliberate: a recv error or timeout crashes
%% the process with a badmatch, per the let-it-crash convention.
body_recv(Req=#http_req{transport=Transport, socket=Socket, buffer=Buffer},
		ReadTimeout, ReadLength) ->
	{ok, Data} = Transport:recv(Socket, ReadLength, ReadTimeout),
	NewBuffer = << Buffer/binary, Data/binary >>,
	body_decode(Req#http_req{buffer=NewBuffer}, ReadTimeout).
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
|
2012-03-29 01:14:44 +02:00
|
|
|
%% Two decodings happen. First a decoding function is applied to the
|
|
|
|
%% transferred data, and then another is applied to the actual content.
|
|
|
|
%%
|
|
|
|
%% Transfer encoding is generally used for chunked bodies. The decoding
|
|
|
|
%% function uses a state to keep track of how much it has read, which is
|
|
|
|
%% also initialized through this function.
|
|
|
|
%%
|
|
|
|
%% Content encoding is generally used for compression.
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%%
|
2014-03-24 14:14:43 +01:00
|
|
|
%% @todo Handle chunked after-the-facts headers.
|
|
|
|
%% @todo Depending on the length returned we might want to 0 or +5 it.
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%% Decode the data currently buffered using the transfer-decode fun
%% (TDecode, e.g. chunked decoding) and run the result through the
%% content-decode fun (CDecode). Depending on the TDecode result we
%% either need to receive more data, return a partial body, or finish.
body_decode(Req=#http_req{buffer=Data, body_state={stream, _,
		TDecode, TState, CDecode}}, ReadTimeout) ->
	case TDecode(Data, TState) of
		%% Not enough buffered data to decode anything; read more.
		more ->
			body_recv(Req#http_req{body_state={stream, 0,
				TDecode, TState, CDecode}}, ReadTimeout, 0);
		%% Decoded some data; no information about what remains.
		{more, Data2, TState2} ->
			{more, CDecode(Data2), Req#http_req{body_state={stream, 0,
				TDecode, TState2, CDecode}, buffer= <<>>}};
		%% Decoded some data and we know how many bytes remain.
		{more, Data2, Length, TState2} when is_integer(Length) ->
			{more, CDecode(Data2), Req#http_req{body_state={stream, Length,
				TDecode, TState2, CDecode}, buffer= <<>>}};
		%% Decoded some data with leftover undecoded bytes to rebuffer.
		{more, Data2, Rest, TState2} ->
			{more, CDecode(Data2), Req#http_req{body_state={stream, 0,
				TDecode, TState2, CDecode}, buffer=Rest}};
		%% Body complete; nothing decoded in this last pass.
		{done, TotalLength, Rest} ->
			{ok, <<>>, body_decode_end(Req, TotalLength, Rest)};
		%% Body complete with a final decoded piece.
		{done, Data2, TotalLength, Rest} ->
			{ok, CDecode(Data2), body_decode_end(Req, TotalLength, Rest)}
	end.
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Finalize body reading: record the total decoded length as the
%% content-length header, drop the transfer-encoding header, mark the
%% body state as done and keep leftover socket data in the buffer.
body_decode_end(Req=#http_req{headers=Headers}, TotalLength, Rest) ->
	Headers2 = lists:keystore(<<"content-length">>, 1, Headers,
		{<<"content-length">>, integer_to_binary(TotalLength)}),
	%% At this point we just assume TEs were all decoded.
	Headers3 = lists:keydelete(<<"transfer-encoding">>, 1, Headers2),
	Req#http_req{buffer=Rest, body_state=done, headers=Headers3}.
|
|
|
|
|
|
|
|
%% Read and parse a urlencoded request body using the default options:
%% at most 64KB returned (soft limit), 64KB read length, 5s read timeout.
-spec body_qs(Req) -> {ok, [{binary(), binary() | true}], Req}
	| {badlength, Req} when Req::req().
body_qs(Req) ->
	body_qs(Req, [
		{length, 64000},
		{read_length, 64000},
		{read_timeout, 5000}]).
|
|
|
|
|
|
|
|
%% Read and parse a urlencoded request body with the given body options.
%% Returns {badlength, Req} when the body exceeds the length limit
%% (i.e. body/2 returned a more tuple instead of ok).
-spec body_qs(Req, body_opts()) -> {ok, [{binary(), binary() | true}], Req}
	| {badlength, Req} when Req::req().
body_qs(Req, Opts) ->
	case body(Req, Opts) of
		{ok, Body, Req2} ->
			{ok, cow_qs:parse_qs(Body), Req2};
		%% Body larger than the configured length: refuse to parse.
		{more, _, Req2} ->
			{badlength, Req2}
	end.
|
|
|
|
|
2014-02-06 19:36:25 +01:00
|
|
|
%% Multipart API.

%% Read the headers of the next part of the multipart body, using
%% default body reading options (64KB length/read length, 5s timeout).
-spec part(Req)
	-> {ok, cow_multipart:headers(), Req} | {done, Req}
	when Req::req().
part(Req) ->
	part(Req, [
		{length, 64000},
		{read_length, 64000},
		{read_timeout, 5000}]).
|
2014-02-06 19:36:25 +01:00
|
|
|
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%% Read the headers of the next part of the multipart body with the
%% given body options. Initializes multipart state on first use.
%% Returns {done, Req} once all parts have been consumed.
-spec part(Req, body_opts())
	-> {ok, cow_multipart:headers(), Req} | {done, Req}
	when Req::req().
part(Req=#http_req{multipart=undefined}, Opts) ->
	part(init_multipart(Req), Opts);
part(Req, Opts) ->
	{Data, Req2} = stream_multipart(Req, Opts),
	part(Data, Opts, Req2).

%% Feed buffered data to the multipart header parser, streaming more
%% body data until the part headers are complete or the body ends.
part(Buffer, Opts, Req=#http_req{multipart={Boundary, _}}) ->
	case cow_multipart:parse_headers(Buffer, Boundary) of
		%% Nothing parsed yet; append freshly streamed data.
		more ->
			{Data, Req2} = stream_multipart(Req, Opts),
			part(<< Buffer/binary, Data/binary >>, Opts, Req2);
		%% Parser trimmed the buffer; continue from Buffer2.
		{more, Buffer2} ->
			{Data, Req2} = stream_multipart(Req, Opts),
			part(<< Buffer2/binary, Data/binary >>, Opts, Req2);
		{ok, Headers, Rest} ->
			{ok, Headers, Req#http_req{multipart={Boundary, Rest}}};
		%% Ignore epilogue.
		{done, _} ->
			{done, Req#http_req{multipart=undefined}}
	end.
|
|
|
|
|
2014-02-06 19:36:25 +01:00
|
|
|
%% Read the body of the current part of the multipart message, using
%% default body reading options.
-spec part_body(Req)
	-> {ok, binary(), Req} | {more, binary(), Req}
	when Req::req().
part_body(Req) ->
	part_body(Req, []).
|
2014-02-06 19:36:25 +01:00
|
|
|
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%% Read the body of the current part of the multipart message with the
%% given body options. Returns a more tuple when the accumulated part
%% body exceeds the length option (soft limit, default 8000000 bytes).
-spec part_body(Req, body_opts())
	-> {ok, binary(), Req} | {more, binary(), Req}
	when Req::req().
part_body(Req=#http_req{multipart=undefined}, Opts) ->
	part_body(init_multipart(Req), Opts);
part_body(Req, Opts) ->
	part_body(<<>>, Opts, Req, <<>>).

%% Accumulate the part body, streaming and parsing until the part ends
%% or the accumulator grows past the configured length.
part_body(Buffer, Opts, Req=#http_req{multipart={Boundary, _}}, Acc) ->
	ChunkLen = case lists:keyfind(length, 1, Opts) of
		false -> 8000000;
		{_, ChunkLen0} -> ChunkLen0
	end,
	case byte_size(Acc) > ChunkLen of
		%% Soft limit reached: hand back what we have, keep the rest
		%% of the unparsed data in the multipart buffer.
		true ->
			{more, Acc, Req#http_req{multipart={Boundary, Buffer}}};
		false ->
			{Data, Req2} = stream_multipart(Req, Opts),
			case cow_multipart:parse_body(<< Buffer/binary, Data/binary >>, Boundary) of
				{ok, Body} ->
					part_body(<<>>, Opts, Req2, << Acc/binary, Body/binary >>);
				{ok, Body, Rest} ->
					part_body(Rest, Opts, Req2, << Acc/binary, Body/binary >>);
				done ->
					{ok, Acc, Req2};
				{done, Body} ->
					{ok, << Acc/binary, Body/binary >>, Req2};
				{done, Body, Rest} ->
					{ok, << Acc/binary, Body/binary >>,
						Req2#http_req{multipart={Boundary, Rest}}}
			end
	end.
|
|
|
|
|
2014-02-06 19:36:25 +01:00
|
|
|
%% Initialize multipart state from the content-type header. Crashes
%% (deliberately, by match failure) if the content-type is not
%% multipart or if the mandatory boundary parameter is missing.
init_multipart(Req) ->
	{<<"multipart">>, _, Params} = parse_header(<<"content-type">>, Req),
	{_, Boundary} = lists:keyfind(<<"boundary">>, 1, Params),
	Req#http_req{multipart={Boundary, <<>>}}.
|
2014-02-06 19:36:25 +01:00
|
|
|
|
Add request body reading options
The options were added to allow developers to fix timeout
issues when reading large bodies. It is also a cleaner and
easier to extend interface.
This commit deprecates the functions init_stream, stream_body
and skip_body which are no longer needed. They will be removed
in 1.0.
The body function can now take an additional argument that is a
list of options. The body_qs, part and part_body functions can
too and simply pass this argument down to the body call.
There are options for disabling the automatic continue reply,
setting a maximum length to be returned (soft limit), setting
the read length and read timeout, and setting the transfer and
content decode functions.
The return value of the body and body_qs have changed slightly.
The body function now works similarly to the part_body function,
in that it returns either an ok or a more tuple depending on
whether there is additional data to be read. The body_qs function
can return a badlength tuple if the body is too big. The default
size has been increased from 16KB to 64KB.
The default read length and timeout have been tweaked and vary
depending on the function called.
The body function will now adequately process chunked bodies,
which means that the body_qs function will too. But this means
that the behavior has changed slightly and your code should be
tested properly when updating your code.
The body and body_qs still accept a length as first argument
for compatibility purpose with older code. Note that this form
is deprecated and will be removed in 1.0. The part and part_body
function, being new and never having been in a release yet, have
this form completely removed in this commit.
Again, while most code should work as-is, you should make sure
that it actually does before pushing this to production.
2014-06-02 23:09:43 +02:00
|
|
|
%% Provide the next chunk of data to the multipart parser. When the
%% multipart buffer is empty, read from the request body; otherwise
%% return the buffered data and empty the buffer.
stream_multipart(Req=#http_req{body_state=BodyState, multipart={_, <<>>}}, Opts) ->
	%% Crash if the body has already been read in full: there is
	%% nothing left to feed the parser.
	true = BodyState =/= done,
	{_, Data, Req2} = body(Req, Opts),
	{Data, Req2};
stream_multipart(Req=#http_req{multipart={Boundary, Buffer}}, _) ->
	{Buffer, Req#http_req{multipart={Boundary, <<>>}}}.
|
2014-02-06 19:36:25 +01:00
|
|
|
|
2011-03-18 22:38:26 +01:00
|
|
|
%% Response API.

%% The cookie name cannot contain any of the following characters:
%%   =,;\s\t\r\n\013\014
%%
%% The cookie value cannot contain any of the following characters:
%%   ,; \t\r\n\013\014
-spec set_resp_cookie(iodata(), iodata(), cookie_opts(), Req)
	-> Req when Req::req().
set_resp_cookie(Name, Value, Opts, Req) ->
	Cookie = cow_cookie:setcookie(Name, Value, Opts),
	set_resp_header(<<"set-cookie">>, Cookie, Req).
|
2011-12-07 11:54:57 +01:00
|
|
|
|
2012-09-21 09:18:56 +02:00
|
|
|
%% Add a header to be sent with the response. The header is prepended
%% to the list of response headers; duplicates are not removed here.
-spec set_resp_header(binary(), iodata(), Req)
	-> Req when Req::req().
set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
	Req#http_req{resp_headers=[{Name, Value}|RespHeaders]}.
|
2011-11-28 09:09:41 +01:00
|
|
|
|
2012-09-16 03:51:07 +02:00
|
|
|
%% Set the response body to the given iodata, to be sent by a later
%% reply/2,3 call.
-spec set_resp_body(iodata(), Req) -> Req when Req::req().
set_resp_body(Body, Req) ->
	Req#http_req{resp_body=Body}.
|
2011-11-28 09:09:41 +01:00
|
|
|
|
2013-01-05 23:35:30 +01:00
|
|
|
%% Set a fun that will stream the response body directly to the socket.
-spec set_resp_body_fun(resp_body_fun(), Req) -> Req when Req::req().
set_resp_body_fun(StreamFun, Req) when is_function(StreamFun) ->
	Req#http_req{resp_body=StreamFun}.
|
|
|
|
|
|
|
|
%% Set a body-streaming fun together with either a known byte length
%% or the atom chunked for chunked transfer.
%%
%% If the body function crashes while writing the response body or writes
%% fewer bytes than declared the behaviour is undefined.
-spec set_resp_body_fun(non_neg_integer(), resp_body_fun(), Req)
	-> Req when Req::req();
	(chunked, resp_chunked_fun(), Req)
	-> Req when Req::req().
set_resp_body_fun(StreamLen, StreamFun, Req)
		when is_integer(StreamLen), is_function(StreamFun) ->
	Req#http_req{resp_body={StreamLen, StreamFun}};
set_resp_body_fun(chunked, StreamFun, Req)
		when is_function(StreamFun) ->
	Req#http_req{resp_body={chunked, StreamFun}}.
|
2011-12-28 18:00:27 +01:00
|
|
|
|
2012-09-21 09:18:56 +02:00
|
|
|
%% Whether a response header with this (lowercase binary) name was set.
-spec has_resp_header(binary(), req()) -> boolean().
has_resp_header(Name, #http_req{resp_headers=RespHeaders}) ->
	lists:keymember(Name, 1, RespHeaders).
|
2011-11-28 09:09:41 +01:00
|
|
|
|
2012-08-27 14:27:41 +02:00
|
|
|
%% Whether a non-empty response body was set. Streaming funs and
%% chunked bodies always count as a body; a {Length, Fun} body counts
%% only when Length > 0; otherwise the iodata size decides.
-spec has_resp_body(req()) -> boolean().
has_resp_body(#http_req{resp_body=RespBody}) when is_function(RespBody) ->
	true;
has_resp_body(#http_req{resp_body={chunked, _}}) ->
	true;
has_resp_body(#http_req{resp_body={Length, _}}) ->
	Length > 0;
has_resp_body(#http_req{resp_body=RespBody}) ->
	iolist_size(RespBody) > 0.
|
2011-11-28 09:09:41 +01:00
|
|
|
|
2012-09-21 09:18:56 +02:00
|
|
|
%% Remove the first response header with this name, if any.
-spec delete_resp_header(binary(), Req)
	-> Req when Req::req().
delete_resp_header(Name, Req=#http_req{resp_headers=RespHeaders}) ->
	RespHeaders2 = lists:keydelete(Name, 1, RespHeaders),
	Req#http_req{resp_headers=RespHeaders2}.
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Send a reply with the given status, using the previously set
%% response headers and body.
-spec reply(cowboy:http_status(), Req) -> Req when Req::req().
reply(Status, Req=#http_req{resp_body=Body}) ->
	reply(Status, [], Body, Req).
|
2011-10-13 16:16:53 +02:00
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
%% Send a reply with the given status and additional headers, using
%% the previously set response body.
-spec reply(cowboy:http_status(), cowboy:http_headers(), Req)
	-> Req when Req::req().
reply(Status, Headers, Req=#http_req{resp_body=Body}) ->
	reply(Status, Headers, Body, Req).
|
2011-10-13 16:16:53 +02:00
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec reply(cowboy:http_status(), cowboy:http_headers(),
    iodata() | resp_body_fun() | {non_neg_integer(), resp_body_fun()}
    | {chunked, resp_chunked_fun()}, Req)
    -> Req when Req::req().
%% Send a full reply. The body may be iodata, a fun streaming until the
%% connection closes, a {Length, Fun} streamed body, or a {chunked, Fun}
%% chunked body. Only allowed while no response has been sent yet
%% (resp_state is waiting or waiting_stream).
reply(Status, Headers, Body, Req=#http_req{
        socket=Socket, transport=Transport,
        version=Version, connection=Connection,
        method=Method, resp_compress=Compress,
        resp_state=RespState, resp_headers=RespHeaders})
        when RespState =:= waiting; RespState =:= waiting_stream ->
    %% The connection header is only meaningful for HTTP/1.1 over a
    %% plain transport; SPDY manages connection lifetime itself.
    HTTP11Headers = if
        Transport =/= cowboy_spdy, Version =:= 'HTTP/1.1' ->
            [{<<"connection">>, atom_to_connection(Connection)}];
        true ->
            []
    end,
    Req3 = case Body of
        BodyFun when is_function(BodyFun) ->
            %% We stream the response body until we close the connection.
            RespConn = close,
            {RespType, Req2} = if
                Transport =:= cowboy_spdy ->
                    response(Status, Headers, RespHeaders, [
                        {<<"date">>, cowboy_clock:rfc1123()},
                        {<<"server">>, <<"Cowboy">>}
                    ], stream, Req);
                true ->
                    response(Status, Headers, RespHeaders, [
                        {<<"connection">>, <<"close">>},
                        {<<"date">>, cowboy_clock:rfc1123()},
                        {<<"server">>, <<"Cowboy">>},
                        {<<"transfer-encoding">>, <<"identity">>}
                    ], <<>>, Req)
            end,
            %% Skip the body when an onresponse hook replied, or for HEAD.
            if RespType =/= hook, Method =/= <<"HEAD">> ->
                BodyFun(Socket, Transport);
            true -> ok
            end,
            Req2#http_req{connection=RespConn};
        {chunked, BodyFun} ->
            %% We stream the response body in chunks.
            {RespType, Req2} = chunked_response(Status, Headers, Req),
            if RespType =/= hook, Method =/= <<"HEAD">> ->
                ChunkFun = fun(IoData) -> chunk(IoData, Req2) end,
                BodyFun(ChunkFun),
                %% Send the last chunk if chunked encoding was used.
                %% HTTP/1.0 and waiting_stream responses are not chunked.
                if
                    Version =:= 'HTTP/1.0'; RespState =:= waiting_stream ->
                        Req2;
                    true ->
                        last_chunk(Req2)
                end;
            true -> Req2
            end;
        {ContentLength, BodyFun} ->
            %% We stream the response body for ContentLength bytes.
            RespConn = response_connection(Headers, Connection),
            {RespType, Req2} = response(Status, Headers, RespHeaders, [
                {<<"content-length">>, integer_to_list(ContentLength)},
                {<<"date">>, cowboy_clock:rfc1123()},
                {<<"server">>, <<"Cowboy">>}
            |HTTP11Headers], stream, Req),
            if RespType =/= hook, Method =/= <<"HEAD">> ->
                BodyFun(Socket, Transport);
            true -> ok
            end,
            Req2#http_req{connection=RespConn};
        _ when Compress ->
            %% In-memory body with compression enabled: maybe gzip it.
            RespConn = response_connection(Headers, Connection),
            Req2 = reply_may_compress(Status, Headers, Body, Req,
                RespHeaders, HTTP11Headers, Method),
            Req2#http_req{connection=RespConn};
        _ ->
            %% Plain in-memory body, no compression.
            RespConn = response_connection(Headers, Connection),
            Req2 = reply_no_compress(Status, Headers, Body, Req,
                RespHeaders, HTTP11Headers, Method, iolist_size(Body)),
            Req2#http_req{connection=RespConn}
    end,
    %% Clear the response accumulators; the response is now fully sent.
    Req3#http_req{resp_state=done, resp_headers=[], resp_body= <<>>}.
|
2011-03-18 22:38:26 +01:00
|
|
|
|
2013-01-07 22:42:16 +01:00
|
|
|
%% Send an in-memory body, gzipping it when worthwhile and acceptable.
%% Falls back to an uncompressed reply when accept-encoding cannot be
%% parsed (the catch below) or when compression is not applicable.
reply_may_compress(Status, Headers, Body, Req,
        RespHeaders, HTTP11Headers, Method) ->
    BodySize = iolist_size(Body),
    %% parse_header may crash on a malformed accept-encoding header;
    %% in that case we simply reply without compressing.
    try parse_header(<<"accept-encoding">>, Req) of
        Encodings ->
            %% Only gzip bodies above 300 bytes that are not already
            %% encoded, and only when the client advertised gzip.
            CanGzip = (BodySize > 300)
                andalso (false =:= lists:keyfind(<<"content-encoding">>,
                    1, Headers))
                andalso (false =:= lists:keyfind(<<"content-encoding">>,
                    1, RespHeaders))
                andalso (false =:= lists:keyfind(<<"transfer-encoding">>,
                    1, Headers))
                andalso (false =:= lists:keyfind(<<"transfer-encoding">>,
                    1, RespHeaders))
                andalso (Encodings =/= undefined)
                andalso (false =/= lists:keyfind(<<"gzip">>, 1, Encodings)),
            case CanGzip of
                true ->
                    GzBody = zlib:gzip(Body),
                    %% HEAD requests get the headers but no body bytes.
                    {_, Req2} = response(Status, Headers, RespHeaders, [
                        {<<"content-length">>, integer_to_list(byte_size(GzBody))},
                        {<<"content-encoding">>, <<"gzip">>},
                        {<<"date">>, cowboy_clock:rfc1123()},
                        {<<"server">>, <<"Cowboy">>}
                    |HTTP11Headers],
                        case Method of <<"HEAD">> -> <<>>; _ -> GzBody end,
                        Req),
                    Req2;
                false ->
                    reply_no_compress(Status, Headers, Body, Req,
                        RespHeaders, HTTP11Headers, Method, BodySize)
            end
    catch _:_ ->
        reply_no_compress(Status, Headers, Body, Req,
            RespHeaders, HTTP11Headers, Method, BodySize)
    end.
|
|
|
|
|
|
|
|
%% Send an in-memory body without compression. The content-length of
%% the real body is advertised even for HEAD requests, whose body
%% bytes are omitted.
reply_no_compress(Status, Headers, Body, Req,
        RespHeaders, HTTP11Headers, Method, BodySize) ->
    DefaultHeaders = [
        {<<"content-length">>, integer_to_list(BodySize)},
        {<<"date">>, cowboy_clock:rfc1123()},
        {<<"server">>, <<"Cowboy">>}
    |HTTP11Headers],
    SentBody = case Method of
        <<"HEAD">> -> <<>>;
        _ -> Body
    end,
    {_, Req2} = response(Status, Headers, RespHeaders,
        DefaultHeaders, SentBody, Req),
    Req2.
|
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
-spec chunked_reply(cowboy:http_status(), Req) -> Req when Req::req().
%% Initiate a chunked reply with no extra headers.
chunked_reply(Status, Req) ->
    chunked_reply(Status, [], Req).
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec chunked_reply(cowboy:http_status(), cowboy:http_headers(), Req)
    -> Req when Req::req().
%% Send the response headers and switch the request into chunked mode;
%% the body is then sent piecewise with chunk/2.
chunked_reply(Status, Headers, Req) ->
    {_RespType, Req2} = chunked_response(Status, Headers, Req),
    Req2.
|
2011-05-08 17:26:21 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
-spec chunk(iodata(), req()) -> ok.
%% Send one chunk of a chunked reply. Dispatches on transport and
%% resp_state: SPDY streams data frames, a resp_state of 'stream'
%% sends raw data (no chunked framing), and 'chunks' wraps the data
%% in HTTP/1.1 chunked encoding.
chunk(_Data, #http_req{method= <<"HEAD">>}) ->
    %% HEAD responses never carry a body.
    ok;
chunk(Data, #http_req{socket=Socket, transport=cowboy_spdy,
        resp_state=chunks}) ->
    cowboy_spdy:stream_data(Socket, Data);
chunk(Data, #http_req{socket=Socket, transport=Transport,
        resp_state=stream}) ->
    %% Streaming without chunked framing (e.g. HTTP/1.0).
    ok = Transport:send(Socket, Data);
chunk(Data, #http_req{socket=Socket, transport=Transport,
        resp_state=chunks}) ->
    %% A zero-sized chunk would terminate the body prematurely,
    %% so empty data is silently skipped.
    %% NOTE(review): the send result is returned as-is here, so a
    %% transport error would leak a non-ok value despite the -spec.
    case iolist_size(Data) of
        0 -> ok;
        Size -> Transport:send(Socket, [integer_to_list(Size, 16),
            <<"\r\n">>, Data, <<"\r\n">>])
    end.
|
2011-05-08 17:26:21 +02:00
|
|
|
|
2014-03-26 19:05:59 +01:00
|
|
|
%% If ever made public, need to send nothing if HEAD.
-spec last_chunk(Req) -> Req when Req::req().
%% Terminate a chunked body and mark the response as fully sent.
last_chunk(Req=#http_req{socket=Socket, transport=cowboy_spdy}) ->
    %% SPDY closes the stream instead; any error is ignored.
    _ = cowboy_spdy:stream_close(Socket),
    Req#http_req{resp_state=done};
last_chunk(Req=#http_req{socket=Socket, transport=Transport}) ->
    %% Zero-sized chunk followed by an empty trailer ends the body.
    _ = Transport:send(Socket, <<"0\r\n\r\n">>),
    Req#http_req{resp_state=done}.
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec upgrade_reply(cowboy:http_status(), cowboy:http_headers(), Req)
    -> Req when Req::req().
%% Send a protocol upgrade response (e.g. a Websocket handshake).
%% Only valid before any response was sent, and never over SPDY,
%% which has no upgrade mechanism here.
upgrade_reply(Status, Headers, Req=#http_req{transport=Transport,
        resp_state=waiting, resp_headers=RespHeaders})
        when Transport =/= cowboy_spdy ->
    {_, Req2} = response(Status, Headers, RespHeaders, [
        {<<"connection">>, <<"Upgrade">>}
    ], <<>>, Req),
    Req2#http_req{resp_state=done, resp_headers=[], resp_body= <<>>}.
|
2011-10-20 14:11:17 +02:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
-spec continue(req()) -> ok.
%% Send a "100 Continue" intermediate response, matching the request's
%% HTTP version. Used before reading a body when the client sent an
%% "expect: 100-continue" header.
continue(#http_req{socket=Socket, transport=Transport,
        version=Version}) ->
    VersionBin = atom_to_binary(Version, latin1),
    StatusLine =
        << VersionBin/binary, " ", (status(100))/binary, "\r\n\r\n" >>,
    ok = Transport:send(Socket, StatusLine).
|
|
|
|
|
2013-08-24 11:20:14 +02:00
|
|
|
%% Meant to be used internally for sending errors after crashes.
-spec maybe_reply([{module(), atom(), arity() | [term()], _}], req()) -> ok.
%% Send an error reply only if no response has been sent yet. The
%% request process receives a {cowboy_req, resp_sent} message whenever
%% a response goes out, so probing the mailbox (with a 0 timeout)
%% tells us whether replying is still allowed.
maybe_reply(Stacktrace, Req) ->
    receive
        {cowboy_req, resp_sent} -> ok
    after 0 ->
        _ = do_maybe_reply(Stacktrace, Req),
        ok
    end.
|
|
|
|
|
2015-02-01 19:36:13 +01:00
|
|
|
%% Pick the error status from the crash stacktrace. Crashes inside
%% cow_http_hd (header parsing, including a bad content-length)
%% indicate a malformed request (400); anything else is treated as
%% an internal server error (500). Clause order is significant.
do_maybe_reply([{erlang, binary_to_integer, _, _}, {cow_http_hd, parse_content_length, _, _}|_], Req) ->
    cowboy_req:reply(400, Req);
do_maybe_reply([{cow_http_hd, _, _, _}|_], Req) ->
    cowboy_req:reply(400, Req);
do_maybe_reply(_, Req) ->
    cowboy_req:reply(500, Req).
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec ensure_response(req(), cowboy:http_status()) -> ok.
%% Make sure a complete response was sent before the request ends.
%% Clause order is significant: the HEAD clause must come after the
%% waiting clauses so that HEAD requests still get a reply.
%% The response has already been fully sent to the client.
ensure_response(#http_req{resp_state=done}, _) ->
    ok;
%% No response has been sent but everything apparently went fine.
%% Reply with the status code found in the second argument.
ensure_response(Req=#http_req{resp_state=RespState}, Status)
        when RespState =:= waiting; RespState =:= waiting_stream ->
    _ = reply(Status, [], [], Req),
    ok;
%% Terminate the chunked body for HTTP/1.1 only.
ensure_response(#http_req{method= <<"HEAD">>}, _) ->
    ok;
ensure_response(Req=#http_req{resp_state=chunks}, _) ->
    _ = last_chunk(Req),
    ok;
ensure_response(#http_req{}, _) ->
    ok.
|
|
|
|
|
2012-09-16 16:04:16 +02:00
|
|
|
%% Private setter/getter API.
|
|
|
|
|
2013-02-11 09:03:13 +01:00
|
|
|
-spec append_buffer(binary(), Req) -> Req when Req::req().
%% Append freshly received data to the request's internal read buffer.
append_buffer(Extra, Req=#http_req{buffer=Current}) ->
    Req#http_req{buffer= << Current/binary, Extra/binary >>}.
|
|
|
|
|
2012-09-29 11:08:59 +02:00
|
|
|
-spec get(atom(), req()) -> any(); ([atom()], req()) -> any().
%% Access one or several fields of the request record by name.
%% With a list argument, values are returned in the same order.
get(List, Req) when is_list(List) ->
    [g(Atom, Req) || Atom <- List];
get(Atom, Req) when is_atom(Atom) ->
    g(Atom, Req).

%% Map a field name to the corresponding #http_req record field.
%% Unknown names crash with function_clause.
g(bindings, #http_req{bindings=Ret}) -> Ret;
g(body_state, #http_req{body_state=Ret}) -> Ret;
g(buffer, #http_req{buffer=Ret}) -> Ret;
g(connection, #http_req{connection=Ret}) -> Ret;
g(headers, #http_req{headers=Ret}) -> Ret;
g(host, #http_req{host=Ret}) -> Ret;
g(host_info, #http_req{host_info=Ret}) -> Ret;
g(meta, #http_req{meta=Ret}) -> Ret;
g(method, #http_req{method=Ret}) -> Ret;
g(multipart, #http_req{multipart=Ret}) -> Ret;
g(onresponse, #http_req{onresponse=Ret}) -> Ret;
g(path, #http_req{path=Ret}) -> Ret;
g(path_info, #http_req{path_info=Ret}) -> Ret;
g(peer, #http_req{peer=Ret}) -> Ret;
g(pid, #http_req{pid=Ret}) -> Ret;
g(port, #http_req{port=Ret}) -> Ret;
g(qs, #http_req{qs=Ret}) -> Ret;
g(resp_body, #http_req{resp_body=Ret}) -> Ret;
g(resp_compress, #http_req{resp_compress=Ret}) -> Ret;
g(resp_headers, #http_req{resp_headers=Ret}) -> Ret;
g(resp_state, #http_req{resp_state=Ret}) -> Ret;
g(socket, #http_req{socket=Ret}) -> Ret;
g(transport, #http_req{transport=Ret}) -> Ret;
g(version, #http_req{version=Ret}) -> Ret.
|
|
|
|
|
|
|
|
-spec set([{atom(), any()}], Req) -> Req when Req::req().
%% Update several fields of the request record by name. Unknown field
%% names crash with function_clause. Note that some fields readable
%% via get/2 (e.g. resp_compress) are intentionally not settable here.
set([], Req) -> Req;
set([{bindings, Val}|Tail], Req) -> set(Tail, Req#http_req{bindings=Val});
set([{body_state, Val}|Tail], Req) -> set(Tail, Req#http_req{body_state=Val});
set([{buffer, Val}|Tail], Req) -> set(Tail, Req#http_req{buffer=Val});
set([{connection, Val}|Tail], Req) -> set(Tail, Req#http_req{connection=Val});
set([{headers, Val}|Tail], Req) -> set(Tail, Req#http_req{headers=Val});
set([{host, Val}|Tail], Req) -> set(Tail, Req#http_req{host=Val});
set([{host_info, Val}|Tail], Req) -> set(Tail, Req#http_req{host_info=Val});
set([{meta, Val}|Tail], Req) -> set(Tail, Req#http_req{meta=Val});
set([{method, Val}|Tail], Req) -> set(Tail, Req#http_req{method=Val});
set([{multipart, Val}|Tail], Req) -> set(Tail, Req#http_req{multipart=Val});
set([{onresponse, Val}|Tail], Req) -> set(Tail, Req#http_req{onresponse=Val});
set([{path, Val}|Tail], Req) -> set(Tail, Req#http_req{path=Val});
set([{path_info, Val}|Tail], Req) -> set(Tail, Req#http_req{path_info=Val});
set([{peer, Val}|Tail], Req) -> set(Tail, Req#http_req{peer=Val});
set([{pid, Val}|Tail], Req) -> set(Tail, Req#http_req{pid=Val});
set([{port, Val}|Tail], Req) -> set(Tail, Req#http_req{port=Val});
set([{qs, Val}|Tail], Req) -> set(Tail, Req#http_req{qs=Val});
set([{resp_body, Val}|Tail], Req) -> set(Tail, Req#http_req{resp_body=Val});
set([{resp_headers, Val}|Tail], Req) -> set(Tail, Req#http_req{resp_headers=Val});
set([{resp_state, Val}|Tail], Req) -> set(Tail, Req#http_req{resp_state=Val});
set([{socket, Val}|Tail], Req) -> set(Tail, Req#http_req{socket=Val});
set([{transport, Val}|Tail], Req) -> set(Tail, Req#http_req{transport=Val});
set([{version, Val}|Tail], Req) -> set(Tail, Req#http_req{version=Val}).
|
|
|
|
|
2013-01-22 18:25:16 +01:00
|
|
|
-spec set_bindings(cowboy_router:tokens(), cowboy_router:tokens(),
    cowboy_router:bindings(), Req) -> Req when Req::req().
%% Store the result of routing on the request: the trailing host and
%% path segments and the matched bindings.
set_bindings(HostInfo, PathInfo, Bindings, Req) ->
    Req#http_req{
        host_info=HostInfo,
        path_info=PathInfo,
        bindings=Bindings}.
|
|
|
|
|
2012-09-16 13:57:27 +02:00
|
|
|
-spec lock(Req) -> Req when Req::req().
%% Prevent any further response from being sent for this request.
lock(Req) ->
    Req#http_req{resp_state=locked}.
|
|
|
|
|
2012-09-15 20:33:57 +02:00
|
|
|
-spec to_list(req()) -> [{atom(), any()}].
%% Convert the request record into a proplist of {field, value} pairs.
to_list(Req) ->
    [_RecordTag|Values] = tuple_to_list(Req),
    lists:zip(record_info(fields, http_req), Values).
|
|
|
|
|
2011-03-07 22:59:22 +01:00
|
|
|
%% Internal.
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec chunked_response(cowboy:http_status(), cowboy:http_headers(), Req) ->
    {normal | hook, Req} when Req::req().
%% Send the response head for a chunked reply and switch resp_state.
%% SPDY uses its own streaming; for HTTP, chunked encoding is only
%% used for HTTP/1.1 responses that were not already forced to
%% plain streaming (waiting_stream).
chunked_response(Status, Headers, Req=#http_req{
        transport=cowboy_spdy, resp_state=waiting,
        resp_headers=RespHeaders}) ->
    {RespType, Req2} = response(Status, Headers, RespHeaders, [
        {<<"date">>, cowboy_clock:rfc1123()},
        {<<"server">>, <<"Cowboy">>}
    ], stream, Req),
    {RespType, Req2#http_req{resp_state=chunks,
        resp_headers=[], resp_body= <<>>}};
chunked_response(Status, Headers, Req=#http_req{
        version=Version, connection=Connection,
        resp_state=RespState, resp_headers=RespHeaders})
        when RespState =:= waiting; RespState =:= waiting_stream ->
    RespConn = response_connection(Headers, Connection),
    %% HTTP/1.0 gets neither connection nor transfer-encoding headers;
    %% waiting_stream responses skip transfer-encoding only.
    HTTP11Headers = if
        Version =:= 'HTTP/1.0' -> [];
        true ->
            MaybeTE = if
                RespState =:= waiting_stream -> [];
                true -> [{<<"transfer-encoding">>, <<"chunked">>}]
            end,
            [{<<"connection">>, atom_to_connection(Connection)}|MaybeTE]
    end,
    %% Only genuine HTTP/1.1 chunked replies get resp_state=chunks;
    %% everything else streams raw data until the connection closes.
    RespState2 = if
        Version =:= 'HTTP/1.1', RespState =:= 'waiting' -> chunks;
        true -> stream
    end,
    {RespType, Req2} = response(Status, Headers, RespHeaders, [
        {<<"date">>, cowboy_clock:rfc1123()},
        {<<"server">>, <<"Cowboy">>}
    |HTTP11Headers], <<>>, Req),
    {RespType, Req2#http_req{connection=RespConn, resp_state=RespState2,
        resp_headers=[], resp_body= <<>>}}.
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec response(cowboy:http_status(), cowboy:http_headers(),
    cowboy:http_headers(), cowboy:http_headers(), stream | iodata(), Req)
    -> {normal | hook, Req} when Req::req().
%% Send the response head (and body, unless streaming). Merges the
%% three header sources, runs the onresponse hook if one is set, and
%% returns 'hook' instead of 'normal' when the hook already replied
%% (detected via resp_state no longer being waiting/waiting_stream).
response(Status, Headers, RespHeaders, DefaultHeaders, Body, Req=#http_req{
        socket=Socket, transport=Transport, version=Version,
        pid=ReqPid, onresponse=OnResponse}) ->
    %% When called from inside a hook, headers are taken as-is.
    FullHeaders = case OnResponse of
        already_called -> Headers;
        _ -> response_merge_headers(Headers, RespHeaders, DefaultHeaders)
    end,
    Body2 = case Body of stream -> <<>>; _ -> Body end,
    %% The hook may return a replacement {Status, Headers, Req} triple,
    %% or just a new Req (in which case status/headers are kept).
    {Status2, FullHeaders2, Req2} = case OnResponse of
        already_called -> {Status, FullHeaders, Req};
        undefined -> {Status, FullHeaders, Req};
        OnResponse ->
            case OnResponse(Status, FullHeaders, Body2,
                    %% Don't call 'onresponse' from the hook itself.
                    Req#http_req{resp_headers=[], resp_body= <<>>,
                        onresponse=already_called}) of
                StHdReq = {_, _, _} ->
                    StHdReq;
                Req1 ->
                    {Status, FullHeaders, Req1}
            end
    end,
    ReplyType = case Req2#http_req.resp_state of
        waiting when Transport =:= cowboy_spdy, Body =:= stream ->
            %% SPDY streamed reply: headers only, body follows later.
            cowboy_spdy:stream_reply(Socket, status(Status2), FullHeaders2),
            ReqPid ! {?MODULE, resp_sent},
            normal;
        waiting when Transport =:= cowboy_spdy ->
            cowboy_spdy:reply(Socket, status(Status2), FullHeaders2, Body),
            ReqPid ! {?MODULE, resp_sent},
            normal;
        RespState when RespState =:= waiting; RespState =:= waiting_stream ->
            %% Plain HTTP: serialize status line + headers + body.
            HTTPVer = atom_to_binary(Version, latin1),
            StatusLine = << HTTPVer/binary, " ",
                (status(Status2))/binary, "\r\n" >>,
            HeaderLines = [[Key, <<": ">>, Value, <<"\r\n">>]
                || {Key, Value} <- FullHeaders2],
            ok = Transport:send(Socket, [StatusLine, HeaderLines, <<"\r\n">>, Body2]),
            ReqPid ! {?MODULE, resp_sent},
            normal;
        _ ->
            %% The hook already sent a response; nothing more to do.
            hook
    end,
    {ReplyType, Req2}.
|
2012-04-30 22:49:36 +02:00
|
|
|
|
2013-05-16 16:16:32 +02:00
|
|
|
-spec response_connection(cowboy:http_headers(), keepalive | close)
    -> keepalive | close.
%% Determine the connection behavior of the response: the first
%% user-supplied "connection" header wins; otherwise the current
%% connection state is kept.
response_connection([], Connection) ->
    Connection;
response_connection([{<<"connection">>, Value}|_], _) ->
    connection_to_atom(cow_http_hd:parse_connection(Value));
response_connection([_|Rest], Connection) ->
    response_connection(Rest, Connection).
|
|
|
|
|
2013-05-16 16:16:32 +02:00
|
|
|
-spec response_merge_headers(cowboy:http_headers(), cowboy:http_headers(),
    cowboy:http_headers()) -> cowboy:http_headers().
%% Merge the reply headers with the previously set response headers
%% and the defaults, in decreasing order of precedence. Entries of
%% the first list that are not 2-tuples are dropped.
response_merge_headers(Headers, RespHeaders, DefaultHeaders) ->
    UserHeaders = [{Key, Value} || {Key, Value} <- Headers],
    WithResp = merge_headers(UserHeaders, RespHeaders),
    merge_headers(WithResp, DefaultHeaders).
|
2011-11-28 09:09:41 +01:00
|
|
|
|
2013-05-16 16:16:32 +02:00
|
|
|
-spec merge_headers(cowboy:http_headers(), cowboy:http_headers())
    -> cowboy:http_headers().
%% Merge headers by prepending the tuples in the second list to the
%% first list, keeping the first list's entries on name conflicts.
%% Set-Cookie is special-cased and always prepended, since emitting
%% one distinct Set-Cookie header per cookie is what clients expect.
merge_headers(Acc, []) ->
    Acc;
merge_headers(Acc, [{<<"set-cookie">>, Value}|Rest]) ->
    merge_headers([{<<"set-cookie">>, Value}|Acc], Rest);
merge_headers(Acc, [{Name, Value}|Rest]) ->
    Acc2 = case lists:keymember(Name, 1, Acc) of
        true -> Acc;
        false -> [{Name, Value}|Acc]
    end,
    merge_headers(Acc2, Rest).
|
2011-05-08 14:40:58 +02:00
|
|
|
|
2011-05-25 23:02:40 +02:00
|
|
|
-spec atom_to_connection(keepalive) -> <<_:80>>;
    (close) -> <<_:40>>.
%% Render the connection state as the corresponding header value.
atom_to_connection(keepalive) -> <<"keep-alive">>;
atom_to_connection(close) -> <<"close">>.
|
|
|
|
|
2012-09-29 13:16:48 +02:00
|
|
|
%% We don't match on "keep-alive" since it is the default value.
-spec connection_to_atom([binary()]) -> keepalive | close.
%% Close the connection if any token is "close"; keep it alive
%% otherwise (including for an empty token list).
connection_to_atom(Tokens) ->
    case lists:member(<<"close">>, Tokens) of
        true -> close;
        false -> keepalive
    end.
|
|
|
|
|
2013-05-16 16:29:24 +02:00
|
|
|
-spec status(cowboy:http_status()) -> binary().
%% Map a numeric status code to its full status line (code plus reason
%% phrase). A custom status line may be given directly as a binary.
%% Integers not listed below crash with function_clause.
status(100) -> <<"100 Continue">>;
status(101) -> <<"101 Switching Protocols">>;
status(102) -> <<"102 Processing">>;
status(200) -> <<"200 OK">>;
status(201) -> <<"201 Created">>;
status(202) -> <<"202 Accepted">>;
status(203) -> <<"203 Non-Authoritative Information">>;
status(204) -> <<"204 No Content">>;
status(205) -> <<"205 Reset Content">>;
status(206) -> <<"206 Partial Content">>;
status(207) -> <<"207 Multi-Status">>;
status(226) -> <<"226 IM Used">>;
status(300) -> <<"300 Multiple Choices">>;
status(301) -> <<"301 Moved Permanently">>;
status(302) -> <<"302 Found">>;
status(303) -> <<"303 See Other">>;
status(304) -> <<"304 Not Modified">>;
status(305) -> <<"305 Use Proxy">>;
status(306) -> <<"306 Switch Proxy">>;
status(307) -> <<"307 Temporary Redirect">>;
status(400) -> <<"400 Bad Request">>;
status(401) -> <<"401 Unauthorized">>;
status(402) -> <<"402 Payment Required">>;
status(403) -> <<"403 Forbidden">>;
status(404) -> <<"404 Not Found">>;
status(405) -> <<"405 Method Not Allowed">>;
status(406) -> <<"406 Not Acceptable">>;
status(407) -> <<"407 Proxy Authentication Required">>;
status(408) -> <<"408 Request Timeout">>;
status(409) -> <<"409 Conflict">>;
status(410) -> <<"410 Gone">>;
status(411) -> <<"411 Length Required">>;
status(412) -> <<"412 Precondition Failed">>;
status(413) -> <<"413 Request Entity Too Large">>;
status(414) -> <<"414 Request-URI Too Long">>;
status(415) -> <<"415 Unsupported Media Type">>;
status(416) -> <<"416 Requested Range Not Satisfiable">>;
status(417) -> <<"417 Expectation Failed">>;
status(418) -> <<"418 I'm a teapot">>;
status(422) -> <<"422 Unprocessable Entity">>;
status(423) -> <<"423 Locked">>;
status(424) -> <<"424 Failed Dependency">>;
status(425) -> <<"425 Unordered Collection">>;
status(426) -> <<"426 Upgrade Required">>;
status(428) -> <<"428 Precondition Required">>;
status(429) -> <<"429 Too Many Requests">>;
status(431) -> <<"431 Request Header Fields Too Large">>;
status(500) -> <<"500 Internal Server Error">>;
status(501) -> <<"501 Not Implemented">>;
status(502) -> <<"502 Bad Gateway">>;
status(503) -> <<"503 Service Unavailable">>;
status(504) -> <<"504 Gateway Timeout">>;
status(505) -> <<"505 HTTP Version Not Supported">>;
status(506) -> <<"506 Variant Also Negotiates">>;
status(507) -> <<"507 Insufficient Storage">>;
status(510) -> <<"510 Not Extended">>;
status(511) -> <<"511 Network Authentication Required">>;
status(B) when is_binary(B) -> B.
|
2011-03-18 22:38:26 +01:00
|
|
|
|
2014-09-23 16:43:29 +03:00
|
|
|
%% Create map, convert keys to atoms and group duplicate keys into lists.
%% Keys that are not found in the user provided list are entirely skipped.
%% @todo Can probably be done directly while parsing.
kvlist_to_map(Fields, KvList) ->
    %% Fields may be bare keys, {Key, Constraints} or
    %% {Key, Constraints, Default} tuples; extract just the keys.
    Keys = lists:map(fun
        ({Key, _}) -> Key;
        ({Key, _, _}) -> Key;
        (Key) -> Key
    end, Fields),
    kvlist_to_map(Keys, KvList, #{}).

kvlist_to_map(_, [], Map) ->
    Map;
kvlist_to_map(Keys, [{Key, Value}|Rest], Map) ->
    Map2 = try binary_to_existing_atom(Key, utf8) of
        Atom ->
            case lists:member(Atom, Keys) of
                false ->
                    %% Key not requested by the caller: skip it.
                    Map;
                true ->
                    case maps:find(Atom, Map) of
                        error ->
                            maps:put(Atom, Value, Map);
                        {ok, Prev} when is_list(Prev) ->
                            %% Third or later duplicate: prepend.
                            maps:put(Atom, [Value|Prev], Map);
                        {ok, Prev} ->
                            %% Second occurrence: group into a list.
                            maps:put(Atom, [Value, Prev], Map)
                    end
            end
    catch error:badarg ->
        %% The atom doesn't exist, so it can't be a requested key.
        Map
    end,
    kvlist_to_map(Keys, Rest, Map2).
|
|
|
|
|
|
|
|
%% Loop through fields, if value is missing and no default, crash;
%% else if value is missing and has a default, set default;
%% otherwise apply constraints. If constraint fails, crash.
filter([], Map) ->
	Map;
filter([{Key, Constraints}|Rest], Map) ->
	%% No default: maps:get/2 crashes when the key is missing.
	filter_constraints(Rest, Map, Key, maps:get(Key, Map), Constraints);
filter([{Key, Constraints, Default}|Rest], Map) ->
	case maps:find(Key, Map) of
		error ->
			%% Missing value: set the default, no constraints applied.
			filter(Rest, maps:put(Key, Default, Map));
		{ok, Value} ->
			filter_constraints(Rest, Map, Key, Value, Constraints)
	end;
filter([Key|Rest], Map) ->
	%% Bare key: only assert presence, the value is left untouched.
	true = maps:is_key(Key, Map),
	filter(Rest, Map).
%% Apply the constraints to one value, then continue filtering.
%% Crashes (case_clause) when the value fails validation.
filter_constraints(Rest, Map, Key, Value, Constraints) ->
	Result = cowboy_constraints:validate(Value, Constraints),
	case Result of
		%% Value accepted unchanged.
		true ->
			filter(Rest, Map);
		%% Value accepted and rewritten by a constraint.
		{true, Modified} ->
			filter(Rest, maps:put(Key, Modified, Map))
	end.
%% Tests.

-ifdef(TEST).
%% Exercises url/1 over both transports, with and without
%% non-default ports and query strings.
url_test() ->
	%% {ExpectedUrl, Transport, Host, Port, Path, Qs}
	Cases = [
		{undefined,
			ranch_tcp, <<>>, undefined, <<>>, <<>>},
		{<<"http://localhost/path">>,
			ranch_tcp, <<"localhost">>, 80, <<"/path">>, <<>>},
		{<<"http://localhost:443/path">>,
			ranch_tcp, <<"localhost">>, 443, <<"/path">>, <<>>},
		{<<"http://localhost:8080/path">>,
			ranch_tcp, <<"localhost">>, 8080, <<"/path">>, <<>>},
		{<<"http://localhost:8080/path?dummy=2785">>,
			ranch_tcp, <<"localhost">>, 8080, <<"/path">>, <<"dummy=2785">>},
		{<<"https://localhost/path">>,
			ranch_ssl, <<"localhost">>, 443, <<"/path">>, <<>>},
		{<<"https://localhost:8443/path">>,
			ranch_ssl, <<"localhost">>, 8443, <<"/path">>, <<>>},
		{<<"https://localhost:8443/path?dummy=2785">>,
			ranch_ssl, <<"localhost">>, 8443, <<"/path">>, <<"dummy=2785">>}
	],
	%% Assertive matching: any mismatch crashes with badmatch.
	_ = [Expected = url(#http_req{transport=Transport, host=Host,
			port=Port, path=Path, qs=Qs, pid=self()})
		|| {Expected, Transport, Host, Port, Path, Qs} <- Cases],
	ok.
%% Each case becomes a named EUnit test instance; the name is the
%% printed form of the input token list.
connection_to_atom_test_() ->
	%% {Tokens, ExpectedAtom}
	Cases = [
		{[<<"close">>], close},
		{[<<"keep-alive">>], keepalive},
		{[<<"keep-alive">>, <<"upgrade">>], keepalive}
	],
	[{lists:flatten(io_lib:format("~p", [Tokens])),
		fun() -> Expected = connection_to_atom(Tokens) end}
		|| {Tokens, Expected} <- Cases].
%% Each case is {Left, Right, ExpectedMerge}; the expected values show
%% that a right-side key does not override one already present on the
%% left (content-length stays 13), while set-cookie may repeat.
merge_headers_test_() ->
	Cases = [
		{[{<<"content-length">>,<<"13">>},{<<"server">>,<<"Cowboy">>}],
			[{<<"set-cookie">>,<<"foo=bar">>},{<<"content-length">>,<<"11">>}],
			[{<<"set-cookie">>,<<"foo=bar">>},
				{<<"content-length">>,<<"13">>},
				{<<"server">>,<<"Cowboy">>}]},
		{[{<<"content-length">>,<<"13">>},{<<"server">>,<<"Cowboy">>}],
			[{<<"set-cookie">>,<<"foo=bar">>},{<<"set-cookie">>,<<"bar=baz">>}],
			[{<<"set-cookie">>,<<"bar=baz">>},
				{<<"set-cookie">>,<<"foo=bar">>},
				{<<"content-length">>,<<"13">>},
				{<<"server">>,<<"Cowboy">>}]}
	],
	[fun() -> Expected = merge_headers(Left, Right) end
		|| {Left, Right, Expected} <- Cases].
-endif.
|