major whitespace updates to get line lengths down to < 80 characters whenever possible (a few tests ignore this). src/jsx_test.erl was not touched because it's shortly to be replaced

alisdair sullivan 2010-08-25 23:17:10 -07:00
parent ac5e0e4fe3
commit a86dec09ef
6 changed files with 998 additions and 414 deletions


@ -23,5 +23,9 @@
-define(is_utf_encoding(X),
X == utf8; X == utf16; X == utf32; X == {utf16, little}; X == {utf32, little}
X == utf8
; X == utf16
; X == utf32
; X == {utf16, little}
; X == {utf32, little}
).
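The macro expands to a guard sequence, so it can sit directly in a when
clause; a minimal usage sketch (hypothetical clause, modelled on the
formatter's use of the macro further down):

    to_binary(Acc, UTF) when ?is_utf_encoding(UTF) ->
        unicode:characters_to_binary(Acc, utf8, UTF);
    to_binary(_, _) ->
        erlang:error(badarg).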

File diff suppressed because it is too large


@ -26,7 +26,6 @@
%% @version really, really beta
%% @doc this module defines the interface to the jsx json parsing library
-module(jsx).
@ -70,7 +69,11 @@
%% | {multi_term, true | false}
%% | {encoding, auto | supported_utf()}.
%% @type supported_utf() = utf8 | utf16 | {utf16, little} | utf32 | {utf32, little}.
%% @type supported_utf() = utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}.
%% @type eep0018() = eep0018_object() | eep0018_array().
@ -79,7 +82,13 @@
%% @type eep0018_key() = binary() | atom().
%% @type eep0018_term() = eep0018_array() | eep0018_object() | eep0018_string() | eep0018_number() | true | false | null.
%% @type eep0018_term() = eep0018_array()
%% | eep0018_object()
%% | eep0018_string()
%% | eep0018_number()
%% | true
%% | false
%% | null.
%% @type eep0018_string() = binary().
@ -127,7 +136,8 @@ parser() ->
%% @spec parser(Opts::jsx_opts()) -> jsx_parser()
%% @doc
%% produces a function which takes a binary which may or may not represent an encoded json document and returns a generator
%% produces a function which takes a binary which may or may not represent an
%% encoded json document and returns a generator
%%
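%% for illustration, a rough usage sketch (event terms abbreviated, not part
%% of this change):
%%
%%   P = jsx:parser(),
%%   {event, start_array, Next} = P(<<"[true]">>),
%%   %% keep calling the returned funs until end_json or {incomplete, More}
%%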
%% options:
%% <ul>
@ -137,17 +147,23 @@ parser() ->
%% false</p></li>
%%
%% <li>{encoded_unicode, ascii | codepoint | none}
%% <p>if a \uXXXX escape sequence is encountered within a key or string,
%% this option controls how it is interpreted. none makes no attempt
%% to interpret the value, leaving it unconverted. ascii will convert
%% any value that falls within the ascii range. codepoint will convert
%% any value that is a valid unicode codepoint. note that unicode
%% non-characters (including badly formed surrogates) will never be
%% converted. codepoint is the default</p></li>
%% <p>if a \uXXXX escape sequence is encountered within a key or
%% string, this option controls how it is interpreted. none makes no
%% attempt to interpret the value, leaving it unconverted. ascii will
%% convert any value that falls within the ascii range. codepoint will
%% convert any value that is a valid unicode codepoint. note that
%% unicode non-characters (including badly formed surrogates) will
%% never be converted. codepoint is the default</p></li>
%%
%% <li>{encoding, auto | utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <p>attempt to parse the binary using the specified encoding. auto will
%% auto detect any supported encoding and is the default</p></li>
%% <li>{encoding, auto
%% | utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>attempt to parse the binary using the specified encoding. auto
%% will auto detect any supported encoding and is the default</p></li>
%%
%% <li>{multi_term, true | false}
%% <p>usually, documents will be parsed in full before the end_json
@ -183,25 +199,32 @@ json_to_term(JSON) ->
%% options:
%% <ul>
%% <li>{strict, true | false}
%% <p>by default, attempting to convert unwrapped json values (numbers, strings and
%% the atoms true, false and null) result in a badarg exception. if strict equals
%% false, these are instead decoded to their equivalent eep0018 value. default is
%% false</p>
%% <p>by default, attempting to convert unwrapped json values (numbers,
%% strings and the atoms true, false and null) results in a badarg
%% exception. if strict equals false, these are instead decoded to
%% their equivalent eep0018 value. default is false</p>
%%
%% <p>note that there is a problem of ambiguity when parsing unwrapped json
%% numbers that requires special handling</p>
%% <p>note that there is a problem of ambiguity when parsing unwrapped
%% json numbers that requires special handling</p>
%%
%% <p>an unwrapped json number has no unambiguous end marker like a json object,
%% array or string. `1', `12' and `123' may all represent either a complete json
%% number or just the beginning of one. in this case, the parser will always
%% return `{incomplete, More}' rather than potentially terminate before input
%% is exhausted. to force termination, `More/1' may be called with the atom
%% `end_stream' as it's argument. note also that numbers followed by whitespace
%% will be parsed correctly</p></li>
%% <p>an unwrapped json number has no unambiguous end marker like a
%% json object, array or string. `1', `12' and `123' may all represent
%% either a complete json number or just the beginning of one. in this
%% case, the parser will always return `{incomplete, More}' rather than
%% potentially terminate before input is exhausted. to force
%% termination, `More/1' may be called with the atom `end_stream' as
%% its argument. note also that numbers followed by whitespace will be
%% parsed correctly</p></li>
%%
%% <li>{encoding, auto | utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <p>assume the binary is encoded using the specified binary. default is auto, which
%% attempts to autodetect the encoding</p></li>
%% <li>{encoding, auto
%% | utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>assume the binary is encoded using the specified encoding. default
%% is auto, which attempts to autodetect the encoding</p></li>
%%
%% <li>{comments, true | false}
%% <p>if true, json documents that contain c style (/* ... */) comments
@ -230,29 +253,38 @@ term_to_json(JSON) ->
%% @spec term_to_json(JSON::eep0018(), Opts::encoder_opts()) -> binary()
%% @doc
%% takes the erlang representation of a json object (as defined in eep0018) and returns a (binary encoded) json string
%% takes the erlang representation of a json object (as defined in eep0018) and
%% returns a (binary encoded) json string
%%
%% options:
%% <ul>
%% <li>{strict, true | false}
%% <p>by default, attempting to convert unwrapped json values (numbers,
%% strings and the atoms true, false and null) result in a badarg exception.
%% if strict equals false, these are instead json encoded. default is false</p></li>
%% strings and the atoms true, false and null) results in a badarg
%% exception. if strict equals false, these are instead json encoded.
%% default is false</p></li>
%%
%% <li>{encoding, utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <li>{encoding, utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>the encoding of the resulting binary. default is utf8</p></li>
%%
%% <li>space
%% <p>space is equivalent to {space, 1}</p></li>
%%
%% <li>{space, N}
%% <p>place N spaces after each colon and comma in the resulting binary. default is zero</p></li>
%% <p>place N spaces after each colon and comma in the resulting
%% binary. default is zero</p></li>
%%
%% <li>indent
%% <p>indent is equivalent to {indent, 1}</p></li>
%%
%% <li>{indent, N}
%% <p>indent each 'level' of the json structure by N spaces. default is zero</p></li>
%% <p>indent each 'level' of the json structure by N spaces. default is
%% zero</p></li>
%% </ul>
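%%
%% for illustration, a call lifted from the encode tests later in this commit:
%%
%%   term_to_json([{a, true}, {b, true}, {c, true}], []) returns
%%   <<"{\"a\":true,\"b\":true,\"c\":true}">>; passing [space, {indent, 2}]
%%   instead of [] would insert the corresponding whitespace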
%% @end
@ -273,13 +305,19 @@ is_json(JSON) ->
%% options:
%% <ul>
%% <li>{strict, true | false}
%% <p>by default, unwrapped json values (numbers, strings and the atoms
%% true, false and null) return false. if strict equals true, is_json
%% returns true. default is false</p></li>
%% <p>by default, unwrapped json values (numbers, strings and the
%% atoms true, false and null) return false. if strict equals true,
%% is_json returns true. default is false</p></li>
%%
%% <li>{encoding, auto | utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <p>assume the binary is encoded using the specified binary. default is auto,
%% which attempts to autodetect the encoding</p></li>
%% <li>{encoding, auto
%% | utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>assume the binary is encoded using the specified encoding. default
%% is auto, which attempts to autodetect the encoding</p></li>
%%
%% <li>{comments, true | false}
%% <p>if true, json documents that contain c style (/* ... */) comments
@ -300,20 +338,32 @@ format(JSON) ->
%% @spec format(JSON::binary(), Opts::format_opts()) -> binary()
%% @doc
%% formats a binary encoded json string according to the options chose. the defaults will produced a string stripped of all whitespace
%% formats a binary encoded json string according to the options chosen. the
%% defaults will produce a string stripped of all whitespace
%%
%% options:
%% <ul>
%% <li>{strict, true | false}
%% <p>by default, unwrapped json values (numbers, strings and the atoms
%% true, false and null) result in an error. if strict equals true, they
%% are treated as valid json. default is false</p></li>
%% <p>by default, unwrapped json values (numbers, strings and the
%% atoms true, false and null) result in an error. if strict equals
%% true, they are treated as valid json. default is false</p></li>
%%
%% <li>{encoding, auto | utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <p>assume the binary is encoded using the specified binary. default is auto,
%% which attempts to autodetect the encoding</p></li>
%% <li>{encoding, auto
%% | utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>assume the binary is encoded using the specified encoding. default
%% is auto, which attempts to autodetect the encoding</p></li>
%%
%% <li>{output_encoding, utf8 | utf16 | {utf16, little} | utf32 | {utf32, little} }
%% <li>{output_encoding, utf8
%% | utf16
%% | {utf16, little}
%% | utf32
%% | {utf32, little}
%% }
%% <p>the encoding of the resulting binary. default is utf8</p></li>
%%
%% <li>{comments, true | false}
@ -325,13 +375,15 @@ format(JSON) ->
%% <p>space is equivalent to {space, 1}</p></li>
%%
%% <li>{space, N}
%% <p>place N spaces after each colon and comma in the resulting binary. default is zero</p></li>
%% <p>place N spaces after each colon and comma in the resulting
%% binary. default is zero</p></li>
%%
%% <li>indent
%% <p>indent is equivalent to {indent, 1}</p></li>
%%
%% <li>{indent, N}
%% <p>indent each 'level' of the json structure by N spaces. default is zero</p></li>
%% <p>indent each 'level' of the json structure by N spaces. default is
%% zero</p></li>
%% </ul>
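%%
%% for illustration, two calls consistent with the formatter tests later in
%% this commit:
%%
%%   format(<<"[1,2,3]">>, [{space, 2}]) returns <<"[1, 2, 3]">>
%%   format(<<" { \"key\" : \"value\" } ">>, []) returns <<"{\"key\":\"value\"}">>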
%% @end
@ -340,9 +392,17 @@ format(JSON, Opts) ->
%% @spec eventify(List::list()) -> jsx_parser_result()
%% @doc fake the jsx api for any list. useful if you want to serialize a structure to json using the pretty printer, or verify a sequence could be valid json
%% @doc fake the jsx api for any list. useful if you want to serialize a
%% structure to json using the pretty printer, or verify a sequence could be
%% valid json
eventify([]) ->
fun() -> {incomplete, fun(List) when is_list(List) -> eventify(List); (_) -> erlang:error(badarg) end} end;
fun() ->
{incomplete, fun(List) when is_list(List) ->
eventify(List)
; (_) ->
erlang:error(badarg)
end}
end;
eventify([Next|Rest]) ->
fun() -> {event, Next, eventify(Rest)} end.
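%% a minimal sketch (hypothetical, not part of this commit) of walking a
%% faked iterator:
%%
%%   F = eventify([start_array, end_array, end_json]),
%%   {event, start_array, Next} = F(),
%%   {event, end_array, Next2} = Next(),
%%   {event, end_json, _} = Next2().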
@ -352,43 +412,50 @@ eventify([Next|Rest]) ->
%% encoding detection
%% first check to see if there's a bom, if not, use the rfc4627 method for determining
%% encoding. this function makes some assumptions about the validity of the stream
%% which may delay failure later than if an encoding is explicitly provided
%% first check to see if there's a bom, if not, use the rfc4627 method for
%% determining encoding. this function makes some assumptions about the
%% validity of the stream which may delay failure later than if an encoding is
%% explicitly provided
detect_encoding(OptsList) ->
fun(Stream) -> detect_encoding(Stream, OptsList) end.
%% utf8 bom detection
detect_encoding(<<16#ef, 16#bb, 16#bf, Rest/binary>>, Opts) -> (jsx_utf8:parser(Opts))(Rest);
%% utf32-little bom detection (this has to come before utf16-little or it'll match that)
detect_encoding(<<16#ff, 16#fe, 0, 0, Rest/binary>>, Opts) -> (jsx_utf32le:parser(Opts))(Rest);
detect_encoding(<<16#ef, 16#bb, 16#bf, Rest/binary>>, Opts) ->
(jsx_utf8:parser(Opts))(Rest);
%% utf32-little bom detection (this has to come before utf16-little or it'll
%% match that)
detect_encoding(<<16#ff, 16#fe, 0, 0, Rest/binary>>, Opts) ->
(jsx_utf32le:parser(Opts))(Rest);
%% utf16-big bom detection
detect_encoding(<<16#fe, 16#ff, Rest/binary>>, Opts) -> (jsx_utf16:parser(Opts))(Rest);
detect_encoding(<<16#fe, 16#ff, Rest/binary>>, Opts) ->
(jsx_utf16:parser(Opts))(Rest);
%% utf16-little bom detection
detect_encoding(<<16#ff, 16#fe, Rest/binary>>, Opts) -> (jsx_utf16le:parser(Opts))(Rest);
detect_encoding(<<16#ff, 16#fe, Rest/binary>>, Opts) ->
(jsx_utf16le:parser(Opts))(Rest);
%% utf32-big bom detection
detect_encoding(<<0, 0, 16#fe, 16#ff, Rest/binary>>, Opts) -> (jsx_utf32:parser(Opts))(Rest);
detect_encoding(<<0, 0, 16#fe, 16#ff, Rest/binary>>, Opts) ->
(jsx_utf32:parser(Opts))(Rest);
%% utf32-little null order detection
detect_encoding(<<X, 0, 0, 0, _Rest/binary>> = JSON, Opts) when X =/= 0 ->
(jsx_utf32le:parser(Opts))(JSON);
%% utf16-big null order detection
detect_encoding(<<0, X, 0, Y, _Rest/binary>> = JSON, Opts) when X =/= 0, Y =/= 0 ->
(jsx_utf16:parser(Opts))(JSON);
%% utf16-little null order detection
detect_encoding(<<X, 0, Y, 0, _Rest/binary>> = JSON, Opts) when X =/= 0, Y =/= 0 ->
(jsx_utf16le:parser(Opts))(JSON);
%% utf32-big null order detection
detect_encoding(<<0, 0, 0, X, _Rest/binary>> = JSON, Opts) when X =/= 0 ->
(jsx_utf32:parser(Opts))(JSON);
%% utf16-little null order detection
detect_encoding(<<X, 0, _, 0, _Rest/binary>> = JSON, Opts) when X =/= 0 ->
(jsx_utf16le:parser(Opts))(JSON);
%% utf16-big null order detection
detect_encoding(<<0, X, 0, _, _Rest/binary>> = JSON, Opts) when X =/= 0 ->
(jsx_utf16:parser(Opts))(JSON);
%% utf8 null order detection
detect_encoding(<<X, Y, _Rest/binary>> = JSON, Opts) when X =/= 0, Y =/= 0 ->
(jsx_utf8:parser(Opts))(JSON);
%% a problem, to autodetect naked single digits' encoding, there is not enough data
%% to conclusively determine the encoding correctly. below is an attempt to solve
%% the problem
%% a problem: to autodetect naked single digits' encoding, there is not enough
%% data to conclusively determine the encoding correctly. below is an attempt
%% to solve the problem
detect_encoding(<<X>>, Opts) when X =/= 0 ->
{incomplete,
fun(end_stream) ->


@ -33,7 +33,6 @@
-include("./include/jsx_common.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
@ -48,10 +47,10 @@ json_to_term(JSON, Opts) ->
end.
%% the jsx formatter (pretty printer) can do most of the heavy lifting in converting erlang
%% terms to json strings, but it expects a jsx event iterator. luckily, the mapping from
%% erlang terms to jsx events is straightforward and the iterator can be faked with an
%% anonymous function
%% the jsx formatter (pretty printer) can do most of the heavy lifting in
%% converting erlang terms to json strings, but it expects a jsx event
%% iterator. luckily, the mapping from erlang terms to jsx events is
%% straightforward and the iterator can be faked with an anonymous function
term_to_json(List, Opts) ->
case proplists:get_value(strict, Opts, true) of
true when is_list(List) -> continue
@ -59,7 +58,9 @@ term_to_json(List, Opts) ->
; false -> continue
end,
Encoding = proplists:get_value(encoding, Opts, utf8),
jsx:format(jsx:eventify(lists:reverse([end_json] ++ term_to_events(List))), [{output_encoding, Encoding}] ++ Opts).
jsx:format(jsx:eventify(lists:reverse([end_json] ++ term_to_events(List))),
[{output_encoding, Encoding}] ++ Opts
).
%% parse opts for the decoder
@ -67,7 +68,9 @@ opts_to_jsx_opts(Opts) ->
opts_to_jsx_opts(Opts, []).
opts_to_jsx_opts([{encoding, Val}|Rest], Acc) ->
case lists:member(Val, [auto, utf8, utf16, {utf16, little}, utf32, {utf32, little}]) of
case lists:member(Val,
[auto, utf8, utf16, {utf16, little}, utf32, {utf32, little}]
) of
true -> opts_to_jsx_opts(Rest, [{encoding, Val}] ++ Acc)
; false -> opts_to_jsx_opts(Rest, Acc)
end;
@ -85,22 +88,27 @@ opts_to_jsx_opts([], Acc) ->
%% ensure the first jsx event we get is start_object or start_array when running
%% in strict mode
collect_strict({event, Start, Next}, Acc, Opts) when Start =:= start_object; Start =:= start_array ->
collect_strict({event, Start, Next}, Acc, Opts)
when Start =:= start_object; Start =:= start_array ->
collect(Next(), [[]|Acc], Opts);
collect_strict(_, _, _) ->
erlang:error(badarg).
%% collect decoder events and convert to eep0018 format
collect({event, Start, Next}, Acc, Opts) when Start =:= start_object; Start =:= start_array ->
collect({event, Start, Next}, Acc, Opts)
when Start =:= start_object; Start =:= start_array ->
collect(Next(), [[]|Acc], Opts);
%% special case for empty object
collect({event, end_object, Next}, [[], Parent|Rest], Opts) when is_list(Parent) ->
collect({event, end_object, Next}, [[], Parent|Rest], Opts)
when is_list(Parent) ->
collect(Next(), [[[{}]] ++ Parent] ++ Rest, Opts);
%% reverse the array/object accumulator before prepending it to it's parent
collect({event, end_object, Next}, [Current, Parent|Rest], Opts) when is_list(Parent) ->
collect({event, end_object, Next}, [Current, Parent|Rest], Opts)
when is_list(Parent) ->
collect(Next(), [[lists:reverse(Current)] ++ Parent] ++ Rest, Opts);
collect({event, end_array, Next}, [Current, Parent|Rest], Opts) when is_list(Parent) ->
collect({event, end_array, Next}, [Current, Parent|Rest], Opts)
when is_list(Parent) ->
collect(Next(), [[lists:reverse(Current)] ++ Parent] ++ Rest, Opts);
%% special case for empty object
collect({event, end_object, Next}, [[], Key, Parent|Rest], Opts) ->
@ -110,24 +118,25 @@ collect({event, End, Next}, [Current, Key, Parent|Rest], Opts)
collect(Next(), [[{Key, lists:reverse(Current)}] ++ Parent] ++ Rest, Opts);
collect({event, end_json, _Next}, [[Acc]], _Opts) ->
Acc;
%% key can only be emitted inside of a json object, so just insert it directly into
%% the head of the accumulator and deal with it when we receive it's paired value
%% key can only be emitted inside of a json object, so just insert it directly
%% into the head of the accumulator and deal with it when we receive its
%% paired value
collect({event, {key, _} = PreKey, Next}, [Current|_] = Acc, Opts) ->
Key = event(PreKey, Opts),
case decode_key_repeats(Key, Current) of
true -> erlang:error(badarg)
; false -> collect(Next(), [Key] ++ Acc, Opts)
end;
%% check acc to see if we're inside an object or an array. because inside an object
%% context the events that fall this far are always preceded by a key (which are
%% binaries or atoms), if Current is a list, we're inside an array, else, an
%% object
%% check acc to see if we're inside an object or an array. because inside an
%% object context the events that fall this far are always preceded by a key
%% (which is a binary or an atom), if Current is a list, we're inside an array,
%% else, an object
collect({event, Event, Next}, [Current|Rest], Opts) when is_list(Current) ->
collect(Next(), [[event(Event, Opts)] ++ Current] ++ Rest, Opts);
collect({event, Event, Next}, [Key, Current|Rest], Opts) ->
collect(Next(), [[{Key, event(Event, Opts)}] ++ Current] ++ Rest, Opts);
%% if our first returned event is {incomplete, ...} try to force end and return the
%% Event if one is returned
%% if our first returned event is {incomplete, ...} try to force end and return
%% the Event if one is returned
collect({incomplete, More}, [[]], Opts) ->
case More(end_stream) of
{event, Event, _Next} -> event(Event, Opts)
@ -170,7 +179,8 @@ decode_key_repeats(_Key, []) -> false.
%% convert eep0018 representation to jsx events. note special casing for the empty object
%% convert eep0018 representation to jsx events. note special casing for the
%% empty object
term_to_events([{}]) ->
[end_object, start_object];
term_to_events([First|_] = List) when is_tuple(First) ->
@ -203,7 +213,7 @@ list_to_events([], Acc) ->
term_to_event(List) when is_list(List) ->
term_to_events(List);
term_to_event(Float) when is_float(Float) ->
[{float, float_to_decimal(Float)}];
[{float, nice_decimal(Float)}];
term_to_event(Integer) when is_integer(Integer) ->
[{integer, erlang:integer_to_list(Integer)}];
term_to_event(String) when is_binary(String) ->
@ -222,24 +232,32 @@ key_to_event(Key) when is_binary(Key) ->
encode_key_repeats([Key], SoFar) -> encode_key_repeats(Key, SoFar, 0).
encode_key_repeats(Key, [Key|_], 0) -> true;
encode_key_repeats(Key, [end_object|Rest], Level) -> encode_key_repeats(Key, Rest, Level + 1);
encode_key_repeats(_, [start_object|_], 0) -> false;
encode_key_repeats(Key, [start_object|Rest], Level) -> encode_key_repeats(Key, Rest, Level - 1);
encode_key_repeats(Key, [_|Rest], Level) -> encode_key_repeats(Key, Rest, Level);
encode_key_repeats(_, [], 0) -> false.
encode_key_repeats(Key, [Key|_], 0) ->
true;
encode_key_repeats(Key, [end_object|Rest], Level) ->
encode_key_repeats(Key, Rest, Level + 1);
encode_key_repeats(_, [start_object|_], 0) ->
false;
encode_key_repeats(Key, [start_object|Rest], Level) ->
encode_key_repeats(Key, Rest, Level - 1);
encode_key_repeats(Key, [_|Rest], Level) ->
encode_key_repeats(Key, Rest, Level);
encode_key_repeats(_, [], 0) ->
false.
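%% e.g. term_to_json([{k, true}, {k, false}], []) raises badarg (see the
%% repeated keys tests below) because the second k is found at the same
%% nesting level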
%% conversion of floats to 'nice' decimal output. erlang's float implementation is almost
%% but not quite ieee 754. it converts negative zero to plain zero silently, and throws
%% exceptions for any operations that would produce NaN or infinity. as far as I can
%% tell that is. trying to match against NaN or infinity binary patterns produces nomatch
%% exceptions, and arithmetic operations produce badarg exceptions. with that in mind, this
%% function makes no attempt to handle special values (except for zero)
%% conversion of floats to 'nice' decimal output. erlang's float implementation
%% is almost but not quite ieee 754. it converts negative zero to plain zero
%% silently, and throws exceptions for any operations that would produce NaN
%% or infinity. as far as I can tell that is. trying to match against NaN or
%% infinity binary patterns produces nomatch exceptions, and arithmetic
%% operations produce badarg exceptions. with that in mind, this function
%% makes no attempt to handle special values (except for zero)
%% algorithm from "Printing FLoating-Point Numbers Quickly and Accurately" by Burger & Dybvig
float_to_decimal(0.0) -> "0.0";
float_to_decimal(Num) when is_float(Num) ->
%% algorithm from "Printing Floating-Point Numbers Quickly and Accurately" by
%% Burger & Dybvig
nice_decimal(0.0) -> "0.0";
nice_decimal(Num) when is_float(Num) ->
{F, E} = extract(<<Num:64/float>>),
{R, S, MP, MM} = initial_vals(F, E),
K = ceiling(math:log10(abs(Num)) - 1.0e-10),
@ -315,7 +333,8 @@ generate(RT, S, MP, MM, Round) ->
end.
%% this is not efficient at all and should be replaced with a lookup table probably
%% this is not efficient at all and should be replaced with a lookup table
%% probably
pow(_B, 0) -> 1;
pow(B, E) when E > 0 -> pow(B, E, 1).
@ -331,8 +350,10 @@ format(Dpoint, Digits) when Dpoint =< length(Digits), Dpoint > 0 ->
format(Dpoint, Digits) when Dpoint > 0 ->
Pad = Dpoint - length(Digits),
case Pad of
X when X > 6 -> format(Digits, 1, []) ++ "e" ++ integer_to_list(Dpoint - 1)
; _ -> format(Digits ++ [ 0 || _ <- lists:seq(1, Pad)], Dpoint, [])
X when X > 6 ->
format(Digits, 1, []) ++ "e" ++ integer_to_list(Dpoint - 1)
; _ ->
format(Digits ++ [ 0 || _ <- lists:seq(1, Pad)], Dpoint, [])
end;
format(Dpoint, Digits) when Dpoint < 0 ->
format(Digits, 1, []) ++ "e" ++ integer_to_list(Dpoint - 1).
@ -344,32 +365,41 @@ format([], ignore, Acc) ->
format(Digits, 0, Acc) ->
format(Digits, ignore, "." ++ Acc);
format([Digit|Digits], Dpoint, Acc) ->
format(Digits, case Dpoint of ignore -> ignore; X -> X - 1 end, to_ascii(Digit) ++ Acc).
format(Digits,
case Dpoint of ignore -> ignore; X -> X - 1 end, to_ascii(Digit) ++ Acc
).
to_ascii(X) -> [X + 48]. %% ascii "1" is [49], "2" is [50], etc...
%% json string escaping, for utf8 binaries. escape the json control sequences to their
%% json equivalent, escape other control characters to \uXXXX sequences, everything
%% else should be a legal json string component
%% json string escaping, for utf8 binaries. escape the json control sequences to
%% their json equivalent, escape other control characters to \uXXXX sequences,
%% everything else should be a legal json string component
json_escape(String) ->
json_escape(String, <<>>).
%% double quote
json_escape(<<$\", Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $\">>);
json_escape(<<$\", Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $\">>);
%% backslash \ reverse solidus
json_escape(<<$\\, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $\\>>);
json_escape(<<$\\, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $\\>>);
%% backspace
json_escape(<<$\b, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $b>>);
json_escape(<<$\b, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $b>>);
%% form feed
json_escape(<<$\f, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $f>>);
json_escape(<<$\f, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $f>>);
%% newline
json_escape(<<$\n, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $n>>);
json_escape(<<$\n, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $n>>);
%% cr
json_escape(<<$\r, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $r>>);
json_escape(<<$\r, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $r>>);
%% tab
json_escape(<<$\t, Rest/binary>>, Acc) -> json_escape(Rest, <<Acc/binary, $\\, $t>>);
json_escape(<<$\t, Rest/binary>>, Acc) ->
json_escape(Rest, <<Acc/binary, $\\, $t>>);
%% other control characters
json_escape(<<C/utf8, Rest/binary>>, Acc) when C >= 0, C < $\s ->
json_escape(Rest, <<Acc/binary, (json_escape_sequence(C))/binary>>);
@ -382,8 +412,8 @@ json_escape(_, _) ->
erlang:error(badarg).
%% convert a codepoint to it's \uXXXX equiv. for laziness, this only handles codepoints
%% this module might escape, ie, control characters
%% convert a codepoint to its \uXXXX equiv. for laziness, this only handles
%% codepoints this module might escape, ie, control characters
json_escape_sequence(C) when C < 16#20 ->
<<_:8, A:4, B:4>> = <<C:16>>, % first two hex digits are always zero
<<$\\, $u, $0, $0, (to_hex(A)), (to_hex(B))>>.
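%% e.g. json_escape_sequence(11) yields <<"\\u000b">> (cf. the hex escape
%% test below)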
@ -405,64 +435,194 @@ decode_test_() ->
[
{"empty object", ?_assert(json_to_term(<<"{}">>, []) =:= [{}])},
{"empty array", ?_assert(json_to_term(<<"[]">>, []) =:= [])},
{"simple object", ?_assert(json_to_term(<<"{\"a\": true, \"b\": true, \"c\": true}">>, [{label, atom}]) =:= [{a, true}, {b, true}, {c, true}])},
{"simple array", ?_assert(json_to_term(<<"[true,true,true]">>, []) =:= [true, true, true])},
{"nested structures", ?_assert(json_to_term(<<"{\"list\":[{\"list\":[{}, {}],\"object\":{}}, []],\"object\":{}}">>, [{label, atom}]) =:= [{list, [[{list, [[{}], [{}]]}, {object, [{}]}],[]]}, {object, [{}]}])},
{"numbers", ?_assert(json_to_term(<<"[-10000000000.0, -1, 0.0, 0, 1, 10000000000, 1000000000.0]">>, []) =:= [-10000000000.0, -1, 0.0, 0, 1, 10000000000, 1000000000.0])},
{"numbers (all floats)", ?_assert(json_to_term(<<"[-10000000000.0, -1, 0.0, 0, 1, 10000000000, 1000000000.0]">>, [{float, true}]) =:= [-10000000000.0, -1.0, 0.0, 0.0, 1.0, 10000000000.0, 1000000000.0])},
{"strings", ?_assert(json_to_term(<<"[\"a string\"]">>, []) =:= [<<"a string">>])},
{"literals", ?_assert(json_to_term(<<"[true,false,null]">>, []) =:= [true,false,null])},
{"naked true", ?_assert(json_to_term(<<"true">>, [{strict, false}]) =:= true)},
{"naked short number", ?_assert(json_to_term(<<"1">>, [{strict, false}]) =:= 1)},
{"simple object",
?_assert(json_to_term(
<<"{\"a\": true, \"b\": true, \"c\": true}">>,
[{label, atom}]
) =:= [{a, true}, {b, true}, {c, true}]
)
},
{"simple array",
?_assert(json_to_term(<<"[true,true,true]">>,
[]
) =:= [true, true, true]
)
},
{"nested structures",
?_assert(json_to_term(
<<"{\"x\":[{\"x\":[{}, {}],\"y\":{}}, []],\"y\":{}}">>,
[{label, atom}]
) =:= [{x, [[{x, [[{}], [{}]]}, {y, [{}]}],[]]}, {y, [{}]}]
)
},
{"numbers",
?_assert(json_to_term(
<<"[-100000000.0, -1, 0.0, 0, 1, 100000000, 10000000.0]">>,
[]
) =:= [-100000000.0, -1, 0.0, 0, 1, 100000000, 10000000.0]
)
},
{"numbers (all floats)",
?_assert(json_to_term(
<<"[-100000000.0, -1, 0.0, 0, 1, 1000, 10000000.0]">>,
[{float, true}]
) =:= [-100000000.0, -1.0, 0.0, 0.0, 1.0, 1000.0, 10000000.0]
)
},
{"strings",
?_assert(json_to_term(<<"[\"a string\"]">>,
[]
) =:= [<<"a string">>])
},
{"literals",
?_assert(json_to_term(<<"[true,false,null]">>,
[]
) =:= [true,false,null]
)
},
{"naked true",
?_assert(json_to_term(<<"true">>, [{strict, false}]) =:= true)
},
{"naked short number",
?_assert(json_to_term(<<"1">>, [{strict, false}]) =:= 1)
},
{"float", ?_assert(json_to_term(<<"1.0">>, [{strict, false}]) =:= 1.0)},
{"naked string", ?_assert(json_to_term(<<"\"hello world\"">>, [{strict, false}]) =:= <<"hello world">>)},
{"comments", ?_assert(json_to_term(<<"[ /* a comment in an empty array */ ]">>, [{comments, true}]) =:= [])}
{"naked string",
?_assert(json_to_term(<<"\"hello world\"">>,
[{strict, false}]
) =:= <<"hello world">>
)
},
{"comments",
?_assert(json_to_term(<<"[ /* a comment in an empty array */ ]">>,
[{comments, true}]
) =:= []
)
}
].
encode_test_() ->
[
{"empty object", ?_assert(term_to_json([{}], []) =:= <<"{}">>)},
{"empty array", ?_assert(term_to_json([], []) =:= <<"[]">>)},
{"simple object", ?_assert(term_to_json([{a, true}, {b, true}, {c, true}], []) =:= <<"{\"a\":true,\"b\":true,\"c\":true}">>)},
{"simple array", ?_assert(term_to_json([true, true, true], []) =:= <<"[true,true,true]">>)},
{"nested structures", ?_assert(term_to_json([{list, [[{list, [[{}], [{}]]}, {object, [{}]}],[]]}, {object, [{}]}], []) =:= <<"{\"list\":[{\"list\":[{},{}],\"object\":{}},[]],\"object\":{}}">>)},
{"numbers", ?_assert(term_to_json([-10000000000.0, -1, 0.0, 0, 1, 10000000000, 1000000000.0], []) =:= <<"[-1.0e10,-1,0.0,0,1,10000000000,1.0e9]">>)},
{"strings", ?_assert(term_to_json([<<"a string">>], []) =:= <<"[\"a string\"]">>)},
{"literals", ?_assert(term_to_json([true,false,null], []) =:= <<"[true,false,null]">>)},
{"naked true", ?_assert(term_to_json(true, [{strict, false}]) =:= <<"true">>)},
{"naked number", ?_assert(term_to_json(1, [{strict, false}]) =:= <<"1">>)},
{"simple object",
?_assert(term_to_json([{a, true}, {b, true}, {c, true}],
[]
) =:= <<"{\"a\":true,\"b\":true,\"c\":true}">>
)
},
{"simple array",
?_assert(term_to_json([true, true, true],
[]
) =:= <<"[true,true,true]">>
)
},
{"nested structures",
?_assert(term_to_json(
[{x, [[{x, [[{}], [{}]]}, {y, [{}]}],[]]}, {y, [{}]}],
[]
) =:= <<"{\"x\":[{\"x\":[{},{}],\"y\":{}},[]],\"y\":{}}">>
)
},
{"numbers",
?_assert(term_to_json(
[-10000000000.0, -1, 0.0, 0, 1, 10000000, 1000000000.0],
[]
) =:= <<"[-1.0e10,-1,0.0,0,1,10000000,1.0e9]">>
)
},
{"strings",
?_assert(term_to_json([<<"a string">>],
[]
) =:= <<"[\"a string\"]">>
)
},
{"literals",
?_assert(term_to_json([true,false,null],
[]
) =:= <<"[true,false,null]">>
)
},
{"naked true",
?_assert(term_to_json(true, [{strict, false}]) =:= <<"true">>)
},
{"naked number",
?_assert(term_to_json(1, [{strict, false}]) =:= <<"1">>)
},
{"float", ?_assert(term_to_json(1.0, [{strict, false}]) =:= <<"1.0">>)},
{"naked string", ?_assert(term_to_json(<<"hello world">>, [{strict, false}]) =:= <<"\"hello world\"">>)}
{"naked string",
?_assert(term_to_json(<<"hello world">>,
[{strict, false}]
) =:= <<"\"hello world\"">>
)
}
].
repeated_keys_test_() ->
[
{"encode", ?_assertError(badarg, term_to_json([{k, true}, {k, false}], []))},
{"decode", ?_assertError(badarg, json_to_term(<<"{\"k\": true, \"k\": false}">>, []))}
{"encode",
?_assertError(badarg, term_to_json([{k, true}, {k, false}], []))
},
{"decode",
?_assertError(badarg, json_to_term(
<<"{\"k\": true, \"k\": false}">>,
[]
)
)
}
].
escape_test_() ->
[
{"json string escaping", ?_assert(json_escape(<<"\"\\\b\f\n\r\t">>) =:= <<"\\\"\\\\\\b\\f\\n\\r\\t">>)},
{"json string hex escape", ?_assert(json_escape(<<1, 2, 3, 11, 26, 30, 31>>) =:= <<"\\u0001\\u0002\\u0003\\u000b\\u001a\\u001e\\u001f">>)}
{"json string escaping",
?_assert(json_escape(
<<"\"\\\b\f\n\r\t">>
) =:= <<"\\\"\\\\\\b\\f\\n\\r\\t">>
)
},
{"json string hex escape",
?_assert(json_escape(
<<1, 2, 3, 11, 26, 30, 31>>
) =:= <<"\\u0001\\u0002\\u0003\\u000b\\u001a\\u001e\\u001f">>
)
}
].
nice_decimal_test_() ->
[
{"0.0", ?_assert(float_to_decimal(0.0) =:= "0.0")},
{"1.0", ?_assert(float_to_decimal(1.0) =:= "1.0")},
{"-1.0", ?_assert(float_to_decimal(-1.0) =:= "-1.0")},
{"3.1234567890987654321", ?_assert(float_to_decimal(3.1234567890987654321) =:= "3.1234567890987655")},
{"1.0e23", ?_assert(float_to_decimal(1.0e23) =:= "1.0e23")},
{"0.3", ?_assert(float_to_decimal(3.0/10.0) =:= "0.3")},
{"0.0001", ?_assert(float_to_decimal(0.0001) =:= "1.0e-4")},
{"0.00000001", ?_assert(float_to_decimal(0.00000001) =:= "1.0e-8")},
{"1.0e-323", ?_assert(float_to_decimal(1.0e-323) =:= "1.0e-323")},
{"1.0e308", ?_assert(float_to_decimal(1.0e308) =:= "1.0e308")},
{"min normalized float", ?_assert(float_to_decimal(math:pow(2, -1022)) =:= "2.2250738585072014e-308")},
{"max normalized float", ?_assert(float_to_decimal((2 - math:pow(2, -52)) * math:pow(2, 1023)) =:= "1.7976931348623157e308")},
{"min denormalized float", ?_assert(float_to_decimal(math:pow(2, -1074)) =:= "5.0e-324")},
{"max denormalized float", ?_assert(float_to_decimal((1 - math:pow(2, -52)) * math:pow(2, -1022)) =:= "2.225073858507201e-308")}
{"0.0", ?_assert(nice_decimal(0.0) =:= "0.0")},
{"1.0", ?_assert(nice_decimal(1.0) =:= "1.0")},
{"-1.0", ?_assert(nice_decimal(-1.0) =:= "-1.0")},
{"3.1234567890987654321",
?_assert(
nice_decimal(3.1234567890987654321) =:= "3.1234567890987655")
},
{"1.0e23", ?_assert(nice_decimal(1.0e23) =:= "1.0e23")},
{"0.3", ?_assert(nice_decimal(3.0/10.0) =:= "0.3")},
{"0.0001", ?_assert(nice_decimal(0.0001) =:= "1.0e-4")},
{"0.00000001", ?_assert(nice_decimal(0.00000001) =:= "1.0e-8")},
{"1.0e-323", ?_assert(nice_decimal(1.0e-323) =:= "1.0e-323")},
{"1.0e308", ?_assert(nice_decimal(1.0e308) =:= "1.0e308")},
{"min normalized float",
?_assert(
nice_decimal(math:pow(2, -1022)) =:= "2.2250738585072014e-308"
)
},
{"max normalized float",
?_assert(
nice_decimal((2 - math:pow(2, -52)) * math:pow(2, 1023))
=:= "1.7976931348623157e308"
)
},
{"min denormalized float",
?_assert(nice_decimal(math:pow(2, -1074)) =:= "5.0e-324")
},
{"max denormalized float",
?_assert(
nice_decimal((1 - math:pow(2, -52)) * math:pow(2, -1022))
=:= "2.225073858507201e-308"
)
}
].
-endif.


@ -79,7 +79,11 @@ format_something({event, start_object, Next}, Opts, Level) ->
{Continue, [?start_object, ?end_object]}
; Event ->
{Continue, Object} = format_object(Event, [], Opts, Level + 1),
{Continue, [?start_object, Object, indent(Opts, Level), ?end_object]}
{Continue, [?start_object,
Object,
indent(Opts, Level),
?end_object
]}
end;
format_something({event, start_array, Next}, Opts, Level) ->
case Next() of
@ -99,10 +103,24 @@ format_object({event, {key, Key}, Next}, Acc, Opts, Level) ->
{Continue, Value} = format_something(Next(), Opts, Level),
case Continue() of
{event, end_object, NextNext} ->
{NextNext, [Acc, indent(Opts, Level), encode(string, Key), ?colon, space(Opts), Value]}
{NextNext, [Acc,
indent(Opts, Level),
encode(string, Key),
?colon,
space(Opts),
Value
]}
; Else ->
format_object(Else,
[Acc, indent(Opts, Level), encode(string, Key), ?colon, space(Opts), Value, ?comma, space(Opts)],
[Acc,
indent(Opts, Level),
encode(string, Key),
?colon,
space(Opts),
Value,
?comma,
space(Opts)
],
Opts,
Level
)
@ -117,14 +135,24 @@ format_array(Event, Acc, Opts, Level) ->
{event, end_array, NextNext} ->
{NextNext, [Acc, indent(Opts, Level), Value]}
; Else ->
format_array(Else, [Acc, indent(Opts, Level), Value, ?comma, space(Opts)], Opts, Level)
format_array(Else,
[Acc,
indent(Opts, Level),
Value,
?comma,
space(Opts)
],
Opts,
Level
)
end.
encode(Acc, Opts) when is_list(Acc) ->
case Opts#format_opts.output_encoding of
iolist -> Acc
; UTF when ?is_utf_encoding(UTF) -> unicode:characters_to_binary(Acc, utf8, UTF)
; UTF when ?is_utf_encoding(UTF) ->
unicode:characters_to_binary(Acc, utf8, UTF)
; _ -> erlang:throw(badarg)
end;
encode(string, String) ->
@ -162,17 +190,58 @@ space(Opts) ->
minify_test_() ->
[
{"minify object", ?_assert(format(<<" { \"key\" :\n\t \"value\"\r\r\r\n } ">>, []) =:= <<"{\"key\":\"value\"}">>)},
{"minify array", ?_assert(format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>, []) =:= <<"[true,false,null]">>)}
{"minify object",
?_assert(format(<<" { \"key\" :\n\t \"value\"\r\r\r\n } ">>,
[]
) =:= <<"{\"key\":\"value\"}">>
)
},
{"minify array",
?_assert(format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>,
[]
) =:= <<"[true,false,null]">>
)
}
].
opts_test_() ->
[
{"unspecified indent/space", ?_assert(format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>, [space, indent]) =:= <<"[\n true, \n false, \n null\n]">>)},
{"specific indent/space", ?_assert(format(<<"\n{\n\"key\" : [],\n\"another key\" : true\n}\n">>, [{space, 2}, {indent, 4}]) =:= <<"{\n \"key\": [], \n \"another key\": true\n}">>)},
{"nested structures", ?_assert(format(<<"[{\"key\":\"value\", \"another key\": \"another value\"}, [[true, false, null]]]">>, [{space, 2}, {indent, 2}]) =:= <<"[\n {\n \"key\": \"value\", \n \"another key\": \"another value\"\n }, \n [\n [\n true, \n false, \n null\n ]\n ]\n]">>)},
{"just spaces", ?_assert(format(<<"[1,2,3]">>, [{space, 2}]) =:= <<"[1, 2, 3]">>)},
{"just indent", ?_assert(format(<<"[1.0, 2.0, 3.0]">>, [{indent, 2}]) =:= <<"[\n 1.0,\n 2.0,\n 3.0\n]">>)}
{"unspecified indent/space",
?_assert(format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>,
[space, indent]
) =:= <<"[\n true, \n false, \n null\n]">>
)
},
{"specific indent/space",
?_assert(format(
<<"\n{\n\"key\" : [],\n\"another key\" : true\n}\n">>,
[{space, 2}, {indent, 3}]
) =:= <<"{\n \"key\": [], \n \"another key\": true\n}">>
)
},
{"nested structures",
?_assert(format(
<<"[{\"key\":\"value\",
\"another key\": \"another value\"
},
[[true, false, null]]
]">>,
[{space, 2}, {indent, 2}]
) =:= <<"[\n {\n \"key\": \"value\", \n \"another key\": \"another value\"\n }, \n [\n [\n true, \n false, \n null\n ]\n ]\n]">>
)
},
{"just spaces",
?_assert(format(<<"[1,2,3]">>,
[{space, 2}]
) =:= <<"[1, 2, 3]">>
)
},
{"just indent",
?_assert(format(<<"[1.0, 2.0, 3.0]">>,
[{indent, 2}]
) =:= <<"[\n 1.0,\n 2.0,\n 3.0\n]">>
)
}
].
-endif.


@ -68,8 +68,8 @@ collect({event, start_object, Next}, Keys) -> collect(Next(), [[]|Keys]);
collect({event, end_object, Next}, [_|Keys]) -> collect(Next(), [Keys]);
%% check to see if key has already been encountered, if not add it to the key accumulator
%% and continue, else return false
%% check to see if key has already been encountered, if not add it to the key
%% accumulator and continue, else return false
collect({event, {key, Key}, Next}, [Current|Keys]) ->
case lists:member(Key, Current) of
true -> false
@ -81,7 +81,8 @@ collect({event, _, Next}, Keys) ->
collect(Next(), Keys);
%% needed to parse numbers that don't have trailing whitespace in less strict mode
%% needed to parse numbers that don't have trailing whitespace in less strict
%% mode
collect({incomplete, More}, Keys) ->
collect(More(end_stream), Keys);
@ -98,32 +99,85 @@ true_test_() ->
[
{"empty object", ?_assert(is_json(<<"{}">>, []) =:= true)},
{"empty array", ?_assert(is_json(<<"[]">>, []) =:= true)},
{"whitespace", ?_assert(is_json(<<" \n \t \r [true] \t \n\r ">>, []) =:= true)},
{"nested terms", ?_assert(is_json(<<"[ { \"key\": [ {}, {}, {} ], \"more key\": [{}] }, {}, [[[]]] ]">>, []) =:= true)},
{"numbers", ?_assert(is_json(<<"[ -1.0, -1, -0, 0, 1e-1, 1, 1.0, 1e1 ]">>, []) =:= true)},
{"strings", ?_assert(is_json(<<"[ \"a\", \"string\", \"in\", \"multiple\", \"acts\" ]">>, []) =:= true)},
{"literals", ?_assert(is_json(<<"[ true, false, null ]">>, []) =:= true)},
{"nested objects", ?_assert(is_json(<<"{\"key\": { \"key\": true}}">>, []) =:= true)}
{"whitespace",
?_assert(is_json(<<" \n \t \r [true] \t \n\r ">>,
[]
) =:= true
)
},
{"nested terms",
?_assert(is_json(
<<"[{ \"x\": [ {}, {}, {} ], \"y\": [{}] }, {}, [[[]]]]">>,
[]
) =:= true
)
},
{"numbers",
?_assert(is_json(
<<"[ -1.0, -1, -0, 0, 1e-1, 1, 1.0, 1e1 ]">>,
[]
) =:= true
)
},
{"strings",
?_assert(is_json(
<<"[ \"a\", \"string\", \"in\", \"multiple\", \"acts\" ]">>,
[]
) =:= true
)
},
{"literals",
?_assert(is_json(<<"[ true, false, null ]">>, []) =:= true)
},
{"nested objects",
?_assert(is_json(<<"{\"key\": { \"key\": true}}">>, []) =:= true)
}
].
false_test_() ->
[
{"naked true", ?_assert(is_json(<<"true">>, []) =:= false)},
{"naked number", ?_assert(is_json(<<"1">>, []) =:= false)},
{"naked string", ?_assert(is_json(<<"\"i am not json\"">>, []) =:= false)},
{"naked string",
?_assert(is_json(<<"\"i am not json\"">>, []) =:= false)
},
{"unbalanced list", ?_assert(is_json(<<"[[[]]">>, []) =:= false)},
{"trailing comma", ?_assert(is_json(<<"[ true, false, null, ]">>, []) =:= false)},
{"trailing comma",
?_assert(is_json(<<"[ true, false, null, ]">>, []) =:= false)
},
{"unquoted key", ?_assert(is_json(<<"{ key: false }">>, []) =:= false)},
{"repeated key", ?_assert(is_json(<<"{\"key\": true, \"key\": true}">>, []) =:= false)},
{"repeated key",
?_assert(is_json(
<<"{\"key\": true, \"key\": true}">>,
[]
) =:= false
)
},
{"comments", ?_assert(is_json(<<"[ /* a comment */ ]">>, []) =:= false)}
].
less_strict_test_() ->
[
{"naked true", ?_assert(is_json(<<"true">>, [{strict, false}]) =:= true)},
{"naked number", ?_assert(is_json(<<"1">>, [{strict, false}]) =:= true)},
{"naked string", ?_assert(is_json(<<"\"i am not json\"">>, [{strict, false}]) =:= true)},
{"comments", ?_assert(is_json(<<"[ /* a comment */ ]">>, [{comments, true}]) =:= true)}
{"naked true",
?_assert(is_json(<<"true">>, [{strict, false}]) =:= true)
},
{"naked number",
?_assert(is_json(<<"1">>, [{strict, false}]) =:= true)
},
{"naked string",
?_assert(is_json(
<<"\"i am not json\"">>,
[{strict, false}]
) =:= true
)
},
{"comments",
?_assert(is_json(
<<"[ /* a comment */ ]">>,
[{comments, true}]
) =:= true
)
}
].