diff --git a/README.markdown b/README.markdown index 2ee3454..f7aae27 100644 --- a/README.markdown +++ b/README.markdown @@ -1,22 +1,201 @@ -## jsx (v1.0.2) ## +# jsx (v1.1) # -a sane json implementation for erlang, inspired by [yajl][yajl] +a sane [json][json] implementation for erlang, inspired by [yajl][yajl] copyright 2011, 2012 alisdair sullivan jsx is released under the terms of the [MIT][MIT] license -jsx uses [rebar][rebar] and [meck][meck] +jsx uses [rebar][rebar] for its build chain and [meck][meck] for its test suite [![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=master)](http://travis-ci.org/talentdeficit/jsx) -## api ## +## index ## + +* [introduction](#intro) +* [quickstart](#quickstart) +* [the api](#api) + - [json <-> erlang mapping](#mapping) + - [options](#options) + - [incomplete input](#incompletes) + - [the encoder and decoder](#core) + - [handler callbacks](#handler) + - [converting json to erlang and vice versa](#convert) + - [formatting and minifying json text](#format) + - [verifying json and terms are valid input](#verify) +* [acknowledgements](#thanks) -**converting json to erlang terms** -parses a JSON text (a utf8 encoded binary) and produces an erlang term (see json <-> erlang mapping details below) +## quickstart ## + +to build the library: `rebar compile` + +to convert a utf8 binary containing a json string into an erlang term: `jsx:to_term(JSON)` + +to convert an erlang term into a utf8 binary containing a json string: `jsx:to_json(Term)` + +to check if a binary is valid json: `jsx:is_json(JSON)` + +to check if a term is valid json: `jsx:is_term(Term)` + +to minify a json string: `jsx:format(JSON)` + + +## api ## + + +### json <-> erlang mapping ### + +**json** | **erlang** +--------------------------------|-------------------------------- +`number` | `integer()` and `float()` +`string` | `binary()` +`true`, `false` and `null` | `true`, `false` and `null` +`array` | `[]` and `[JSON]` +`object` | `[{}]` and `[{binary() OR atom(), JSON}]` + +#### json #### + +json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that; they are detailed in the options section below + +jsx also supports json fragments; valid json values that are not complete json texts. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without problems + +#### erlang #### + +only the erlang terms in the table above are supported. unsupported terms result in `badarg` errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly + +#### numbers #### + +javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite ieee 754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers that are not representable are beyond the concern of this implementation and will result in parsing errors + +when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0`
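as a rough sketch of the mapping above (shell output is indicative only and assumes jsx is compiled and on the code path):

```erlang
%% json -> erlang: objects become proplists, arrays become lists, strings
%% become binaries and integer-looking numbers stay integers
1> jsx:to_term(<<"{\"library\": \"jsx\", \"counts\": [1, 2.0, -3]}">>).
[{<<"library">>,<<"jsx">>},{<<"counts">>,[1,2.0,-3]}]

%% erlang -> json: note the special empty object representation `[{}]`
2> jsx:to_json([{<<"awesome">>, true}, {<<"empty">>, [{}]}]).
<<"{\"awesome\":true,\"empty\":{}}">>
```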
+#### strings #### + +the [json spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints disallowed by unicode. json allows implementations to set limits on the content of strings, and different implementations resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (properly escaped control characters, `"` and the escape character, `\`) and that are encoded in `utf8` + +the utf8 restriction means improperly paired surrogates are explicitly disallowed. escapes for `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs; surrogates that appear otherwise are an error + +json string escapes of the form `\uXXXX` will be converted to their equivalent codepoint during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be + +in the interests of pragmatism, there is an option for looser parsing; see options below + +all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. the same restrictions apply as for strings encountered within json texts. that means no unpaired surrogates + +this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings, as equivalent strings may have different `utf8` encodings + +#### true, false and null #### + +the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise + +#### arrays #### + +json arrays are represented with erlang lists of json values as described in this section + +#### objects #### + +json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists (bare atoms as properties), so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values + + +### options ### + +jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. flags are always atoms and have no value. functions may have additional options beyond these; see individual function documentation for details + +#### `loose_unicode` #### + +json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. if this option is present, attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec. this applies both to malformed unicode and disallowed codepoints
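for example (a sketch only; the exact error and replacement output are indicative), a stray continuation byte inside a string is rejected by default but parses with `loose_unicode`:

```erlang
%% 159 is not a valid utf8 sequence on its own, so by default this is an error
1> jsx:to_term(<<"[\"", 159, "\"]">>).
** exception error: bad argument

%% with `loose_unicode` the offending byte is replaced with `u+fffd`
2> jsx:to_term(<<"[\"", 159, "\"]">>, [loose_unicode]).
[<<239,191,189>>]
```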
+#### `escape_forward_slash` #### + +json strings are escaped according to the json spec. this means forward slashes are never escaped. unfortunately, a microsoft implementation of json uses escaped forward slashes in json formatted date strings. without this option it is impossible to get date strings that some microsoft tools understand + +#### `explicit_end` #### + +this option treats all exhausted inputs as incomplete, as explained below. the parser will not attempt to return a final state until the function is called with the value `end_stream` + +#### `single_quotes` #### + +some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural (delimiter) characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened them and that single quotes must, obviously, be escaped within strings delimited by single quotes. the parser will never emit json with keys or strings delimited by single quotes + +#### `no_jsonp_escapes` #### + +javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping if, for some reason, you object to this + +#### `comments` #### + +json has no official comments, but some parsers allow c-style comments. this flag allows comments (both `// ...` and `/* ... */` style) anywhere whitespace is allowed + + +### incomplete input ### + +jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory + +however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete, jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour; see above
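a sketch of both behaviours (the exact shape of the returned fun is elided):

```erlang
%% a partial json text returns {incomplete, F}; F accepts the rest of the input
1> {incomplete, F} = jsx:to_term(<<"[\"chunk one, ">>).
2> F(<<"chunk two\"]">>).
[<<"chunk one, chunk two">>]

%% a bare number has no unambiguous end; `explicit_end` defers completion
%% until the fun is called with `end_stream`
3> {incomplete, G} = jsx:to_term(<<"123">>, [explicit_end]).
4> G(end_stream).
123
```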
+### the encoder and decoder ### + +jsx is built on top of two finite state automata, one that handles json texts and one that handles erlang terms. both take a callback module as an argument that acts like a fold over a list of json 'events'. these events and the handler module's callbacks are detailed in the next section + +`jsx:decoder/3` and `jsx:encoder/3` are the entry points for the decoder and encoder, respectively + +`decoder(Handler, InitialState, Opts)` -> `Fun((JSON) -> Any)` + +`encoder(Handler, InitialState, Opts)` -> `Fun((Term) -> Any)` + +types: + +- `Handler` = `atom()`, should be the name of a callback module; see below +- `InitialState` = `term()`, passed as is to `Handler:init/1` +- `Opts` = see above +- `JSON` = `utf8` encoded json text +- `Term` = an erlang term as specified above in the mapping section +- `Any` = `term()` + +decoder returns an anonymous function that handles binary json input and encoder returns an anonymous function that handles erlang term input. these are safe to reuse for multiple inputs + + +### handler callbacks ### + +`Handler` should export the following pair of functions + +`Handler:init(InitialState)` -> `State` + +`Handler:handle_event(Event, State)` -> `NewState` + +types: + +- `InitialState`, `State`, `NewState` = any erlang term +- `Event` = + * `start_object` + * `end_object` + * `start_array` + * `end_array` + * `end_json` + * `{key, binary()}` + * `{string, binary()}` + * `{integer, integer()}` + * `{float, float()}` + * `{literal, true}` + * `{literal, false}` + * `{literal, null}` + +`init/1` is called with the `InitialState` argument from `decoder/3` or `encoder/3` and should take care of any initialization your handler requires and return the initial state + +`handle_event/2` is called for each `Event` emitted by the decoder/encoder, with the output of the previous `handle_event/2` call (or the `init/1` call, if `handle_event/2` has not yet been called) as its state argument + +the event `end_json` will always be the last event emitted; take care of any cleanup in `handle_event/2` when encountering `end_json`. the state returned from this call will be returned as the final result of the anonymous function + +both `key` and `string` are `utf8` encoded binaries with all escaped values converted into the appropriate codepoints
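a minimal sketch of a handler that simply counts events (the module name `event_counter` is purely illustrative):

```erlang
-module(event_counter).
-export([init/1, handle_event/2]).

%% the initial state is whatever was passed to decoder/3 or encoder/3
init(Count) -> Count.

%% end_json is always the last event; whatever is returned here becomes
%% the final result of the anonymous function
handle_event(end_json, Count) -> Count;
handle_event(_Event, Count) -> Count + 1.
```

used with the decoder, something like `F = jsx:decoder(event_counter, 0, []), F(<<"[true, 1, \"hello\"]">>)` should return `5`: one event each for `start_array`, `{literal, true}`, `{integer, 1}`, `{string, <<"hello">>}` and `end_array`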
+### converting json to erlang and vice versa ### + +#### converting json to erlang terms #### + +`to_term` parses a JSON text (a utf8 encoded binary) and produces an erlang term (see json <-> erlang mapping details above) `to_term(JSON)` -> `Term` @@ -24,31 +203,22 @@ parses a JSON text (a utf8 encoded binary) and produces an erlang term (see json types: -* `JSON` = `binary()` -* `Term` = `[]` | `[{}]` | `[Value]` | `[{Label, Value}]` | `{incomplete, Fun}` -* `Value` = `binary()` | `integer()` | `float()` | `true` | `false` | `null` -* `Label` = `binary()` | `atom()` -* `Fun` = `fun(JSON)` -> `Term` -* `Opts` = `[]` | `[Opt]` +* `JSON` = as above in the mapping section +* `Term` = as above in the mapping section +* `Opts` = as above in the opts section, but see also additional opts below * `Opt` = - - `loose_unicode` - `labels` - `{labels, Label}` - `Label` = * `binary` * `atom` * `existing_atom` - - `explicit_end` - -`JSON` SHOULD be a utf8 encoded binary. if the option `loose_unicode` is present attempts are made to replace invalid codepoints with `u+FFFD` but badly encoded binaries may, in either case, result in `badarg` errors the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms, and results in a badarg error if keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom`, except it will not add new atoms to the atom table -see the note below about streaming mode for details of `explicit_end` - -**converting erlang terms to json** +#### converting erlang terms to json #### -produces a JSON text from an erlang term (see json <-> erlang mapping details below) +`to_json` takes an erlang term and produces a JSON text (see json <-> erlang mapping details above) `to_json(Term)` -> `JSON` @@ -56,26 +226,23 @@ produces a JSON text from an erlang term (see json <-> erlang mapping details be types: -* `JSON` = `binary()` -* `Term` = `[]` | `[{}]` | `[Value]` | `[{Label, Value}]` | `{incomplete, Fun}` -* `Value` = `binary()` | `integer()` | `float()` | `true` | `false` | `null` -* `Label` = `binary()` | `atom()` -* `Opts` = `[]` | `[Opt]` +* `JSON` = as above in the mapping section +* `Term` = as above in the mapping section +* `Opts` = as above in the opts section, but see also additional opts below * `Opt` = - `space` - `{space, N}` - `indent` - `{indent, N}` - - `escape_forward_slash` the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` - -if the option `escape_forward_slash` is enabled, `$/` is escaped.
this is not normally required but is necessary for compatibility with microsoft's json date format - -see the note below about streaming mode for details of `explicit_end` +calling `format` with no options results in minified json text -**verifying json texts** +### verifying json and terms are valid input ### + +#### verifying json texts #### returns true if input is a valid JSON text, false if not @@ -122,81 +280,30 @@ returns true if input is a valid JSON text, false if not types: * `MaybeJSON` = `any()` -* `Opts` = `[]` | `[Opt]` -* `Opt` = - - `loose_unicode` - - `explicit_end` - -see `json_to_term` for details of options +* `Opts` = as above -**verifying json texts** +#### verifying terms #### returns true if input is a valid erlang term that represents a JSON text, false if not `is_term(MaybeJSON)` -> `true` | `false` +`is_term(MaybeJSON, Opts)` -> `true` | `false` + types: * `MaybeJSON` = `any()` +* `Opts` = as above -**streaming mode** - -this implementation is interruptable and reentrant and may be used to incrementally parse json texts. it's greedy and will exhaust input, returning when the stream buffer is empty. if the json text is so far valid, but incomplete (or if the option `explicit_end` has been selected), `{incomplete, Fun}` will be returned. `Fun/1` may be called with additional input (or the atom `end_stream` to force the end of parsing) - -`explicit_end` is of use when parsing bare numbers (like `123` or `-0.987` for example) as they may have no unambiguous end when encountered in a stream. it is also of use when reading from a socket or file and there may be unprocessed white space (or errors) left in the stream - - -## json <-> erlang ## - -**json** | **erlang** ---------------------------------|-------------------------------- -`number` | `integer()` OR `float()` -`string` | `binary()` -`true`, `false` and `null` | `true`, `false` and `null` -`array` | `[]` OR `[JSON]` -`object` | `[{}]` OR `[{binary(), JSON}]` - -**json** - -json must be encoded in `utf8`. if it's invalid `utf8`, it probably won't parse without errors. one optional exception is made for json strings that are otherwise `utf8`, see under `strings` below. - -**numbers** - -javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors - -when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` - -**strings** - -the [json spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. 
this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (properly escaped control characters and quotes) and that are encoded in `utf8`. in the interests of pragmatism, however, the parser option `loose_unicode` attempts to replace invalid `utf8` sequences with the replacement codepoint `u+fffd` when possible - -all erlang strings are represented by *valid* `utf8` encoded binaries - -this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings - -**true, false and null** - -the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise - -**arrays** - -json arrays are represented with erlang lists of json values as described in this document - -**objects** - -json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists using atoms as properties. all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers) - - -## acknowledgements ## - -paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides and alex kropivny have all contributed to the development of jsx, whether they know it or not +## acknowledgements ## +jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides, alex kropivny, steve strong, michael truog and dmitry kolesnikov +[json]: http://json.org [yajl]: http://lloyd.github.com/yajl [MIT]: http://www.opensource.org/licenses/mit-license.html [rebar]: https://github.com/basho/rebar [meck]: https://github.com/eproxus/meck -[json]: http://json.org [rfc4627]: http://tools.ietf.org/html/rfc4627 \ No newline at end of file diff --git a/priv/test_cases/empty_object_in_array.json b/priv/test_cases/empty_object_in_array.json new file mode 100644 index 0000000..ee1aac4 --- /dev/null +++ b/priv/test_cases/empty_object_in_array.json @@ -0,0 +1 @@ +[{}] \ No newline at end of file diff --git a/priv/test_cases/empty_object_in_array.test b/priv/test_cases/empty_object_in_array.test new file mode 100644 index 0000000..0a8679d --- /dev/null +++ b/priv/test_cases/empty_object_in_array.test @@ -0,0 +1,3 @@ +{name, "empty_object_in_array"}. +{jsx, [start_array,start_object,end_object,end_array,end_json]}. +{json, "empty_object_in_array.json"}. diff --git a/priv/test_cases/empty_string.json b/priv/test_cases/empty_string.json new file mode 100644 index 0000000..3cc762b --- /dev/null +++ b/priv/test_cases/empty_string.json @@ -0,0 +1 @@ +"" \ No newline at end of file diff --git a/priv/test_cases/empty_string.test b/priv/test_cases/empty_string.test new file mode 100644 index 0000000..c6faf71 --- /dev/null +++ b/priv/test_cases/empty_string.test @@ -0,0 +1,3 @@ +{name, "empty_string"}. +{jsx, [{string, <<>>},end_json]}. +{json, "empty_string.json"}. 
diff --git a/priv/test_cases/escaped_control.json b/priv/test_cases/escaped_control.json new file mode 100644 index 0000000..78af83f --- /dev/null +++ b/priv/test_cases/escaped_control.json @@ -0,0 +1 @@ +"\u0012" \ No newline at end of file diff --git a/priv/test_cases/escaped_control.test b/priv/test_cases/escaped_control.test new file mode 100644 index 0000000..603d719 --- /dev/null +++ b/priv/test_cases/escaped_control.test @@ -0,0 +1,3 @@ +{name, "escaped_control"}. +{jsx, [{string, <<18>>},end_json]}. +{json, "escaped_control.json"}. diff --git a/priv/test_cases/escaped_noncharacter.json b/priv/test_cases/escaped_noncharacter.json deleted file mode 100644 index e5c1b65..0000000 --- a/priv/test_cases/escaped_noncharacter.json +++ /dev/null @@ -1 +0,0 @@ -"\uffff" \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter.test b/priv/test_cases/escaped_noncharacter.test deleted file mode 100644 index 4e20bc3..0000000 --- a/priv/test_cases/escaped_noncharacter.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "escaped noncharacter"}. -{jsx, {error, badjson}}. -{json, "escaped_noncharacter.json"}. \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_ext.json b/priv/test_cases/escaped_noncharacter_ext.json deleted file mode 100644 index f10ec2b..0000000 --- a/priv/test_cases/escaped_noncharacter_ext.json +++ /dev/null @@ -1 +0,0 @@ -"\ud83f\udfff" \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_ext.test b/priv/test_cases/escaped_noncharacter_ext.test deleted file mode 100644 index 7049148..0000000 --- a/priv/test_cases/escaped_noncharacter_ext.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "escaped noncharacter (extended)"}. -{jsx, {error, badjson}}. -{json, "escaped_noncharacter_ext.json"}. \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_ext_replaced.json b/priv/test_cases/escaped_noncharacter_ext_replaced.json deleted file mode 100644 index f10ec2b..0000000 --- a/priv/test_cases/escaped_noncharacter_ext_replaced.json +++ /dev/null @@ -1 +0,0 @@ -"\ud83f\udfff" \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_ext_replaced.test b/priv/test_cases/escaped_noncharacter_ext_replaced.test deleted file mode 100644 index 0a740b6..0000000 --- a/priv/test_cases/escaped_noncharacter_ext_replaced.test +++ /dev/null @@ -1,4 +0,0 @@ -{name, "escaped noncharacter (extended)"}. -{jsx, [{string, <<16#fffd/utf8>>}, end_json]}. -{json, "escaped_noncharacter_ext.json"}. -{jsx_flags, [loose_unicode]}. \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_replaced.json b/priv/test_cases/escaped_noncharacter_replaced.json deleted file mode 100644 index e5c1b65..0000000 --- a/priv/test_cases/escaped_noncharacter_replaced.json +++ /dev/null @@ -1 +0,0 @@ -"\uffff" \ No newline at end of file diff --git a/priv/test_cases/escaped_noncharacter_replaced.test b/priv/test_cases/escaped_noncharacter_replaced.test deleted file mode 100644 index 9c5faac..0000000 --- a/priv/test_cases/escaped_noncharacter_replaced.test +++ /dev/null @@ -1,4 +0,0 @@ -{name, "escaped noncharacter replacement"}. -{jsx, [{string,<<16#fffd/utf8>>},end_json]}. -{json, "escaped_noncharacter_replaced.json"}. -{jsx_flags, [loose_unicode]}. 
\ No newline at end of file diff --git a/priv/test_cases/escaped_reserved_a.json b/priv/test_cases/escaped_reserved_a.json deleted file mode 100644 index dab850b..0000000 --- a/priv/test_cases/escaped_reserved_a.json +++ /dev/null @@ -1 +0,0 @@ -"\ufdd0" \ No newline at end of file diff --git a/priv/test_cases/escaped_reserved_a.test b/priv/test_cases/escaped_reserved_a.test deleted file mode 100644 index 8a5cba2..0000000 --- a/priv/test_cases/escaped_reserved_a.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "escaped reserved a"}. -{jsx, {error, badjson}}. -{json, "escaped_reserved_a.json"}. \ No newline at end of file diff --git a/priv/test_cases/escaped_reserved_b.json b/priv/test_cases/escaped_reserved_b.json deleted file mode 100644 index be11b6e..0000000 --- a/priv/test_cases/escaped_reserved_b.json +++ /dev/null @@ -1 +0,0 @@ -"\ufdef" \ No newline at end of file diff --git a/priv/test_cases/escaped_reserved_b.test b/priv/test_cases/escaped_reserved_b.test deleted file mode 100644 index 414f024..0000000 --- a/priv/test_cases/escaped_reserved_b.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "escaped reserved b"}. -{jsx, {error, badjson}}. -{json, "escaped_reserved_b.json"}. \ No newline at end of file diff --git a/priv/test_cases/naked_false.json b/priv/test_cases/false.json similarity index 100% rename from priv/test_cases/naked_false.json rename to priv/test_cases/false.json diff --git a/priv/test_cases/false.test b/priv/test_cases/false.test new file mode 100644 index 0000000..f40af7f --- /dev/null +++ b/priv/test_cases/false.test @@ -0,0 +1,3 @@ +{name, "false"}. +{jsx, [{literal, false},end_json]}. +{json, "false.json"}. diff --git a/priv/test_cases/naked_false.test b/priv/test_cases/naked_false.test deleted file mode 100644 index 5db57a1..0000000 --- a/priv/test_cases/naked_false.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "naked_false"}. -{jsx, [{literal,false},end_json]}. -{json, "naked_false.json"}. diff --git a/priv/test_cases/naked_null.test b/priv/test_cases/naked_null.test deleted file mode 100644 index 7386eaf..0000000 --- a/priv/test_cases/naked_null.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "naked_null"}. -{jsx, [{literal,null},end_json]}. -{json, "naked_null.json"}. diff --git a/priv/test_cases/naked_true.test b/priv/test_cases/naked_true.test deleted file mode 100644 index 924a200..0000000 --- a/priv/test_cases/naked_true.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "naked_true"}. -{jsx, [{literal,true},end_json]}. -{json, "naked_true.json"}. diff --git a/priv/test_cases/noncharacter.json b/priv/test_cases/noncharacter.json deleted file mode 100644 index 09db417..0000000 --- a/priv/test_cases/noncharacter.json +++ /dev/null @@ -1 +0,0 @@ -"﷐" \ No newline at end of file diff --git a/priv/test_cases/noncharacter.test b/priv/test_cases/noncharacter.test deleted file mode 100644 index 6b3732c..0000000 --- a/priv/test_cases/noncharacter.test +++ /dev/null @@ -1,3 +0,0 @@ -{name, "noncharacter"}. -{jsx, {error, badjson}}. -{json, "noncharacter.json"}. 
\ No newline at end of file diff --git a/priv/test_cases/noncharacter_replaced.json b/priv/test_cases/noncharacter_replaced.json deleted file mode 100644 index 09db417..0000000 --- a/priv/test_cases/noncharacter_replaced.json +++ /dev/null @@ -1 +0,0 @@ -"﷐" \ No newline at end of file diff --git a/priv/test_cases/noncharacter_replaced.test b/priv/test_cases/noncharacter_replaced.test deleted file mode 100644 index 0944886..0000000 --- a/priv/test_cases/noncharacter_replaced.test +++ /dev/null @@ -1,4 +0,0 @@ -{name, "noncharacter replaced"}. -{jsx, [{string,<<16#fffd/utf8>>},end_json]}. -{json, "noncharacter_replaced.json"}. -{jsx_flags, [loose_unicode]}. \ No newline at end of file diff --git a/priv/test_cases/naked_null.json b/priv/test_cases/null.json similarity index 100% rename from priv/test_cases/naked_null.json rename to priv/test_cases/null.json diff --git a/priv/test_cases/null.test b/priv/test_cases/null.test new file mode 100644 index 0000000..ddb56d5 --- /dev/null +++ b/priv/test_cases/null.test @@ -0,0 +1,3 @@ +{name, "null"}. +{jsx, [{literal, null},end_json]}. +{json, "null.json"}. diff --git a/priv/test_cases/string_escapes.json b/priv/test_cases/string_escapes.json index 461bc67..3c9af78 100644 --- a/priv/test_cases/string_escapes.json +++ b/priv/test_cases/string_escapes.json @@ -1 +1 @@ -["\"", "\\", "\/", "\b", "\f", "\n", "\r", "\t"] \ No newline at end of file +["\"", "\\", "\b", "\f", "\n", "\r", "\t"] \ No newline at end of file diff --git a/priv/test_cases/string_escapes.test b/priv/test_cases/string_escapes.test index 7cd460c..8f6eeed 100644 --- a/priv/test_cases/string_escapes.test +++ b/priv/test_cases/string_escapes.test @@ -2,7 +2,6 @@ {jsx, [start_array, {string,<<"\"">>}, {string,<<"\\">>}, - {string,<<"/">>}, {string,<<"\b">>}, {string,<<"\f">>}, {string,<<"\n">>}, diff --git a/priv/test_cases/naked_true.json b/priv/test_cases/true.json similarity index 100% rename from priv/test_cases/naked_true.json rename to priv/test_cases/true.json diff --git a/priv/test_cases/true.test b/priv/test_cases/true.test new file mode 100644 index 0000000..4dfeb8c --- /dev/null +++ b/priv/test_cases/true.test @@ -0,0 +1,3 @@ +{name, "true"}. +{jsx, [{literal, true},end_json]}. +{json, "true.json"}. diff --git a/src/jsx.app.src b/src/jsx.app.src index 2b07c8b..4568dc9 100644 --- a/src/jsx.app.src +++ b/src/jsx.app.src @@ -1,7 +1,7 @@ {application, jsx, [ {description, "a streaming, evented json parsing toolkit"}, - {vsn, "1.0.2"}, + {vsn, "1.1"}, {modules, [ jsx, jsx_encoder, diff --git a/src/jsx.erl b/src/jsx.erl index 9820070..11630d6 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -77,7 +77,6 @@ format(Source, Opts) -> jsx_to_json:format(Source, Opts). | float() | binary(). - to_term(Source) -> to_term(Source, []). to_term(Source, Opts) -> jsx_to_term:to_term(Source, Opts). @@ -135,6 +134,58 @@ encoder_decoder_equiv_test_() -> ]. 
+single_quotes_test_() -> + [ + {"single quoted keys", + ?_assertEqual( + to_term(<<"{'key':true}">>, [single_quotes]), + [{<<"key">>, true}] + ) + }, + {"multiple single quoted keys", + ?_assertEqual( + to_term(<<"{'key':true, 'another key':true}">>, [single_quotes]), + [{<<"key">>, true}, {<<"another key">>, true}] + ) + }, + {"nested single quoted keys", + ?_assertEqual( + to_term(<<"{'key': {'key':true, 'another key':true}}">>, [single_quotes]), + [{<<"key">>, [{<<"key">>, true}, {<<"another key">>, true}]}] + ) + }, + {"single quoted string", + ?_assertEqual( + to_term(<<"['string']">>, [single_quotes]), + [<<"string">>] + ) + }, + {"single quote in double quoted string", + ?_assertEqual( + to_term(<<"[\"a single quote: '\"]">>, [single_quotes]), + [<<"a single quote: '">>] + ) + }, + {"escaped single quote in single quoted string", + ?_assertEqual( + to_term(<<"['a single quote: \\'']">>, [single_quotes]), + [<<"a single quote: '">>] + ) + }, + {"escaped single quote when single quotes are disallowed", + ?_assertError( + badarg, + to_term(<<"[\"a single quote: \\'\"]">>) + ) + }, + {"mismatched quotes", + ?_assertError( + badarg, + to_term(<<"['mismatched\"]">>, [single_quotes]) + ) + } + ]. + %% test handler init([]) -> []. @@ -209,7 +260,7 @@ decode(JSON, Flags) -> incremental_decode(<>, Flags) -> P = jsx_decoder:decoder(?MODULE, [], Flags ++ [explicit_end]), try incremental_decode_loop(P(C), Rest) - catch error:badarg -> io:format("~p~n", [erlang:get_stacktrace()]), {error, badjson} + catch error:badarg -> {error, badjson} end. incremental_decode_loop({incomplete, More}, <<>>) -> diff --git a/src/jsx_decoder.erl b/src/jsx_decoder.erl index 0a82ea2..7d4faa8 100644 --- a/src/jsx_decoder.erl +++ b/src/jsx_decoder.erl @@ -59,7 +59,8 @@ decoder(Handler, State, Opts) -> %% kv seperator -define(comma, 16#2C). --define(quote, 16#22). +-define(doublequote, 16#22). +-define(singlequote, 16#27). -define(colon, 16#3A). %% string escape sequences @@ -76,6 +77,9 @@ decoder(Handler, State, Opts) -> -define(negative, 16#2D). -define(positive, 16#2B). +%% comments +-define(star, 16#2A). + %% some useful guards -define(is_hex(Symbol), @@ -126,12 +130,15 @@ decoder(Handler, State, Opts) -> -define(new_seq(C), [C]). -define(acc_seq(Seq, C), [C] ++ Seq). +-define(acc_seq(Seq, C, D), [C, D] ++ Seq). -define(end_seq(Seq), unicode:characters_to_binary(lists:reverse(Seq))). 
-value(<>, Handler, Stack, Opts) -> +value(<>, Handler, Stack, Opts) -> string(Rest, Handler, [?new_seq()|Stack], Opts); +value(<>, Handler, Stack, Opts = #opts{single_quotes=true}) -> + string(Rest, Handler, [?new_seq(), single_quote|Stack], Opts); value(<<$t, Rest/binary>>, Handler, Stack, Opts) -> tr(Rest, Handler, Stack, Opts); value(<<$f, Rest/binary>>, Handler, Stack, Opts) -> @@ -139,37 +146,47 @@ value(<<$f, Rest/binary>>, Handler, Stack, Opts) -> value(<<$n, Rest/binary>>, Handler, Stack, Opts) -> nu(Rest, Handler, Stack, Opts); value(<>, Handler, Stack, Opts) -> - negative(Rest, Handler, [?new_seq($-)|Stack], Opts); + negative(Rest, Handler, [[$-]|Stack], Opts); value(<>, Handler, Stack, Opts) -> - zero(Rest, Handler, [?new_seq($0)|Stack], Opts); + zero(Rest, Handler, [[$0]|Stack], Opts); value(<>, Handler, Stack, Opts) when ?is_nonzero(S) -> - integer(Rest, Handler, [?new_seq(S)|Stack], Opts); + integer(Rest, Handler, [[S]|Stack], Opts); value(<>, {Handler, State}, Stack, Opts) -> object(Rest, {Handler, Handler:handle_event(start_object, State)}, [key|Stack], Opts); value(<>, {Handler, State}, Stack, Opts) -> array(Rest, {Handler, Handler:handle_event(start_array, State)}, [array|Stack], Opts); value(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> value(Rest, Handler, Stack, Opts); +value(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> value(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); value(<<>>, Handler, Stack, Opts) -> ?incomplete(value, <<>>, Handler, Stack, Opts); value(Bin, Handler, Stack, Opts) -> ?error([Bin, Handler, Stack, Opts]). -object(<>, Handler, Stack, Opts) -> +object(<>, Handler, Stack, Opts) -> string(Rest, Handler, [?new_seq()|Stack], Opts); +object(<>, Handler, Stack, Opts = #opts{single_quotes=true}) -> + string(Rest, Handler, [?new_seq(), single_quote|Stack], Opts); object(<>, {Handler, State}, [key|Stack], Opts) -> maybe_done(Rest, {Handler, Handler:handle_event(end_object, State)}, Stack, Opts); object(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> object(Rest, Handler, Stack, Opts); +object(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> object(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); object(<<>>, Handler, Stack, Opts) -> ?incomplete(object, <<>>, Handler, Stack, Opts); object(Bin, Handler, Stack, Opts) -> ?error([Bin, Handler, Stack, Opts]). 
-array(<>, Handler, Stack, Opts) -> +array(<>, Handler, Stack, Opts) -> string(Rest, Handler, [?new_seq()|Stack], Opts); +array(<>, Handler, Stack, Opts = #opts{single_quotes=true}) -> + string(Rest, Handler, [?new_seq(), single_quote|Stack], Opts); array(<<$t, Rest/binary>>, Handler, Stack, Opts) -> tr(Rest, Handler, Stack, Opts); array(<<$f, Rest/binary>>, Handler, Stack, Opts) -> @@ -177,11 +194,11 @@ array(<<$f, Rest/binary>>, Handler, Stack, Opts) -> array(<<$n, Rest/binary>>, Handler, Stack, Opts) -> nu(Rest, Handler, Stack, Opts); array(<>, Handler, Stack, Opts) -> - negative(Rest, Handler, [?new_seq($-)|Stack], Opts); + negative(Rest, Handler, [[$-]|Stack], Opts); array(<>, Handler, Stack, Opts) -> - zero(Rest, Handler, [?new_seq($0)|Stack], Opts); + zero(Rest, Handler, [[$0]|Stack], Opts); array(<>, Handler, Stack, Opts) when ?is_nonzero(S) -> - integer(Rest, Handler, [?new_seq(S)|Stack], Opts); + integer(Rest, Handler, [[S]|Stack], Opts); array(<>, {Handler, State}, Stack, Opts) -> object(Rest, {Handler, Handler:handle_event(start_object, State)}, [key|Stack], Opts); array(<>, {Handler, State}, Stack, Opts) -> @@ -189,7 +206,10 @@ array(<>, {Handler, State}, Stack, Opts) -> array(<>, {Handler, State}, [array|Stack], Opts) -> maybe_done(Rest, {Handler, Handler:handle_event(end_array, State)}, Stack, Opts); array(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> - array(Rest, Handler, Stack, Opts); + array(Rest, Handler, Stack, Opts); +array(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> array(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); array(<<>>, Handler, Stack, Opts) -> ?incomplete(array, <<>>, Handler, Stack, Opts); array(Bin, Handler, Stack, Opts) -> @@ -200,16 +220,24 @@ colon(<>, Handler, [key|Stack], Opts) -> value(Rest, Handler, [object|Stack], Opts); colon(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> colon(Rest, Handler, Stack, Opts); +colon(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> colon(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); colon(<<>>, Handler, Stack, Opts) -> ?incomplete(colon, <<>>, Handler, Stack, Opts); colon(Bin, Handler, Stack, Opts) -> ?error([Bin, Handler, Stack, Opts]). -key(<>, Handler, Stack, Opts) -> +key(<>, Handler, Stack, Opts) -> string(Rest, Handler, [?new_seq()|Stack], Opts); +key(<>, Handler, Stack, Opts = #opts{single_quotes=true}) -> + string(Rest, Handler, [?new_seq(), single_quote|Stack], Opts); key(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> - key(Rest, Handler, Stack, Opts); + key(Rest, Handler, Stack, Opts); +key(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> key(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); key(<<>>, Handler, Stack, Opts) -> ?incomplete(key, <<>>, Handler, Stack, Opts); key(Bin, Handler, Stack, Opts) -> @@ -233,49 +261,215 @@ partial_utf(<>) partial_utf(_) -> false. 
-string(<>, {Handler, State}, [Acc, key|Stack], Opts) -> - colon(Rest, - {Handler, Handler:handle_event({key, ?end_seq(Acc)}, State)}, - [key|Stack], - Opts - ); -string(<>, {Handler, State}, [Acc|Stack], Opts) -> - maybe_done(Rest, - {Handler, Handler:handle_event({string, ?end_seq(Acc)}, State)}, - Stack, - Opts - ); +%% explicitly whitelist ascii set for better efficiency (seriously, it's worth +%% almost a 20% increase) +string(<<32, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 32)|Stack], Opts); +string(<<33, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 33)|Stack], Opts); +string(<>, {Handler, State}, S, Opts) -> + case S of + [Acc, key|Stack] -> + colon(Rest, {Handler, Handler:handle_event({key, ?end_seq(Acc)}, State)}, [key|Stack], Opts); + [_Acc, single_quote|_Stack] -> + ?error([<>, {Handler, State}, S, Opts]); + [Acc|Stack] -> + maybe_done(Rest, {Handler, Handler:handle_event({string, ?end_seq(Acc)}, State)}, Stack, Opts) + end; +string(<<35, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 35)|Stack], Opts); +string(<<36, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 36)|Stack], Opts); +string(<<37, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 37)|Stack], Opts); +string(<<38, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 38)|Stack], Opts); +string(<>, {Handler, State}, S, Opts = #opts{single_quotes=true}) -> + case S of + [Acc, single_quote, key|Stack] -> + colon(Rest, {Handler, Handler:handle_event({key, ?end_seq(Acc)}, State)}, [key|Stack], Opts); + [Acc, single_quote|Stack] -> + maybe_done(Rest, {Handler, Handler:handle_event({string, ?end_seq(Acc)}, State)}, Stack, Opts); + [Acc|Stack] -> + string(Rest, {Handler, State}, [?acc_seq(Acc, ?singlequote)|Stack], Opts) + end; +string(<<40, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 40)|Stack], Opts); +string(<<41, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 41)|Stack], Opts); +string(<<42, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 42)|Stack], Opts); +string(<<43, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 43)|Stack], Opts); +string(<<44, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 44)|Stack], Opts); +string(<<45, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 45)|Stack], Opts); +string(<<46, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 46)|Stack], Opts); +string(<<47, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 47)|Stack], Opts); +string(<<48, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 48)|Stack], Opts); +string(<<49, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 49)|Stack], Opts); +string(<<50, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 50)|Stack], Opts); +string(<<51, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 51)|Stack], Opts); +string(<<52, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 52)|Stack], Opts); +string(<<53, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + 
string(Rest, Handler, [?acc_seq(Acc, 53)|Stack], Opts); +string(<<54, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 54)|Stack], Opts); +string(<<55, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 55)|Stack], Opts); +string(<<56, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 56)|Stack], Opts); +string(<<57, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 57)|Stack], Opts); +string(<<58, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 58)|Stack], Opts); +string(<<59, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 59)|Stack], Opts); +string(<<60, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 60)|Stack], Opts); +string(<<61, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 61)|Stack], Opts); +string(<<62, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 62)|Stack], Opts); +string(<<63, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 63)|Stack], Opts); +string(<<64, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 64)|Stack], Opts); +string(<<65, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 65)|Stack], Opts); +string(<<66, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 66)|Stack], Opts); +string(<<67, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 67)|Stack], Opts); +string(<<68, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 68)|Stack], Opts); +string(<<69, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 69)|Stack], Opts); +string(<<70, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 70)|Stack], Opts); +string(<<71, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 71)|Stack], Opts); +string(<<72, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 72)|Stack], Opts); +string(<<73, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 73)|Stack], Opts); +string(<<74, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 74)|Stack], Opts); +string(<<75, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 75)|Stack], Opts); +string(<<76, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 76)|Stack], Opts); +string(<<77, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 77)|Stack], Opts); +string(<<78, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 78)|Stack], Opts); +string(<<79, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 79)|Stack], Opts); +string(<<80, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 80)|Stack], Opts); +string(<<81, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 81)|Stack], Opts); +string(<<82, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 82)|Stack], Opts); +string(<<83, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, 
Handler, [?acc_seq(Acc, 83)|Stack], Opts); +string(<<84, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 84)|Stack], Opts); +string(<<85, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 85)|Stack], Opts); +string(<<86, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 86)|Stack], Opts); +string(<<87, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 87)|Stack], Opts); +string(<<88, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 88)|Stack], Opts); +string(<<89, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 89)|Stack], Opts); +string(<<90, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 90)|Stack], Opts); +string(<<91, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 91)|Stack], Opts); string(<>, Handler, Stack, Opts) -> escape(Rest, Handler, Stack, Opts); -%% things get dumb here. erlang doesn't properly restrict unicode non-characters -%% so you can't trust the codepoints it returns always -%% the range 32..16#fdcf is safe, so allow that -string(<>, Handler, [Acc|Stack], Opts) - when ?is_noncontrol(S), S < 16#fdd0 -> - string(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); -%% the range 16#fdf0..16#fffd is also safe -string(<>, Handler, [Acc|Stack], Opts) - when S > 16#fdef, S < 16#fffe -> - string(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); -%% yes, i think it's insane too -string(<>, Handler, [Acc|Stack], Opts) - when S > 16#ffff andalso - S =/= 16#1fffe andalso S =/= 16#1ffff andalso - S =/= 16#2fffe andalso S =/= 16#2ffff andalso - S =/= 16#3fffe andalso S =/= 16#3ffff andalso - S =/= 16#4fffe andalso S =/= 16#4ffff andalso - S =/= 16#5fffe andalso S =/= 16#5ffff andalso - S =/= 16#6fffe andalso S =/= 16#6ffff andalso - S =/= 16#7fffe andalso S =/= 16#7ffff andalso - S =/= 16#8fffe andalso S =/= 16#8ffff andalso - S =/= 16#9fffe andalso S =/= 16#9ffff andalso - S =/= 16#afffe andalso S =/= 16#affff andalso - S =/= 16#bfffe andalso S =/= 16#bffff andalso - S =/= 16#cfffe andalso S =/= 16#cffff andalso - S =/= 16#dfffe andalso S =/= 16#dffff andalso - S =/= 16#efffe andalso S =/= 16#effff andalso - S =/= 16#ffffe andalso S =/= 16#fffff andalso - S =/= 16#10fffe andalso S =/= 16#10ffff -> +string(<<93, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 93)|Stack], Opts); +string(<<94, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 94)|Stack], Opts); +string(<<95, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 95)|Stack], Opts); +string(<<96, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 96)|Stack], Opts); +string(<<97, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 97)|Stack], Opts); +string(<<98, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 98)|Stack], Opts); +string(<<99, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 99)|Stack], Opts); +string(<<100, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 100)|Stack], Opts); +string(<<101, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 101)|Stack], Opts); +string(<<102, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + 
string(Rest, Handler, [?acc_seq(Acc, 102)|Stack], Opts); +string(<<103, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 103)|Stack], Opts); +string(<<104, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 104)|Stack], Opts); +string(<<105, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 105)|Stack], Opts); +string(<<106, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 106)|Stack], Opts); +string(<<107, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 107)|Stack], Opts); +string(<<108, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 108)|Stack], Opts); +string(<<109, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 109)|Stack], Opts); +string(<<110, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 110)|Stack], Opts); +string(<<111, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 111)|Stack], Opts); +string(<<112, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 112)|Stack], Opts); +string(<<113, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 113)|Stack], Opts); +string(<<114, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 114)|Stack], Opts); +string(<<115, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 115)|Stack], Opts); +string(<<116, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 116)|Stack], Opts); +string(<<117, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 117)|Stack], Opts); +string(<<118, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 118)|Stack], Opts); +string(<<119, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 119)|Stack], Opts); +string(<<120, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 120)|Stack], Opts); +string(<<121, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 121)|Stack], Opts); +string(<<122, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 122)|Stack], Opts); +string(<<123, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 123)|Stack], Opts); +string(<<124, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 124)|Stack], Opts); +string(<<125, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 125)|Stack], Opts); +string(<<126, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 126)|Stack], Opts); +string(<<127, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 127)|Stack], Opts); +string(<>, Handler, [Acc|Stack], Opts) when ?is_noncontrol(S) -> string(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); string(Bin, Handler, Stack, Opts) -> case partial_utf(Bin) of @@ -288,21 +482,16 @@ string(Bin, Handler, Stack, Opts) -> end. %% we don't need to guard against partial utf here, because it's already taken -%% care of in string. 
theoretically, the last clause of noncharacter/4 is -%% unreachable -%% non-characters erlang doesn't recognize as non-characters -noncharacter(<>, Handler, [Acc|Stack], Opts) - when ?is_noncontrol(S) -> - string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts); -%% u+fffe and u+ffff -noncharacter(<<239, 191, X, Rest/binary>>, Handler, [Acc|Stack], Opts) - when X == 190; X == 191 -> - string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts); +%% care of in string %% surrogates noncharacter(<<237, X, _, Rest/binary>>, Handler, [Acc|Stack], Opts) when X >= 160 -> string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts); -noncharacter(Bin, Handler, Stack, Opts) -> - ?error([Bin, Handler, Stack, Opts]). +%% u+fffe and u+ffff for R14BXX +noncharacter(<<239, 191, X, Rest/binary>>, Handler, [Acc|Stack], Opts) when X == 190; X == 191 -> + string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts); +%% bad utf8 +noncharacter(<<_, Rest/binary>>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts). escape(<<$b, Rest/binary>>, Handler, [Acc|Stack], Opts) -> @@ -315,11 +504,16 @@ escape(<<$r, Rest/binary>>, Handler, [Acc|Stack], Opts) -> string(Rest, Handler, [?acc_seq(Acc, $\r)|Stack], Opts); escape(<<$t, Rest/binary>>, Handler, [Acc|Stack], Opts) -> string(Rest, Handler, [?acc_seq(Acc, $\t)|Stack], Opts); +escape(<>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, $\\)|Stack], Opts); +escape(<>, Handler, [Acc|Stack], Opts=#opts{escape_forward_slash=true}) -> + string(Rest, Handler, [?acc_seq(Acc, $/)|Stack], Opts); +escape(<>, Handler, [Acc|Stack], Opts) -> + string(Rest, Handler, [?acc_seq(Acc, $\")|Stack], Opts); +escape(<>, Handler, [Acc|Stack], Opts = #opts{single_quotes=true}) -> + string(Rest, Handler, [?acc_seq(Acc, ?singlequote)|Stack], Opts); escape(<<$u, Rest/binary>>, Handler, Stack, Opts) -> - escaped_unicode(Rest, Handler, [?new_seq()|Stack], Opts); -escape(<>, Handler, [Acc|Stack], Opts) - when S =:= ?quote; S =:= ?solidus; S =:= ?rsolidus -> - string(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); + escaped_unicode(Rest, Handler, Stack, Opts); escape(<<>>, Handler, Stack, Opts) -> ?incomplete(escape, <<>>, Handler, Stack, Opts); escape(Bin, Handler, Stack, Opts) -> @@ -328,96 +522,74 @@ escape(Bin, Handler, Stack, Opts) -> %% this code is ugly and unfortunate, but so is json's handling of escaped %% unicode codepoint sequences. 
-escaped_unicode(<>, Handler, [[C,B,A], Acc|Stack], Opts) - when ?is_hex(D) -> +escaped_unicode(<>, Handler, [Acc|Stack], Opts) + when ?is_hex(A), ?is_hex(B), ?is_hex(C), ?is_hex(D) -> case erlang:list_to_integer([A, B, C, D], 16) of - %% high surrogate, we need a low surrogate next + %% high surrogate, dispatch to low surrogate X when X >= 16#d800, X =< 16#dbff -> low_surrogate(Rest, Handler, [X, Acc|Stack], Opts) - %% non-characters, you're not allowed to exchange these - ; X when X == 16#fffe; X == 16#ffff; X >= 16#fdd0, X =< 16#fdef -> + %% low surrogate, illegal in this position + ; X when X >= 16#dc00, X =< 16#dfff -> case Opts#opts.loose_unicode of - true -> - string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts) - ; false -> - ?error([<>, Handler, [[C,B,A], Acc|Stack], Opts]) + true -> string(Rest, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts) + ; false -> ?error([<>, Handler, [Acc|Stack], Opts]) end %% anything else - ; X -> - string(Rest, Handler, [?acc_seq(Acc, X)|Stack], Opts) + ; X -> string(Rest, Handler, [?acc_seq(Acc, X)|Stack], Opts) end; -escaped_unicode(<>, Handler, [Acc|Stack], Opts) - when ?is_hex(S) -> - escaped_unicode(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); -escaped_unicode(<<>>, Handler, Stack, Opts) -> - ?incomplete(escaped_unicode, <<>>, Handler, Stack, Opts); escaped_unicode(Bin, Handler, Stack, Opts) -> - ?error([Bin, Handler, Stack, Opts]). - - -low_surrogate(<>, Handler, Stack, Opts) -> - low_surrogate_u(Rest, Handler, Stack, Opts); -%% not an escaped codepoint, our high codepoint is illegal. dispatch back to -%% string to handle -low_surrogate(<> = Bin, Handler, [High, String|Stack], Opts) -> - case Opts#opts.loose_unicode of - true -> - string(Bin, Handler, [?acc_seq(String, 16#fffd)|Stack], Opts) - ; false -> - ?error([<>, Handler, [High, String|Stack], Opts]) - end; -low_surrogate(<<>>, Handler, Stack, Opts) -> - ?incomplete(low_surrogate, <<>>, Handler, Stack, Opts); -low_surrogate(Bin, Handler, Stack, Opts) -> - ?error([Bin, Handler, Stack, Opts]). - - -low_surrogate_u(<<$u, Rest/binary>>, Handler, Stack, Opts) -> - low_surrogate_v(Rest, Handler, [?new_seq()|Stack], Opts); -low_surrogate_u(<<>>, Handler, Stack, Opts) -> - ?incomplete(low_surrogate_u, <<>>, Handler, Stack, Opts); -%% not a low surrogate, dispatch back to string to handle, including the -%% rsolidus we parsed previously -low_surrogate_u(Bin, Handler, [High, String|Stack], Opts) -> - case Opts#opts.loose_unicode of - true -> - string(<>, Handler, [?acc_seq(String, 16#fffd)|Stack], Opts) - ; false -> - ?error([Bin, Handler, [High, String|Stack], Opts]) + case is_partial_escape(Bin) of + true -> ?incomplete(escaped_unicode, Bin, Handler, Stack, Opts) + ; false -> ?error([Bin, Handler, Stack, Opts]) end. -low_surrogate_v(<>, Handler, [[C,B,A], High, String|Stack], Opts) - when ?is_hex(D) -> +is_partial_escape(<>) when ?is_hex(A), ?is_hex(B), ?is_hex(C) -> true; +is_partial_escape(<>) when ?is_hex(A), ?is_hex(B) -> true; +is_partial_escape(<>) when ?is_hex(A) -> true; +is_partial_escape(<<>>) -> true; +is_partial_escape(_) -> false. 
+ + +low_surrogate(<>, Handler, [High, Acc|Stack], Opts) + when ?is_hex(A), ?is_hex(B), ?is_hex(C), ?is_hex(D) -> case erlang:list_to_integer([A, B, C, D], 16) of - X when X >= 16#dc00, X =< 16#dfff -> - V = surrogate_to_codepoint(High, X), - case V rem 16#10000 of Y when Y == 16#fffe; Y == 16#ffff -> + X when X >= 16#dc00, X =< 16#dfff -> + Y = surrogate_to_codepoint(High, X), + case (Y =< 16#d800 orelse Y >= 16#e000) of + true -> string(Rest, Handler, [?acc_seq(Acc, Y)|Stack], Opts) + ; false -> case Opts#opts.loose_unicode of true -> - string(Rest, Handler, [?acc_seq(String, 16#fffd)|Stack], Opts) - ; false -> - ?error([<>, Handler, [[C,B,A], High, String|Stack], Opts]) + string(Rest, Handler, [?acc_seq(Acc, 16#fffd, 16#fffd)|Stack], Opts) + ; false -> + ?error([<>, Handler, [High, Acc|Stack], Opts]) end - ; _ -> - string(Rest, Handler, [?acc_seq(String, V)|Stack], Opts) end - %% not a low surrogate, bad bad bad ; _ -> case Opts#opts.loose_unicode of - true -> - string(Rest, Handler, [?acc_seq(?acc_seq(String, 16#fffd), 16#fffd)|Stack], Opts) - ; false -> - ?error([<>, Handler, [[C,B,A], High, String|Stack], Opts]) + true -> string(Rest, Handler, [?acc_seq(Acc, 16#fffd, 16#fffd)|Stack], Opts) + ; false -> ?error([<>, Handler, [High, Acc|Stack], Opts]) end end; -low_surrogate_v(<>, Handler, [Acc|Stack], Opts) - when ?is_hex(S) -> - low_surrogate_v(Rest, Handler, [?acc_seq(Acc, S)|Stack], Opts); -low_surrogate_v(<<>>, Handler, Stack, Opts) -> - ?incomplete(low_surrogate_v, <<>>, Handler, Stack, Opts); -low_surrogate_v(Bin, Handler, Stack, Opts) -> - ?error([Bin, Handler, Stack, Opts]). +low_surrogate(Bin, Handler, [High, Acc|Stack], Opts) -> + case is_partial_low(Bin) of + true -> ?incomplete(low_surrogate, Bin, Handler, [High, Acc|Stack], Opts) + ; false -> + case Opts#opts.loose_unicode of + true -> string(Bin, Handler, [?acc_seq(Acc, 16#fffd)|Stack], Opts) + ; false -> ?error([Bin, Handler, [High, Acc|Stack], Opts]) + end + end. + + +is_partial_low(<>) when ?is_hex(A), ?is_hex(B), ?is_hex(C) -> true; +is_partial_low(<>) when ?is_hex(A), ?is_hex(B) -> true; +is_partial_low(<>) when ?is_hex(A) -> true; +is_partial_low(<>) -> true; +is_partial_low(<>) -> true; +is_partial_low(<<>>) -> true; +is_partial_low(_) -> false. 
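once a high and a low surrogate have both been read they combine into a single supplementary plane codepoint with the usual formula from the unicode spec (the surrogate_to_codepoint/2 whose comment follows below). spelled out as plain arithmetic:

%% e.g. combine_surrogates(16#d83d, 16#de00) =:= 16#1f600
combine_surrogates(High, Low)
        when High >= 16#d800, High =< 16#dbff,
             Low >= 16#dc00, Low =< 16#dfff ->
    16#10000 + ((High - 16#d800) bsl 10) + (Low - 16#dc00).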
%% stole this from the unicode spec @@ -461,6 +633,9 @@ zero(<>, Handler, [Acc|Stack], Opts) -> initial_decimal(Rest, Handler, [{Acc, []}|Stack], Opts); zero(<>, {Handler, State}, [Acc|Stack], Opts) when ?is_whitespace(S) -> maybe_done(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); +zero(<>, {Handler, State}, [Acc|Stack], Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> maybe_done(R, H, S, O) end, + comment(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, [Resume|Stack], Opts); zero(<<>>, {Handler, State}, [Acc|Stack], Opts = #opts{explicit_end=false}) -> maybe_done(<<>>, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); zero(<<>>, Handler, Stack, Opts) -> @@ -499,6 +674,9 @@ integer(<>, Handler, [Acc|Stack], Opts) when S =:= $e; S =:= $E e(Rest, Handler, [{Acc, [], []}|Stack], Opts); integer(<>, {Handler, State}, [Acc|Stack], Opts) when ?is_whitespace(S) -> maybe_done(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); +integer(<>, {Handler, State}, [Acc|Stack], Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> maybe_done(R, H, S, O) end, + comment(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, [Resume|Stack], Opts); integer(<<>>, {Handler, State}, [Acc|Stack], Opts = #opts{explicit_end=false}) -> maybe_done(<<>>, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); integer(<<>>, Handler, Stack, Opts) -> @@ -544,6 +722,9 @@ decimal(<>, Handler, [{Int, Frac}|Stack], Opts) e(Rest, Handler, [{Int, Frac, []}|Stack], Opts); decimal(<>, {Handler, State}, [Acc|Stack], Opts) when ?is_whitespace(S) -> maybe_done(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); +decimal(<>, {Handler, State}, [Acc|Stack], Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> maybe_done(R, H, S, O) end, + comment(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, [Resume|Stack], Opts); decimal(<<>>, {Handler, State}, [Acc|Stack], Opts = #opts{explicit_end=false}) -> maybe_done(<<>>, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); decimal(<<>>, Handler, Stack, Opts) -> @@ -598,6 +779,9 @@ exp(<>, {Handler, State}, [Acc, array|Stack], Opts) -> value(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, [array|Stack], Opts); exp(<>, {Handler, State}, [Acc|Stack], Opts) when ?is_whitespace(S) -> maybe_done(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); +exp(<>, {Handler, State}, [Acc|Stack], Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> maybe_done(R, H, S, O) end, + comment(Rest, {Handler, Handler:handle_event(format_number(Acc), State)}, [Resume|Stack], Opts); exp(<<>>, {Handler, State}, [Acc|Stack], Opts = #opts{explicit_end=false}) -> maybe_done(<<>>, {Handler, Handler:handle_event(format_number(Acc), State)}, Stack, Opts); exp(<<>>, Handler, Stack, Opts) -> @@ -696,6 +880,48 @@ null(Bin, Handler, Stack, Opts) -> ?error([Bin, Handler, Stack, Opts]). +comment(<>, Handler, Stack, Opts) -> + single_comment(Rest, Handler, Stack, Opts); +comment(<>, Handler, Stack, Opts) -> + multi_comment(Rest, Handler, Stack, Opts); +comment(<<>>, Handler, Stack, Opts) -> + ?incomplete(comment, <<>>, Handler, Stack, Opts); +comment(Bin, Handler, Stack, Opts) -> + ?error([Bin, Handler, Stack, Opts]). 
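the Resume funs pushed onto the stack in the number clauses above are plain continuations: the parser captures where it was interrupted, consumes the comment, then hands whatever input remains back to that fun. a stripped down sketch of the same idea (hypothetical and self contained; the real code additionally has to return incomplete on truncated input):

skip_comment(<<"//", Rest/binary>>, Resume) -> skip_line(Rest, Resume);
skip_comment(<<"/*", Rest/binary>>, Resume) -> skip_block(Rest, Resume).

%% single line comments run to the next newline (or the end of the input)
skip_line(<<$\n, Rest/binary>>, Resume) -> Resume(Rest);
skip_line(<<_, Rest/binary>>, Resume) -> skip_line(Rest, Resume);
skip_line(<<>>, Resume) -> Resume(<<>>).

%% block comments run to the closing */
skip_block(<<"*/", Rest/binary>>, Resume) -> Resume(Rest);
skip_block(<<_, Rest/binary>>, Resume) -> skip_block(Rest, Resume);
skip_block(<<>>, _Resume) -> erlang:error(badarg).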
+ + +single_comment(<>, Handler, [Resume|Stack], Opts) -> + Resume(Rest, Handler, Stack, Opts); +single_comment(<<>>, Handler, [Resume|Stack], Opts) -> + Resume(<<>>, Handler, Stack, Opts); +single_comment(<<_S/utf8, Rest/binary>>, Handler, Stack, Opts) -> + single_comment(Rest, Handler, Stack, Opts); +single_comment(<<>>, Handler, Stack, Opts) -> + ?incomplete(single_comment, <<>>, Handler, Stack, Opts); +single_comment(Bin, Handler, Stack, Opts) -> + ?error([Bin, Handler, Stack, Opts]). + + +multi_comment(<>, Handler, Stack, Opts) -> + end_multi_comment(Rest, Handler, Stack, Opts); +multi_comment(<<_S/utf8, Rest/binary>>, Handler, Stack, Opts) -> + multi_comment(Rest, Handler, Stack, Opts); +multi_comment(<<>>, Handler, Stack, Opts) -> + ?incomplete(multi_comment, <<>>, Handler, Stack, Opts); +multi_comment(Bin, Handler, Stack, Opts) -> + ?error([Bin, Handler, Stack, Opts]). + + +end_multi_comment(<>, Handler, [Resume|Stack], Opts) -> + Resume(Rest, Handler, Stack, Opts); +end_multi_comment(<<_S/utf8, Rest/binary>>, Handler, Stack, Opts) -> + multi_comment(Rest, Handler, Stack, Opts); +end_multi_comment(<<>>, Handler, Stack, Opts) -> + ?incomplete(end_multi_comment, <<>>, Handler, Stack, Opts); +end_multi_comment(Bin, Handler, Stack, Opts) -> + ?error([Bin, Handler, Stack, Opts]). + + maybe_done(<>, {Handler, State}, [object|Stack], Opts) -> maybe_done(Rest, {Handler, Handler:handle_event(end_object, State)}, Stack, Opts); maybe_done(<>, {Handler, State}, [array|Stack], Opts) -> @@ -706,6 +932,9 @@ maybe_done(<>, Handler, [array|_] = Stack, Opts) -> value(Rest, Handler, Stack, Opts); maybe_done(<>, Handler, Stack, Opts) when ?is_whitespace(S) -> maybe_done(Rest, Handler, Stack, Opts); +maybe_done(<>, Handler, Stack, Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> maybe_done(R, H, S, O) end, + comment(Rest, Handler, [Resume|Stack], Opts); maybe_done(<<>>, Handler, Stack, Opts) when length(Stack) > 0 -> ?incomplete(maybe_done, <<>>, Handler, Stack, Opts); maybe_done(Rest, {Handler, State}, [], Opts) -> @@ -716,6 +945,9 @@ maybe_done(Bin, Handler, Stack, Opts) -> done(<>, Handler, [], Opts) when ?is_whitespace(S) -> done(Rest, Handler, [], Opts); +done(<>, Handler, [], Opts=#opts{comments=true}) -> + Resume = fun(R, H, S, O) -> done(R, H, S, O) end, + comment(Rest, Handler, [Resume], Opts); done(<<>>, {Handler, State}, [], Opts = #opts{explicit_end=true}) -> {incomplete, fun(Stream) when is_binary(Stream) -> done(<>, {Handler, State}, [], Opts) @@ -731,24 +963,196 @@ done(Bin, Handler, Stack, Opts) -> ?error([Bin, Handler, Stack, Opts]). -include_lib("eunit/include/eunit.hrl"). 
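at the event level, decoding with the flag set looks roughly like this (assuming jsx:decoder/3 is exported and accepts a flag list, as its use in jsx_to_term below suggests); this is the same behaviour the comments tests that follow assert:

%% comments are consumed silently and never produce events
Decode = jsx:decoder(jsx, [], [comments]),
[start_array, {literal, true}, end_array, end_json] =
    Decode(<<"[ true /* a comment */ ]">>).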
-noncharacters_test_() -> +comments_test_() -> [ - {"noncharacters - badjson", - ?_assertEqual(check_bad(noncharacters()), []) - }, - {"noncharacters - replaced", - ?_assertEqual(check_replaced(noncharacters()), []) - } + {"preceeding // comment", ?_assertEqual( + decode(<<"// comment ", ?newline, "[]">>, [comments]), + [start_array, end_array, end_json] + )}, + {"preceeding /**/ comment", ?_assertEqual( + decode(<<"/* comment */[]">>, [comments]), + [start_array, end_array, end_json] + )}, + {"trailing // comment", ?_assertEqual( + decode(<<"[]// comment", ?newline>>, [comments]), + [start_array, end_array, end_json] + )}, + {"trailing // comment (no newline)", ?_assertEqual( + decode(<<"[]// comment">>, [comments]), + [start_array, end_array, end_json] + )}, + {"trailing /**/ comment", ?_assertEqual( + decode(<<"[] /* comment */">>, [comments]), + [start_array, end_array, end_json] + )}, + {"// comment inside array", ?_assertEqual( + decode(<<"[ // comment", ?newline, "]">>, [comments]), + [start_array, end_array, end_json] + )}, + {"/**/ comment inside array", ?_assertEqual( + decode(<<"[ /* comment */ ]">>, [comments]), + [start_array, end_array, end_json] + )}, + {"// comment at beginning of array", ?_assertEqual( + decode(<<"[ // comment", ?newline, "true", ?newline, "]">>, [comments]), + [start_array, {literal, true}, end_array, end_json] + )}, + {"/**/ comment at beginning of array", ?_assertEqual( + decode(<<"[ /* comment */ true ]">>, [comments]), + [start_array, {literal, true}, end_array, end_json] + )}, + {"// comment at end of array", ?_assertEqual( + decode(<<"[ true // comment", ?newline, "]">>, [comments]), + [start_array, {literal, true}, end_array, end_json] + )}, + {"/**/ comment at end of array", ?_assertEqual( + decode(<<"[ true /* comment */ ]">>, [comments]), + [start_array, {literal, true}, end_array, end_json] + )}, + {"// comment midarray (post comma)", ?_assertEqual( + decode(<<"[ true, // comment", ?newline, "false ]">>, [comments]), + [start_array, {literal, true}, {literal, false}, end_array, end_json] + )}, + {"/**/ comment midarray (post comma)", ?_assertEqual( + decode(<<"[ true, /* comment */ false ]">>, [comments]), + [start_array, {literal, true}, {literal, false}, end_array, end_json] + )}, + {"// comment midarray (pre comma)", ?_assertEqual( + decode(<<"[ true// comment", ?newline, ", false ]">>, [comments]), + [start_array, {literal, true}, {literal, false}, end_array, end_json] + )}, + {"/**/ comment midarray (pre comma)", ?_assertEqual( + decode(<<"[ true/* comment */, false ]">>, [comments]), + [start_array, {literal, true}, {literal, false}, end_array, end_json] + )}, + {"// comment inside object", ?_assertEqual( + decode(<<"{ // comment", ?newline, "}">>, [comments]), + [start_object, end_object, end_json] + )}, + {"/**/ comment inside object", ?_assertEqual( + decode(<<"{ /* comment */ }">>, [comments]), + [start_object, end_object, end_json] + )}, + {"// comment at beginning of object", ?_assertEqual( + decode(<<"{ // comment", ?newline, " \"key\": true", ?newline, "}">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"/**/ comment at beginning of object", ?_assertEqual( + decode(<<"{ /* comment */ \"key\": true }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"// comment at end of object", ?_assertEqual( + decode(<<"{ \"key\": true // comment", ?newline, "}">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, 
end_json] + )}, + {"/**/ comment at end of object", ?_assertEqual( + decode(<<"{ \"key\": true /* comment */ }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"// comment midobject (post comma)", ?_assertEqual( + decode(<<"{ \"x\": true, // comment", ?newline, "\"y\": false }">>, [comments]), + [ + start_object, + {key, <<"x">>}, + {literal, true}, + {key, <<"y">>}, + {literal, false}, + end_object, + end_json + ] + )}, + {"/**/ comment midobject (post comma)", ?_assertEqual( + decode(<<"{ \"x\": true, /* comment */", ?newline, "\"y\": false }">>, [comments]), + [ + start_object, + {key, <<"x">>}, + {literal, true}, + {key, <<"y">>}, + {literal, false}, + end_object, + end_json + ] + )}, + {"// comment midobject (pre comma)", ?_assertEqual( + decode(<<"{ \"x\": true// comment", ?newline, ", \"y\": false }">>, [comments]), + [ + start_object, + {key, <<"x">>}, + {literal, true}, + {key, <<"y">>}, + {literal, false}, + end_object, + end_json + ] + )}, + {"/**/ comment midobject (pre comma)", ?_assertEqual( + decode(<<"{ \"x\": true/* comment */", ?newline, ", \"y\": false }">>, [comments]), + [ + start_object, + {key, <<"x">>}, + {literal, true}, + {key, <<"y">>}, + {literal, false}, + end_object, + end_json + ] + )}, + {"// comment precolon", ?_assertEqual( + decode(<<"{ \"key\" // comment", ?newline, ": true }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"/**/ comment precolon", ?_assertEqual( + decode(<<"{ \"key\"/* comment */: true }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"// comment postcolon", ?_assertEqual( + decode(<<"{ \"key\": // comment", ?newline, " true }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"/**/ comment postcolon", ?_assertEqual( + decode(<<"{ \"key\":/* comment */ true }">>, [comments]), + [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json] + )}, + {"// comment terminating zero", ?_assertEqual( + decode(<<"[ 0// comment", ?newline, "]">>, [comments]), + [start_array, {integer, 0}, end_array, end_json] + )}, + {"// comment terminating integer", ?_assertEqual( + decode(<<"[ 1// comment", ?newline, "]">>, [comments]), + [start_array, {integer, 1}, end_array, end_json] + )}, + {"// comment terminating float", ?_assertEqual( + decode(<<"[ 1.0// comment", ?newline, "]">>, [comments]), + [start_array, {float, 1.0}, end_array, end_json] + )}, + {"// comment terminating exp", ?_assertEqual( + decode(<<"[ 1e1// comment", ?newline, "]">>, [comments]), + [start_array, {float, 1.0e1}, end_array, end_json] + )}, + {"/**/ comment terminating zero", ?_assertEqual( + decode(<<"[ 0/* comment */ ]">>, [comments]), + [start_array, {integer, 0}, end_array, end_json] + )}, + {"/**/ comment terminating integer", ?_assertEqual( + decode(<<"[ 1/* comment */ ]">>, [comments]), + [start_array, {integer, 1}, end_array, end_json] + )}, + {"/**/ comment terminating float", ?_assertEqual( + decode(<<"[ 1.0/* comment */ ]">>, [comments]), + [start_array, {float, 1.0}, end_array, end_json] + )}, + {"/**/ comment terminating exp", ?_assertEqual( + decode(<<"[ 1e1/* comment */ ]">>, [comments]), + [start_array, {float, 1.0e1}, end_array, end_json] + )} ]. 
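the same flag should carry through to the term interface, since comments is added to the valid flags in the jsx_utils diff below (jsx:to_term/2 assumed here as the public entry point); comments never show up in the result:

%% the comment is skipped entirely
[{<<"key">>, true}] = jsx:to_term(<<"{\"key\": true /* a note */ }">>, [comments]).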
-extended_noncharacters_test_() -> +escape_forward_slash_test_() -> [ - {"extended noncharacters - badjson", - ?_assertEqual(check_bad(extended_noncharacters()), []) - }, - {"extended noncharacters - replaced", - ?_assertEqual(check_replaced(extended_noncharacters()), []) - } + {"escape forward slash test", ?_assertEqual( + decode(<<"[ \" \/ \" ]">>, [escape_forward_slash]), + [start_array, {string, <<" / ">>}, end_array, end_json] + )} ]. surrogates_test_() -> @@ -767,16 +1171,6 @@ control_test_() -> ?_assertEqual(check_bad(control_characters()), []) } ]. - -reserved_test_() -> - [ - {"reserved noncharacters - badjson", - ?_assertEqual(check_bad(reserved_space()), []) - }, - {"reserved noncharacters - replaced", - ?_assertEqual(check_replaced(reserved_space()), []) - } - ]. good_characters_test_() -> [ @@ -787,6 +1181,51 @@ good_characters_test_() -> ?_assertEqual(check_good(good_extended()), []) } ]. + +malformed_test_() -> + [ + {"malformed codepoint with 1 byte", + ?_assertEqual({error, badjson}, decode(<<128>>)) + }, + {"malformed codepoint with 2 bytes", + ?_assertEqual({error, badjson}, decode(<<128, 192>>)) + }, + {"malformed codepoint with 3 bytes", + ?_assertEqual({error, badjson}, decode(<<128, 192, 192>>)) + }, + {"malformed codepoint with 4 bytes", + ?_assertEqual({error, badjson}, decode(<<128, 192, 192, 192>>)) + } + ]. + +malformed_replaced_test_() -> + F = <<16#fffd/utf8>>, + [ + {"malformed codepoint with 1 byte", + ?_assertEqual( + [{string, <>}, end_json], + decode(<<34, 128, 34>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 2 bytes", + ?_assertEqual( + [{string, <>}, end_json], + decode(<<34, 128, 192, 34>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 3 bytes", + ?_assertEqual( + [{string, <>}, end_json], + decode(<<34, 128, 192, 192, 34>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 4 bytes", + ?_assertEqual( + [{string, <>}, end_json], + decode(<<34, 128, 192, 192, 192, 34>>, [loose_unicode]) + ) + } + ]. check_bad(List) -> @@ -812,40 +1251,23 @@ check([H|T], Opts, Acc) -> check(T, Opts, [{H, R}] ++ Acc). +decode(JSON) -> decode(JSON, []). + decode(JSON, Opts) -> try (decoder(jsx, [], Opts))(JSON) catch error:badarg -> {error, badjson} end. - -noncharacters() -> lists:seq(16#fffe, 16#ffff). - -extended_noncharacters() -> - [16#1fffe, 16#1ffff, 16#2fffe, 16#2ffff] - ++ [16#3fffe, 16#3ffff, 16#4fffe, 16#4ffff] - ++ [16#5fffe, 16#5ffff, 16#6fffe, 16#6ffff] - ++ [16#7fffe, 16#7ffff, 16#8fffe, 16#8ffff] - ++ [16#9fffe, 16#9ffff, 16#afffe, 16#affff] - ++ [16#bfffe, 16#bffff, 16#cfffe, 16#cffff] - ++ [16#dfffe, 16#dffff, 16#efffe, 16#effff] - ++ [16#ffffe, 16#fffff, 16#10fffe, 16#10ffff]. - surrogates() -> lists:seq(16#d800, 16#dfff). control_characters() -> lists:seq(1, 31). -reserved_space() -> lists:seq(16#fdd0, 16#fdef). - -good() -> [32, 33] - ++ lists:seq(16#23, 16#5b) - ++ lists:seq(16#5d, 16#d7ff) - ++ lists:seq(16#e000, 16#fdcf) - ++ lists:seq(16#fdf0, 16#fffd). +good() -> [32, 33] ++ lists:seq(16#23, 16#5b) ++ lists:seq(16#5d, 16#d7ff) ++ lists:seq(16#e000, 16#ffff). -good_extended() -> lists:seq(16#100000, 16#10fffd). +good_extended() -> lists:seq(16#100000, 16#10ffff). %% erlang refuses to encode certain codepoints, so fake them all to_fake_utf(N, utf8) when N < 16#0080 -> <<34/utf8, N:8, 34/utf8>>; diff --git a/src/jsx_encoder.erl b/src/jsx_encoder.erl index 68bff84..1dbd13f 100644 --- a/src/jsx_encoder.erl +++ b/src/jsx_encoder.erl @@ -25,7 +25,6 @@ -export([encoder/3]). 
- -spec encoder(Handler::module(), State::any(), Opts::jsx:opts()) -> jsx:encoder(). encoder(Handler, State, Opts) -> @@ -53,8 +52,8 @@ start(Term, {Handler, State}, Opts) -> Handler:handle_event(end_json, value(Term, {Handler, State}, Opts)). -value(String, {Handler, State}, _Opts) when is_binary(String) -> - Handler:handle_event({string, String}, State); +value(String, {Handler, State}, Opts) when is_binary(String) -> + Handler:handle_event({string, clean_string(String, Opts)}, State); value(Float, {Handler, State}, _Opts) when is_float(Float) -> Handler:handle_event({float, Float}, State); value(Int, {Handler, State}, _Opts) when is_integer(Int) -> @@ -78,9 +77,18 @@ list_or_object(List, {Handler, State}, Opts) -> object([{Key, Value}|Rest], {Handler, State}, Opts) -> - object(Rest, {Handler, - value(Value, {Handler, Handler:handle_event({key, fix_key(Key)}, State)}, Opts) - }, Opts); + object( + Rest, + { + Handler, + value( + Value, + {Handler, Handler:handle_event({key, clean_string(fix_key(Key), Opts)}, State)}, + Opts + ) + }, + Opts + ); object([], {Handler, State}, _Opts) -> Handler:handle_event(end_object, State); object(Term, Handler, Opts) -> ?error([Term, Handler, Opts]). @@ -91,8 +99,33 @@ list([], {Handler, State}, _Opts) -> Handler:handle_event(end_array, State); list(Term, Handler, Opts) -> ?error([Term, Handler, Opts]). -fix_key(Key) when is_binary(Key) -> Key; -fix_key(Key) when is_atom(Key) -> atom_to_binary(Key, utf8). +fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); +fix_key(Key) when is_binary(Key) -> Key. + + +clean_string(Bin, Opts) -> + case Opts#opts.json_escape of + true -> jsx_utils:json_escape(Bin, Opts); + false -> + case is_clean(Bin) of + true -> Bin; + false -> clean_string(Bin, [], Opts) + end + end. + + +is_clean(<<>>) -> true; +is_clean(<<_/utf8, Rest/binary>>) -> is_clean(Rest); +is_clean(_) -> false. + + +clean_string(Bin, _Acc, Opts=#opts{loose_unicode=false}) -> ?error([Bin, Opts]); +clean_string(<<>>, Acc, _Opts) -> unicode:characters_to_binary(lists:reverse(Acc)); +clean_string(<>, Acc, Opts) -> clean_string(Rest, [X] ++ Acc, Opts); +%% surrogates +clean_string(<<237, X, _, Rest/binary>>, Acc, Opts) when X >= 160 -> clean_string(Rest, [16#fffd] ++ Acc, Opts); +%% bad codepoints +clean_string(<<_, Rest/binary>>, Acc, Opts) -> clean_string(Rest, [16#fffd] ++ Acc, Opts). -ifdef(TEST). @@ -100,52 +133,56 @@ fix_key(Key) when is_atom(Key) -> atom_to_binary(Key, utf8). encode(Term) -> (encoder(jsx, [], []))(Term). +encode(Term, Opts) -> + try (encoder(jsx, [], Opts))(Term) + catch _:_ -> {error, badjson} + end. 
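for reference, what fix_key/1 and clean_string/2 above mean at the user level (assuming jsx:to_json/2 drives this encoder, as the readme describes): atom keys and binary keys encode identically

%% both forms produce {"key":"value"}
<<"{\"key\":\"value\"}">> = jsx:to_json([{key, <<"value">>}], []),
<<"{\"key\":\"value\"}">> = jsx:to_json([{<<"key">>, <<"value">>}], []).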
+ encode_test_() -> [ - {"naked string", ?_assert(encode(<<"a string">>) - =:= [{string, <<"a string">>}, end_json]) - }, - {"naked integer", ?_assert(encode(123) - =:= [{integer, 123}, end_json]) - }, - {"naked float", ?_assert(encode(1.23) - =:= [{float, 1.23}, end_json]) - }, - {"naked literal", ?_assert(encode(null) - =:= [{literal, null}, end_json]) - }, - {"empty object", ?_assert(encode([{}]) - =:= [start_object, end_object, end_json]) - }, - {"empty list", ?_assert(encode([]) - =:= [start_array, end_array, end_json]) - }, - {"simple list", ?_assert(encode([1,2,3,true,false]) - =:= [start_array, + {"naked string", ?_assertEqual(encode(<<"a string">>), [{string, <<"a string">>}, end_json])}, + {"naked integer", ?_assertEqual(encode(123), [{integer, 123}, end_json])}, + {"naked float", ?_assertEqual(encode(1.23), [{float, 1.23}, end_json])}, + {"naked literal", ?_assertEqual(encode(null), [{literal, null}, end_json])}, + {"empty object", ?_assertEqual(encode([{}]), [start_object, end_object, end_json])}, + {"empty list", ?_assertEqual(encode([]), [start_array, end_array, end_json])}, + {"simple list", ?_assertEqual( + encode([1,2,3,true,false]), + [ + start_array, {integer, 1}, {integer, 2}, {integer, 3}, {literal, true}, {literal, false}, end_array, - end_json]) + end_json + ] + ) }, - {"simple object", ?_assert(encode([{<<"a">>, true}, {<<"b">>, false}]) - =:= [start_object, + {"simple object", ?_assertEqual( + encode([{<<"a">>, true}, {<<"b">>, false}]), + [ + start_object, {key, <<"a">>}, {literal, true}, {key, <<"b">>}, {literal, false}, end_object, - end_json]) + end_json + ] + ) }, - {"complex term", ?_assert(encode([ - {<<"a">>, true}, - {<<"b">>, false}, - {<<"c">>, [1,2,3]}, - {<<"d">>, [{<<"key">>, <<"value">>}]} - ]) =:= [start_object, + {"complex term", ?_assertEqual( + encode([ + {<<"a">>, true}, + {<<"b">>, false}, + {<<"c">>, [1,2,3]}, + {<<"d">>, [{<<"key">>, <<"value">>}]} + ]), + [ + start_object, {key, <<"a">>}, {literal, true}, {key, <<"b">>}, @@ -162,15 +199,113 @@ encode_test_() -> {string, <<"value">>}, end_object, end_object, - end_json]) + end_json + ] + ) }, - {"atom keys", ?_assert(encode([{key, <<"value">>}]) - =:= [start_object, - {key, <<"key">>}, - {string, <<"value">>}, - end_object, - end_json]) + {"atom keys", ?_assertEqual( + encode([{key, <<"value">>}]), + [start_object, {key, <<"key">>}, {string, <<"value">>}, end_object, end_json] + ) } ]. +surrogates_test_() -> + [ + {"surrogates - badjson", + ?_assertEqual(check_bad(surrogates()), []) + }, + {"surrogates - replaced", + ?_assertEqual(check_replaced(surrogates()), []) + } + ]. + +good_characters_test_() -> + [ + {"acceptable codepoints", + ?_assertEqual(check_good(good()), []) + }, + {"acceptable extended", + ?_assertEqual(check_good(good_extended()), []) + } + ]. + +malformed_test_() -> + [ + {"malformed codepoint with 1 byte", ?_assertError(badarg, encode(<<128>>))}, + {"malformed codepoint with 2 bytes", ?_assertError(badarg, encode(<<128, 192>>))}, + {"malformed codepoint with 3 bytes", ?_assertError(badarg, encode(<<128, 192, 192>>))}, + {"malformed codepoint with 4 bytes", ?_assertError(badarg, encode(<<128, 192, 192, 192>>))} + ]. 
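roughly what those malformed tests mean when reached through the public api (an assumption on my part that jsx:to_json/2 hits the clean_string path above): bad utf8 in a string is badarg by default, and a u+fffd replacement with the loose_unicode flag

{'EXIT', {badarg, _}} = (catch jsx:to_json([<<128>>], [])),
<<"[\"", 16#fffd/utf8, "\"]">> = jsx:to_json([<<128>>], [loose_unicode]).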
+ +malformed_replaced_test_() -> + F = <<16#fffd/utf8>>, + [ + {"malformed codepoint with 1 byte", + ?_assertEqual( + [{string, <>}, end_json], + encode(<<128>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 2 bytes", + ?_assertEqual( + [{string, <>}, end_json], + encode(<<128, 192>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 3 bytes", + ?_assertEqual( + [{string, <>}, end_json], + encode(<<128, 192, 192>>, [loose_unicode]) + ) + }, + {"malformed codepoint with 4 bytes", + ?_assertEqual( + [{string, <>}, end_json], + encode(<<128, 192, 192, 192>>, [loose_unicode]) + ) + } + ]. + +check_bad(List) -> + lists:dropwhile(fun({_, {error, badjson}}) -> true ; (_) -> false end, + check(List, [], []) + ). + +check_replaced(List) -> + lists:dropwhile(fun({_, [{string, <<16#fffd/utf8>>}|_]}) -> true + ; (_) -> false + end, + check(List, [loose_unicode], []) + ). + +check_good(List) -> + lists:dropwhile(fun({_, [{string, _}|_]}) -> true ; (_) -> false end, + check(List, [], []) + ). + +check([], _Opts, Acc) -> Acc; +check([H|T], Opts, Acc) -> + R = encode(to_fake_utf(H, utf8), Opts), + check(T, Opts, [{H, R}] ++ Acc). + + +surrogates() -> lists:seq(16#d800, 16#dfff). + +good() -> lists:seq(1, 16#d7ff) ++ lists:seq(16#e000, 16#ffff). + +good_extended() -> lists:seq(16#100000, 16#10ffff). + +%% erlang refuses to encode certain codepoints, so fake them all +to_fake_utf(N, utf8) when N < 16#0080 -> <>; +to_fake_utf(N, utf8) when N < 16#0800 -> + <<0:5, Y:5, X:6>> = <>, + <<2#110:3, Y:5, 2#10:2, X:6>>; +to_fake_utf(N, utf8) when N < 16#10000 -> + <> = <>, + <<2#1110:4, Z:4, 2#10:2, Y:6, 2#10:2, X:6>>; +to_fake_utf(N, utf8) -> + <<0:3, W:3, Z:6, Y:6, X:6>> = <>, + <<2#11110:5, W:3, 2#10:2, Z:6, 2#10:2, Y:6, 2#10:2, X:6>>. + -endif. \ No newline at end of file diff --git a/src/jsx_opts.hrl b/src/jsx_opts.hrl index d49254b..a184dbe 100644 --- a/src/jsx_opts.hrl +++ b/src/jsx_opts.hrl @@ -2,5 +2,8 @@ loose_unicode = false, escape_forward_slash = false, explicit_end = false, - parser = auto + single_quotes = false, + no_jsonp_escapes = false, + comments = false, + json_escape = false }). \ No newline at end of file diff --git a/src/jsx_to_json.erl b/src/jsx_to_json.erl index 6d012f5..4d2ea14 100644 --- a/src/jsx_to_json.erl +++ b/src/jsx_to_json.erl @@ -39,7 +39,7 @@ -spec to_json(Source::any(), Opts::opts()) -> binary(). to_json(Source, Opts) when is_list(Opts) -> - (jsx:encoder(?MODULE, Opts, jsx_utils:extract_opts(Opts)))(Source). + (jsx:encoder(?MODULE, Opts, jsx_utils:extract_opts([json_escape] ++ Opts)))(Source). -spec format(Source::binary(), Opts::opts()) -> binary(). @@ -135,8 +135,8 @@ handle_event(Event, {[array|Stack], Acc, Opts = #opts{depth = Depth}}) -> handle_event(end_json, {[], Acc, _Opts}) -> unicode:characters_to_binary(Acc, utf8). 
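the encode(string, ...) change just below is the other half of the json_escape flag added above: jsx_utils:json_escape/2 has already run inside the encoder, so by the time a string reaches the formatter the only thing left to do is quote it. in sketch form (hypothetical helper, not the module's code):

%% the string arrives pre-escaped, so a bare quote wrap is enough
quote(AlreadyEscaped) when is_binary(AlreadyEscaped) -> [$", AlreadyEscaped, $"].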
-encode(string, String, Opts) -> - [?quote, jsx_utils:json_escape(String, Opts), ?quote]; +encode(string, String, _Opts) -> + [?quote, String, ?quote]; encode(literal, Literal, _Opts) -> erlang:atom_to_list(Literal); encode(integer, Integer, _Opts) -> @@ -186,176 +186,148 @@ teardown_nicedecimal_meck(_) -> basic_format_test_() -> [ - {"empty object", ?_assert(format(<<"{}">>, []) =:= <<"{}">>)}, - {"empty array", ?_assert(format(<<"[]">>, []) =:= <<"[]">>)}, - {"naked integer", ?_assert(format(<<"123">>, []) =:= <<"123">>)}, + {"empty object", ?_assertEqual(format(<<"{}">>, []), <<"{}">>)}, + {"empty array", ?_assertEqual(format(<<"[]">>, []), <<"[]">>)}, + {"naked integer", ?_assertEqual(format(<<"123">>, []), <<"123">>)}, {foreach, fun() -> setup_nicedecimal_meck(<<"1.23">>) end, fun(R) -> teardown_nicedecimal_meck(R) end, - [{"naked float", ?_assert(format(<<"1.23">>, []) =:= <<"1.23">>)}] - }, - {"naked string", ?_assert(format(<<"\"hi\"">>, []) =:= <<"\"hi\"">>)}, - {"naked literal", ?_assert(format(<<"true">>, []) =:= <<"true">>)}, - {"simple object", - ?_assert(format(<<" { \"key\" :\n\t \"value\"\r\r\r\n } ">>, - [] - ) =:= <<"{\"key\":\"value\"}">> - ) - }, - {"really simple object", - ?_assert(format(<<"{\"k\":\"v\"}">>, []) =:= <<"{\"k\":\"v\"}">>) - }, - {"nested object", - ?_assert(format(<<"{\"k\":{\"k\":\"v\"}, \"j\":{}}">>, [] - ) =:= <<"{\"k\":{\"k\":\"v\"},\"j\":{}}">> - ) - }, - {"simple array", - ?_assert(format(<<" [\n\ttrue,\n\tfalse , \n \tnull\n] ">>, - [] - ) =:= <<"[true,false,null]">> - ) - }, - {"really simple array", ?_assert(format(<<"[1]">>, []) =:= <<"[1]">>)}, - {"nested array", ?_assert(format(<<"[[[]]]">>, []) =:= <<"[[[]]]">>)}, - {"nested structures", - ?_assert(format( - <<"[{\"key\":\"value\", - \"another key\": \"another value\", - \"a list\": [true, false] - }, - [[{}]] - ]">>, [] - ) =:= <<"[{\"key\":\"value\",\"another key\":\"another value\",\"a list\":[true,false]},[[{}]]]">> - ) + [{"naked float", ?_assertEqual(format(<<"1.23">>, []), <<"1.23">>)}] }, + {"naked string", ?_assertEqual(format(<<"\"hi\"">>, []), <<"\"hi\"">>)}, + {"naked literal", ?_assertEqual(format(<<"true">>, []), <<"true">>)}, + {"simple object", ?_assertEqual( + format(<<" { \"key\" :\n\t \"value\"\r\r\r\n } ">>, []), + <<"{\"key\":\"value\"}">> + )}, + {"really simple object", ?_assertEqual(format(<<"{\"k\":\"v\"}">>, []) , <<"{\"k\":\"v\"}">>)}, + {"nested object", ?_assertEqual( + format(<<"{\"k\":{\"k\":\"v\"}, \"j\":{}}">>, []), + <<"{\"k\":{\"k\":\"v\"},\"j\":{}}">> + )}, + {"simple array", ?_assertEqual( + format(<<" [\n\ttrue,\n\tfalse , \n \tnull\n] ">>, []), + <<"[true,false,null]">> + )}, + {"really simple array", ?_assertEqual(format(<<"[1]">>, []), <<"[1]">>)}, + {"nested array", ?_assertEqual(format(<<"[[[]]]">>, []), <<"[[[]]]">>)}, + {"nested structures", ?_assertEqual( + format(<<"[ + { + \"key\":\"value\", + \"another key\": \"another value\", + \"a list\": [true, false] + }, + [[{}]] + ]">>, []), + <<"[{\"key\":\"value\",\"another key\":\"another value\",\"a list\":[true,false]},[[{}]]]">> + )}, {"simple nested structure", - ?_assert(format(<<"[[],{\"k\":[[],{}],\"j\":{}},[]]">>, [] - ) =:= <<"[[],{\"k\":[[],{}],\"j\":{}},[]]">> + ?_assertEqual( + format(<<"[[],{\"k\":[[],{}],\"j\":{}},[]]">>, []), + <<"[[],{\"k\":[[],{}],\"j\":{}},[]]">> ) } ]. 
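as the tests above show, format/2 with an empty option list minifies; a quick usage sketch (assuming jsx:format/2 forwards here, per the readme's jsx:format(JSON)):

%% whitespace is stripped, structure preserved
<<"{\"k\":\"v\"}">> = jsx:format(<<" { \"k\" : \"v\" } ">>, []).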
basic_to_json_test_() -> [ - {"empty object", ?_assert(to_json([{}], []) =:= <<"{}">>)}, - {"empty array", ?_assert(to_json([], []) =:= <<"[]">>)}, - {"naked integer", ?_assert(to_json(123, []) =:= <<"123">>)}, + {"empty object", ?_assertEqual(to_json([{}], []), <<"{}">>)}, + {"empty array", ?_assertEqual(to_json([], []), <<"[]">>)}, + {"naked integer", ?_assertEqual(to_json(123, []), <<"123">>)}, {foreach, fun() -> setup_nicedecimal_meck(<<"1.23">>) end, fun(R) -> teardown_nicedecimal_meck(R) end, - [{"naked float", ?_assert(to_json(1.23, []) =:= <<"1.23">>)}] + [{"naked float", ?_assertEqual(to_json(1.23, []) , <<"1.23">>)}] }, - {"naked string", ?_assert(to_json(<<"hi">>, []) =:= <<"\"hi\"">>)}, - {"naked literal", ?_assert(to_json(true, []) =:= <<"true">>)}, - {"simple object", - ?_assert(to_json( - [{<<"key">>, <<"value">>}], - [] - ) =:= <<"{\"key\":\"value\"}">> - ) - }, - {"nested object", - ?_assert(to_json( - [{<<"k">>,[{<<"k">>,<<"v">>}]},{<<"j">>,[{}]}], - [] - ) =:= <<"{\"k\":{\"k\":\"v\"},\"j\":{}}">> - ) - }, - {"simple array", - ?_assert(to_json( - [true, false, null], - [] - ) =:= <<"[true,false,null]">> - ) - }, - {"really simple array", ?_assert(to_json([1], []) =:= <<"[1]">>)}, - {"nested array", ?_assert(to_json([[[]]], []) =:= <<"[[[]]]">>)}, - {"nested structures", - ?_assert(to_json( + {"naked string", ?_assertEqual(to_json(<<"hi">>, []), <<"\"hi\"">>)}, + {"naked literal", ?_assertEqual(to_json(true, []), <<"true">>)}, + {"simple object", ?_assertEqual( + to_json( + [{<<"key">>, <<"value">>}], + [] + ), + <<"{\"key\":\"value\"}">> + )}, + {"nested object", ?_assertEqual( + to_json( + [{<<"k">>,[{<<"k">>,<<"v">>}]},{<<"j">>,[{}]}], + [] + ), + <<"{\"k\":{\"k\":\"v\"},\"j\":{}}">> + )}, + {"simple array", ?_assertEqual(to_json([true, false, null], []), <<"[true,false,null]">>)}, + {"really simple array", ?_assertEqual(to_json([1], []), <<"[1]">>)}, + {"nested array", ?_assertEqual(to_json([[[]]], []), <<"[[[]]]">>)}, + {"nested structures", ?_assertEqual( + to_json( + [ [ - [ - {<<"key">>, <<"value">>}, - {<<"another key">>, <<"another value">>}, - {<<"a list">>, [true, false]} - ], - [[[{}]]] + {<<"key">>, <<"value">>}, + {<<"another key">>, <<"another value">>}, + {<<"a list">>, [true, false]} ], - [] - ) =:= <<"[{\"key\":\"value\",\"another key\":\"another value\",\"a list\":[true,false]},[[{}]]]">> - ) - }, - {"simple nested structure", - ?_assert(to_json( - [[], [{<<"k">>, [[], [{}]]}, {<<"j">>, [{}]}], []], - [] - ) =:= <<"[[],{\"k\":[[],{}],\"j\":{}},[]]">> - ) - } + [[[{}]]] + ], + [] + ), + <<"[{\"key\":\"value\",\"another key\":\"another value\",\"a list\":[true,false]},[[{}]]]">> + )}, + {"simple nested structure", ?_assertEqual( + to_json( + [[], [{<<"k">>, [[], [{}]]}, {<<"j">>, [{}]}], []], + [] + ), + <<"[[],{\"k\":[[],{}],\"j\":{}},[]]">> + )} ]. 
opts_test_() -> [ - {"unspecified indent/space", - ?_assert(format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>, - [space, indent] - ) =:= <<"[\n true,\n false,\n null\n]">> - ) - }, - {"specific indent/space", - ?_assert(format( - <<"\n{\n\"key\" : [],\n\"another key\" : true\n}\n">>, - [{space, 2}, {indent, 3}] - ) =:= <<"{\n \"key\": [],\n \"another key\": true\n}">> - ) - }, - {"nested structures", - ?_assert(format( - <<"[{\"key\":\"value\", - \"another key\": \"another value\" - }, - [[true, false, null]] - ]">>, - [{space, 2}, {indent, 2}] - ) =:= <<"[\n {\n \"key\": \"value\",\n \"another key\": \"another value\"\n },\n [\n [\n true,\n false,\n null\n ]\n ]\n]">> - ) - }, - {"array spaces", - ?_assert(format(<<"[1,2,3]">>, - [{space, 2}] - ) =:= <<"[1, 2, 3]">> - ) - }, - {"object spaces", - ?_assert(format(<<"{\"a\":true,\"b\":true,\"c\":true}">>, - [{space, 2}] - ) =:= <<"{\"a\": true, \"b\": true, \"c\": true}">> - ) - }, + {"unspecified indent/space", ?_assertEqual( + format(<<" [\n\ttrue,\n\tfalse,\n\tnull\n] ">>, [space, indent]), + <<"[\n true,\n false,\n null\n]">> + )}, + {"specific indent/space", ?_assertEqual( + format( + <<"\n{\n\"key\" : [],\n\"another key\" : true\n}\n">>, + [{space, 2}, {indent, 3}] + ), + <<"{\n \"key\": [],\n \"another key\": true\n}">> + )}, + {"nested structures", ?_assertEqual( + format( + <<"[{\"key\":\"value\", \"another key\": \"another value\"}, [[true, false, null]]]">>, + [{space, 2}, {indent, 2}] + ), + <<"[\n {\n \"key\": \"value\",\n \"another key\": \"another value\"\n },\n [\n [\n true,\n false,\n null\n ]\n ]\n]">> + )}, + {"array spaces", ?_assertEqual( + format(<<"[1,2,3]">>, [{space, 2}]), + <<"[1, 2, 3]">> + )}, + {"object spaces", ?_assertEqual( + format(<<"{\"a\":true,\"b\":true,\"c\":true}">>, [{space, 2}]), + <<"{\"a\": true, \"b\": true, \"c\": true}">> + )}, {foreach, fun() -> setup_nicedecimal_meck(<<"1.23">>) end, fun(R) -> teardown_nicedecimal_meck(R) end, - [{ - "array indent", - ?_assert(format(<<"[1.23, 1.23, 1.23]">>, - [{indent, 2}] - ) =:= <<"[\n 1.23,\n 1.23,\n 1.23\n]">> - ) - }] + [{"array indent", ?_assertEqual( + format(<<"[1.23, 1.23, 1.23]">>, [{indent, 2}]), + <<"[\n 1.23,\n 1.23,\n 1.23\n]">> + )}] }, - {"object indent", - ?_assert(format(<<"{\"a\":true,\"b\":true,\"c\":true}">>, - [{indent, 2}] - ) =:= <<"{\n \"a\":true,\n \"b\":true,\n \"c\":true\n}">> - ) - } + {"object indent", ?_assertEqual( + format(<<"{\"a\":true,\"b\":true,\"c\":true}">>, [{indent, 2}]), + <<"{\n \"a\":true,\n \"b\":true,\n \"c\":true\n}">> + )} ]. ext_opts_test_() -> - [{"extopts", ?_assert(format(<<"[]">>, - [loose_unicode, {escape_forward_slash, true}] - ) =:= <<"[]">> - )} - ]. + [{"extopts", ?_assertEqual( + format(<<"[]">>, [loose_unicode, {escape_forward_slash, true}]), + <<"[]">> + )}]. -endif. \ No newline at end of file diff --git a/src/jsx_to_term.erl b/src/jsx_to_term.erl index b5c3b44..00785d0 100644 --- a/src/jsx_to_term.erl +++ b/src/jsx_to_term.erl @@ -33,15 +33,17 @@ -type opts() :: list(). - --spec to_term(Source::binary(), Opts::opts()) -> list({binary(), any()}) - | list(any()) +-type json_value() :: list({binary(), json_value()}) + | list(json_value()) | true | false | null | integer() | float() | binary(). + + +-spec to_term(Source::binary(), Opts::opts()) -> json_value(). to_term(Source, Opts) when is_list(Opts) -> (jsx:decoder(?MODULE, Opts, jsx_utils:extract_opts(Opts)))(Source). 
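to_term/2 produces exactly the mapping laid out in the readme; a few concrete shapes, mirroring the tests below (jsx:to_term/2 assumed as the public entry point):

%% empty object, simple object, array of literals
[{}] = jsx:to_term(<<"{}">>, []),
[{<<"a">>, 1}] = jsx:to_term(<<"{\"a\": 1}">>, []),
[true, false, null] = jsx:to_term(<<"[true, false, null]">>, []).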
@@ -109,35 +111,29 @@ format_key(Key, Opts) -> basic_test_() -> [ - {"empty object", ?_assert(to_term(<<"{}">>, []) =:= [{}])}, - {"simple object", ?_assert(to_term(<<"{\"key\": true}">>, []) =:= [{<<"key">>, true}])}, - {"less simple object", - ?_assert(to_term(<<"{\"a\": 1, \"b\": 2}">>, []) =:= [{<<"a">>, 1}, {<<"b">>, 2}]) - }, - {"nested object", - ?_assert(to_term(<<"{\"key\": {\"key\": true}}">>, []) =:= [{<<"key">>, [{<<"key">>, true}]}]) - }, + {"empty object", ?_assertEqual(to_term(<<"{}">>, []), [{}])}, + {"simple object", ?_assertEqual(to_term(<<"{\"key\": true}">>, []), [{<<"key">>, true}])}, + {"less simple object", ?_assertEqual( + to_term(<<"{\"a\": 1, \"b\": 2}">>, []), + [{<<"a">>, 1}, {<<"b">>, 2}] + )}, + {"nested object", ?_assertEqual( + to_term(<<"{\"key\": {\"key\": true}}">>, []), + [{<<"key">>, [{<<"key">>, true}]}] + )}, {"empty array", ?_assert(to_term(<<"[]">>, []) =:= [])}, - {"list of lists", - ?_assert(to_term(<<"[[],[],[]]">>, []) =:= [[], [], []]) - }, - {"list of strings", - ?_assert(to_term(<<"[\"hi\", \"there\"]">>, []) =:= [<<"hi">>, <<"there">>]) - }, - {"list of numbers", - ?_assert(to_term(<<"[1, 2.0, 3e4, -5]">>, []) =:= [1, 2.0, 3.0e4, -5]) - }, - {"list of literals", - ?_assert(to_term(<<"[true,false,null]">>, []) =:= [true,false,null]) - }, - {"list of objects", - ?_assert(to_term(<<"[{}, {\"a\":1, \"b\":2}, {\"key\":[true,false]}]">>, []) - =:= [[{}], [{<<"a">>,1},{<<"b">>,2}], [{<<"key">>,[true,false]}]]) - } + {"list of lists", ?_assertEqual(to_term(<<"[[],[],[]]">>, []), [[], [], []])}, + {"list of strings", ?_assertEqual(to_term(<<"[\"hi\", \"there\"]">>, []), [<<"hi">>, <<"there">>])}, + {"list of numbers", ?_assertEqual(to_term(<<"[1, 2.0, 3e4, -5]">>, []), [1, 2.0, 3.0e4, -5])}, + {"list of literals", ?_assertEqual(to_term(<<"[true,false,null]">>, []), [true,false,null])}, + {"list of objects", ?_assertEqual( + to_term(<<"[{}, {\"a\":1, \"b\":2}, {\"key\":[true,false]}]">>, []), + [[{}], [{<<"a">>,1},{<<"b">>,2}], [{<<"key">>,[true,false]}]] + )} ]. comprehensive_test_() -> - {"comprehensive test", ?_assert(to_term(comp_json(), []) =:= comp_term())}. + {"comprehensive test", ?_assertEqual(to_term(comp_json(), []), comp_term())}. comp_json() -> <<"[ @@ -164,7 +160,7 @@ comp_term() -> ]. atom_labels_test_() -> - {"atom labels test", ?_assert(to_term(comp_json(), [{labels, atom}]) =:= atom_term())}. + {"atom labels test", ?_assertEqual(to_term(comp_json(), [{labels, atom}]), atom_term())}. atom_term() -> [ @@ -180,10 +176,10 @@ atom_term() -> naked_test_() -> [ - {"naked integer", ?_assert(to_term(<<"123">>, []) =:= 123)}, - {"naked float", ?_assert(to_term(<<"-4.32e-17">>, []) =:= -4.32e-17)}, - {"naked literal", ?_assert(to_term(<<"true">>, []) =:= true)}, - {"naked string", ?_assert(to_term(<<"\"string\"">>, []) =:= <<"string">>)} + {"naked integer", ?_assertEqual(to_term(<<"123">>, []), 123)}, + {"naked float", ?_assertEqual(to_term(<<"-4.32e-17">>, []), -4.32e-17)}, + {"naked literal", ?_assertEqual(to_term(<<"true">>, []), true)}, + {"naked string", ?_assertEqual(to_term(<<"\"string\"">>, []), <<"string">>)} ]. -endif. 
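the {labels, atom} option exercised by atom_labels_test_ above converts object keys to atoms on decode; the difference in two lines (same assumption about jsx:to_term/2):

[{<<"id">>, 1}] = jsx:to_term(<<"{\"id\": 1}">>, []),
[{id, 1}] = jsx:to_term(<<"{\"id\": 1}">>, [{labels, atom}]).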
diff --git a/src/jsx_utils.erl b/src/jsx_utils.erl index 814092c..462e31d 100644 --- a/src/jsx_utils.erl +++ b/src/jsx_utils.erl @@ -43,21 +43,41 @@ parse_opts([escape_forward_slash|Rest], Opts) -> parse_opts(Rest, Opts#opts{escape_forward_slash=true}); parse_opts([explicit_end|Rest], Opts) -> parse_opts(Rest, Opts#opts{explicit_end=true}); +parse_opts([single_quotes|Rest], Opts) -> + parse_opts(Rest, Opts#opts{single_quotes=true}); +parse_opts([no_jsonp_escapes|Rest], Opts) -> + parse_opts(Rest, Opts#opts{no_jsonp_escapes=true}); +parse_opts([comments|Rest], Opts) -> + parse_opts(Rest, Opts#opts{comments=true}); +parse_opts([json_escape|Rest], Opts) -> + parse_opts(Rest, Opts#opts{json_escape=true}); parse_opts(_, _) -> {error, badarg}. +valid_flags() -> + [ + loose_unicode, + escape_forward_slash, + explicit_end, + single_quotes, + no_jsonp_escapes, + comments, + json_escape + ]. + + extract_opts(Opts) -> extract_parser_opts(Opts, []). extract_parser_opts([], Acc) -> Acc; extract_parser_opts([{K,V}|Rest], Acc) -> - case lists:member(K, [loose_unicode, escape_forward_slash, explicit_end]) of + case lists:member(K, valid_flags()) of true -> extract_parser_opts(Rest, [{K,V}] ++ Acc) ; false -> extract_parser_opts(Rest, Acc) end; extract_parser_opts([K|Rest], Acc) -> - case lists:member(K, [loose_unicode, escape_forward_slash, explicit_end]) of + case lists:member(K, valid_flags()) of true -> extract_parser_opts(Rest, [K] ++ Acc) ; false -> extract_parser_opts(Rest, Acc) end. @@ -68,59 +88,198 @@ extract_parser_opts([K|Rest], Acc) -> %% everything else should be a legal json string component json_escape(String, Opts) when is_binary(String) -> - json_escape(String, Opts, <<>>). + json_escape(String, Opts, 0, size(String)). -%% double quote -json_escape(<<$\", Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% backslash \ reverse solidus -json_escape(<<$\\, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% backspace -json_escape(<<$\b, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% form feed -json_escape(<<$\f, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% newline -json_escape(<<$\n, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% cr -json_escape(<<$\r, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% tab -json_escape(<<$\t, Rest/binary>>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -%% other control characters -json_escape(<>, Opts, Acc) when C >= 0, C < $\s -> - json_escape(Rest, - Opts, - <> - ); -%% escape forward slashes -- optionally -- to faciliate microsoft's retarded -%% date format -json_escape(<<$/, Rest/binary>>, Opts=#opts{escape_forward_slash=true}, Acc) -> - json_escape(Rest, Opts, <>); -%% escape u+2028 and u+2029 to avoid problems with jsonp -json_escape(<>, Opts, Acc) - when C == 16#2028; C == 16#2029 -> - json_escape(Rest, - Opts, - <> - ); -%% any other legal codepoint -json_escape(<>, Opts, Acc) -> - json_escape(Rest, Opts, <>); -json_escape(<<>>, _Opts, Acc) -> - Acc; -json_escape(Rest, Opts, Acc) -> - erlang:error(badarg, [Rest, Opts, Acc]). + +-define(control_character(X), + <> -> + json_escape( + <>, + Opts, + L + 6, + Len + 5 + ) +). 
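the macro above splices a \uXXXX sequence in place of a single control byte and bumps both the scan offset and the running length, so bytes that need no escaping are never copied clause by clause. the splice itself, as a hypothetical standalone helper (not the module's code):

%% splice_escape(<<"ab", 3, "cd">>, 2, 3) =:= <<"ab\\u0003cd">>
splice_escape(Str, L, Codepoint) ->
    <<Head:L/binary, _, Rest/binary>> = Str,
    Esc = iolist_to_binary(io_lib:format("\\u~4.16.0b", [Codepoint])),
    <<Head/binary, Esc/binary, Rest/binary>>.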
+ +json_escape(Str, Opts, L, Len) when L < Len -> + case Str of + ?control_character(0); + ?control_character(1); + ?control_character(2); + ?control_character(3); + ?control_character(4); + ?control_character(5); + ?control_character(6); + ?control_character(7); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + ?control_character(11); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + ?control_character(14); + ?control_character(15); + ?control_character(16); + ?control_character(17); + ?control_character(18); + ?control_character(19); + ?control_character(20); + ?control_character(21); + ?control_character(22); + ?control_character(23); + ?control_character(24); + ?control_character(25); + ?control_character(26); + ?control_character(27); + ?control_character(28); + ?control_character(29); + ?control_character(30); + ?control_character(31); + <<_:L/binary, 32, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 33, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + <<_:L/binary, 35, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 36, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 37, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 38, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 39, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 40, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 41, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 42, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 43, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 44, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 45, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 46, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <> -> + case Opts#opts.escape_forward_slash of + true -> + json_escape(<>, Opts, L + 2, Len + 1); + false -> + json_escape(<>, Opts, L + 1, Len) + end; + <<_:L/binary, 48, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 49, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 50, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 51, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 52, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 53, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 54, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 55, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 56, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 57, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 58, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 59, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 60, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 61, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 62, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 63, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 64, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 65, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 66, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 67, _/binary>> -> json_escape(Str, 
Opts, L + 1, Len); + <<_:L/binary, 68, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 69, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 70, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 71, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 72, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 73, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 74, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 75, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 76, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 77, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 78, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 79, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 80, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 81, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 82, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 83, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 84, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 85, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 86, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 87, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 88, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 89, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 90, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 91, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <> -> json_escape(<>, Opts, L + 2, Len + 1); + <<_:L/binary, 93, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 94, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 95, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 96, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 97, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 98, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 99, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 100, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 101, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 102, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 103, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 104, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 105, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 106, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 107, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 108, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 109, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 110, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 111, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 112, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 113, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 114, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 115, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 116, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 117, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 118, _/binary>> -> 
json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 119, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 120, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 121, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 122, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 123, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 124, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 125, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 126, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, 127, _/binary>> -> json_escape(Str, Opts, L + 1, Len); + <> -> + case Opts#opts.no_jsonp_escapes of + true -> + json_escape(<>, Opts, L + 3, Len); + false -> + B = unicode:characters_to_binary(json_escape_sequence(16#2028)), + json_escape(<>, Opts, L + size(B), Len + size(B) - size(<<16#2028/utf8>>)) + end; + <> -> + case Opts#opts.no_jsonp_escapes of + true -> + json_escape(<>, Opts, L + 3, Len); + false -> + B = unicode:characters_to_binary(json_escape_sequence(16#2029)), + json_escape(<>, Opts, L + size(B), Len + size(B) - size(<<16#2029/utf8>>)) + end; + <<_:L/binary, X/utf8, _/binary>> when X < 16#0080 -> + json_escape(Str, Opts, L + 1, Len); + <<_:L/binary, X/utf8, _/binary>> when X < 16#0800 -> + json_escape(Str, Opts, L + 2, Len); + <<_:L/binary, X/utf8, _/binary>> when X < 16#10000 -> + json_escape(Str, Opts, L + 3, Len); + <<_:L/binary, _/utf8, _/binary>> -> + json_escape(Str, Opts, L + 4, Len); + <> when X >= 160 -> + case Opts#opts.loose_unicode of + true -> json_escape(<>, Opts, L + 3, Len); + false -> erlang:error(badarg, [Str, Opts]) + end; + <> -> + case Opts#opts.loose_unicode of + true -> json_escape(<>, Opts, L + 3, Len + 2); + false -> erlang:error(badarg, [Str, Opts]) + end + end; +json_escape(Str, _, L, Len) when L =:= Len -> + Str. %% convert a codepoint to it's \uXXXX equiv. json_escape_sequence(X) -> <> = <>, - [$\\, $u, (to_hex(A)), (to_hex(B)), (to_hex(C)), (to_hex(D))]. + unicode:characters_to_binary([$\\, $u, (to_hex(A)), (to_hex(B)), (to_hex(C)), (to_hex(D))]). to_hex(10) -> $a; @@ -141,27 +300,55 @@ to_hex(X) -> X + 48. %% ascii "1" is [49], "2" is [50], etc... 
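json_escape_sequence/1 leans on a 4 bit split of the codepoint; for u+2028 that split, and the to_hex/1 above, work out like so:

%% the nibble split for 16#2028, spelled out
<<2:4, 0:4, 2:4, 8:4>> = <<16#2028:16>>,
"2028" = [to_hex(2), to_hex(0), to_hex(2), to_hex(8)].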
binary_escape_test_() -> [ {"json string escaping", - ?_assert(json_escape( - <<"\"\\\b\f\n\r\t">>, #opts{} - ) =:= <<"\\\"\\\\\\b\\f\\n\\r\\t">> + ?_assertEqual( + json_escape(<<"\"\\\b\f\n\r\t">>, #opts{}), + <<"\\\"\\\\\\b\\f\\n\\r\\t">> ) }, {"json string hex escape", - ?_assert(json_escape( - <<1, 2, 3, 11, 26, 30, 31>>, #opts{} - ) =:= <<"\\u0001\\u0002\\u0003\\u000b\\u001a\\u001e\\u001f">> + ?_assertEqual( + json_escape(<<0, 1, 2, 3, 11, 26, 30, 31>>, #opts{}), + <<"\\u0000\\u0001\\u0002\\u0003\\u000b\\u001a\\u001e\\u001f">> ) }, {"jsonp protection", - ?_assert(json_escape( - <<226, 128, 168, 226, 128, 169>>, #opts{} - ) =:= <<"\\u2028\\u2029">> + ?_assertEqual( + json_escape(<<226, 128, 168, 226, 128, 169>>, #opts{}), + <<"\\u2028\\u2029">> + ) + }, + {"no jsonp escapes", + ?_assertEqual( + json_escape(<<226, 128, 168, 226, 128, 169>>, #opts{no_jsonp_escapes=true}), + <<226, 128, 168, 226, 128, 169>> ) }, {"microsoft i hate your date format", - ?_assert(json_escape(<<"/Date(1303502009425)/">>, - #opts{escape_forward_slash=true} - ) =:= <<"\\/Date(1303502009425)\\/">> + ?_assertEqual( + json_escape(<<"/Date(1303502009425)/">>, #opts{escape_forward_slash=true}), + <<"\\/Date(1303502009425)\\/">> + ) + }, + {"bad utf8", + ?_assertError(badarg, json_escape(<<32, 64, 128, 255>>, #opts{})) + }, + {"bad utf8 ok", + ?_assertEqual( + json_escape(<<32, 64, 128, 255>>, #opts{loose_unicode=true}), + <<32, 64, 16#fffd/utf8, 16#fffd/utf8>> + ) + }, + {"bad surrogate", ?_assertError(badarg, json_escape(<<237, 160, 127>>, #opts{}))}, + {"bad surrogate ok", + ?_assertEqual( + json_escape(<<237, 160, 127>>, #opts{loose_unicode=true}), + <<16#fffd/utf8>> + ) + }, + {"all sizes of codepoints", + ?_assertEqual( + json_escape(unicode:characters_to_binary([0, 32, 16#80, 16#800, 16#10000]), #opts{}), + <<"\\u0000", 32/utf8, 16#80/utf8, 16#800/utf8, 16#10000/utf8>> ) } ]. diff --git a/src/jsx_verify.erl b/src/jsx_verify.erl index d49c90f..1a91b37 100644 --- a/src/jsx_verify.erl +++ b/src/jsx_verify.erl @@ -169,11 +169,7 @@ term_true_test_() -> {"empty array", ?_assert(is_term([], []))}, {"whitespace", ?_assert(is_term([ true ], []))}, {"nested terms", - ?_assert(is_term( - [[{x, [[{}], [{}], [{}]]}, {y, [{}]}], [{}], [[[]]]], - [] - ) - ) + ?_assert(is_term([[{x, [[{}], [{}], [{}]]}, {y, [{}]}], [{}], [[[]]]], [])) }, {"numbers", ?_assert(is_term([-1.0, -1, -0, 0, 1.0e-1, 1, 1.0, 1.0e1], []))