From 86f216d086b3f69fc93c417fdb26c067f264f532 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 26 Apr 2012 13:50:03 -0700 Subject: [PATCH 01/52] minor format fix in README --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 0e529ce..1f9c428 100644 --- a/README.markdown +++ b/README.markdown @@ -101,7 +101,7 @@ json objects are represented by erlang proplists. the empty object has the speci ### options ### -jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. flags are always atoms or {atom, Term} tuples. functions may have additional options beyond these, see individual function documentation for details +jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. flags are always atoms or `{atom, Term}` tuples. functions may have additional options beyond these, see individual function documentation for details #### `replaced_bad_utf8` #### From 04dd3c95c60ad38ce407c3e2900fb98bee648b16 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sat, 5 May 2012 20:12:37 -0700 Subject: [PATCH 02/52] fixes double compile issue, i hope --- rebar.config | 1 - 1 file changed, 1 deletion(-) diff --git a/rebar.config b/rebar.config index 5218e3b..853f732 100644 --- a/rebar.config +++ b/rebar.config @@ -3,7 +3,6 @@ {erl_opts, [ {i, "src"}, - {src_dirs, ["src"]}, warn_unused_vars, warn_export_all, warn_shadow_vars, From 2041d912313170a456414d6eec3ec49bba61a5c4 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 10 May 2012 23:43:31 -0700 Subject: [PATCH 03/52] reformat readme to better mirror erlang docs --- README.markdown | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/README.markdown b/README.markdown index 1f9c428..a6fe03d 100644 --- a/README.markdown +++ b/README.markdown @@ -32,15 +32,28 @@ jsx uses [rebar][rebar] for it's build chain to build the library: `rebar compile` -to convert a utf8 binary containing a json string into an erlang term: `jsx:to_term(JSON)` +to convert a utf8 binary containing a json string into an erlang term -to convert an erlang term into a utf8 binary containing a json string: `jsx:to_json(Term)` +```erlang + 1> jsx:to_term(<<"{\"library\": \"jsx\", \"awesome\": true}">>). + [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] +``` -to check if a binary is valid json: `jsx:is_json(JSON)` +to convert an erlang term into a utf8 binary containing a json string -to check if a term is valid json: `jsx:is_term(Term)` +```erlang + 1> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). + <<"[\"a\",\"list\",\"of\",\"words\"]">> +``` -to minify a json string: `jsx:format(JSON)` +to check if a binary or a term is valid json + +```erlang + 1> jsx:is_json(<<"[1]">>). + true + 2> jsx:is_term(1). 
+ true +``` ## api ## From 3c15c0fd8c701cf9a92f2fe083c31e609111372f Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 10 May 2012 23:46:42 -0700 Subject: [PATCH 04/52] change rebar formatting in quickstart --- README.markdown | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/README.markdown b/README.markdown index a6fe03d..1af8114 100644 --- a/README.markdown +++ b/README.markdown @@ -8,7 +8,7 @@ jsx is released under the terms of the [MIT][MIT] license jsx uses [rebar][rebar] for it's build chain -[![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=master)](http://travis-ci.org/talentdeficit/jsx) +[![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=)](http://travis-ci.org/talentdeficit/jsx) ## index ## @@ -30,7 +30,19 @@ jsx uses [rebar][rebar] for it's build chain ## quickstart ## -to build the library: `rebar compile` +to build the library + +```bash + tanga:jsx alisdair$ rebar compile + ==> jsx (compile) + Compiled src/jsx_verify.erl + Compiled src/jsx_utils.erl + Compiled src/jsx_to_term.erl + Compiled src/jsx_to_json.erl + Compiled src/jsx.erl + Compiled src/jsx_encoder.erl + Compiled src/jsx_decoder.erl +``` to convert a utf8 binary containing a json string into an erlang term From e18322779e74300b0ed9b46e630ba2fb164f9fcd Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sat, 12 May 2012 23:28:48 +0000 Subject: [PATCH 05/52] add prettify/1 and minify/1 --- src/jsx.erl | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/jsx.erl b/src/jsx.erl index 78dc409..ba32b01 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -26,7 +26,7 @@ -export([to_json/1, to_json/2]). -export([to_term/1, to_term/2]). -export([is_json/1, is_json/2, is_term/1, is_term/2]). --export([format/1, format/2]). +-export([format/1, format/2, minify/1, prettify/1]). -export([encoder/3, decoder/3]). %% old api -export([term_to_json/1, term_to_json/2, json_to_term/1, json_to_term/2]). @@ -60,6 +60,16 @@ format(Source) -> format(Source, []). format(Source, Opts) -> jsx_to_json:format(Source, Opts). +-spec minify(Source::binary()) -> binary(). + +minify(Source) -> format(Source, []). + + +-spec prettify(Source::binary()) -> binary(). + +prettify(Source) -> format(Source, [space, {indent, 2}]). 
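%% usage sketch for the two helpers above, assuming the quickstart examples
%% added to the README later in this series:
%%   1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>).
%%   <<"{\n  \"a list\": [\n    1,\n    2,\n    3\n  ]\n}">>
%%   2> jsx:minify(jsx:prettify(<<"{\"a list\":[1,2,3]}">>)).
%%   <<"{\"a list\":[1,2,3]}">>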
+ + -spec to_term(Source::binary()) -> list({binary(), any()}) | list(any()) | true From a2929c31081698836fe64dbc53d9b58f38e58055 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 17 May 2012 23:05:13 -0700 Subject: [PATCH 06/52] update to use sinan or rebar to build/test --- .gitignore | 1 + README.markdown | 30 ++++++++++++++++++++---------- sinan.config | 4 ++++ 3 files changed, 25 insertions(+), 10 deletions(-) create mode 100644 sinan.config diff --git a/.gitignore b/.gitignore index ba3a5b2..fc357b7 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ ebin/ deps/ *.orig /.eunit +_build/ diff --git a/README.markdown b/README.markdown index 1af8114..edda095 100644 --- a/README.markdown +++ b/README.markdown @@ -6,7 +6,7 @@ copyright 2011, 2012 alisdair sullivan jsx is released under the terms of the [MIT][MIT] license -jsx uses [rebar][rebar] for it's build chain +jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain [![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=)](http://travis-ci.org/talentdeficit/jsx) @@ -30,18 +30,27 @@ jsx uses [rebar][rebar] for it's build chain ## quickstart ## -to build the library +to build the library + +```bash + tanga:jsx alisdair$ sinan build +``` +or ```bash tanga:jsx alisdair$ rebar compile - ==> jsx (compile) - Compiled src/jsx_verify.erl - Compiled src/jsx_utils.erl - Compiled src/jsx_to_term.erl - Compiled src/jsx_to_json.erl - Compiled src/jsx.erl - Compiled src/jsx_encoder.erl - Compiled src/jsx_decoder.erl +``` + +to run tests + +```bash + tanga:jsx alisdair$ sinan -r tests eunit +``` + +or + +```bash + tanga:jsx alisdair$ rebar eunit ``` to convert a utf8 binary containing a json string into an erlang term @@ -367,5 +376,6 @@ jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaie [yajl]: http://lloyd.github.com/yajl [MIT]: http://www.opensource.org/licenses/mit-license.html [rebar]: https://github.com/basho/rebar +[sinan]: https://github.com/erlware/sinan [meck]: https://github.com/eproxus/meck [rfc4627]: http://tools.ietf.org/html/rfc4627[html4-non-html-data]: http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.2 diff --git a/sinan.config b/sinan.config new file mode 100644 index 0000000..68b26b8 --- /dev/null +++ b/sinan.config @@ -0,0 +1,4 @@ +{project_name, jsx}. +{project_vsn, "1.2.1"}. + +{compile_args, [{release, tests}], [{d, 'TEST'}]}. \ No newline at end of file From d0fad06d1fcdb1b039d03ff081b3bd7b86ec78e5 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 17 May 2012 23:07:09 -0700 Subject: [PATCH 07/52] remove unsightly indentation from quickstart examples --- README.markdown | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.markdown b/README.markdown index edda095..4841286 100644 --- a/README.markdown +++ b/README.markdown @@ -33,47 +33,47 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain to build the library ```bash - tanga:jsx alisdair$ sinan build +tanga:jsx alisdair$ sinan build ``` or ```bash - tanga:jsx alisdair$ rebar compile +tanga:jsx alisdair$ rebar compile ``` to run tests ```bash - tanga:jsx alisdair$ sinan -r tests eunit +tanga:jsx alisdair$ sinan -r tests eunit ``` or ```bash - tanga:jsx alisdair$ rebar eunit +tanga:jsx alisdair$ rebar eunit ``` to convert a utf8 binary containing a json string into an erlang term ```erlang - 1> jsx:to_term(<<"{\"library\": \"jsx\", \"awesome\": true}">>). 
- [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] +1> jsx:to_term(<<"{\"library\": \"jsx\", \"awesome\": true}">>). +[{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] ``` to convert an erlang term into a utf8 binary containing a json string ```erlang - 1> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). - <<"[\"a\",\"list\",\"of\",\"words\"]">> +1> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). +<<"[\"a\",\"list\",\"of\",\"words\"]">> ``` to check if a binary or a term is valid json ```erlang - 1> jsx:is_json(<<"[1]">>). - true - 2> jsx:is_term(1). - true +1> jsx:is_json(<<"[1]">>). +true +2> jsx:is_term(1). +true ``` From f511aeba70cd5b8cb4734395870a575c1dfdb6f6 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 20 May 2012 16:59:11 -0700 Subject: [PATCH 08/52] expanded examples in quickstart --- README.markdown | 41 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/README.markdown b/README.markdown index 4841286..091d93d 100644 --- a/README.markdown +++ b/README.markdown @@ -58,24 +58,57 @@ to convert a utf8 binary containing a json string into an erlang term ```erlang 1> jsx:to_term(<<"{\"library\": \"jsx\", \"awesome\": true}">>). [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] +2> jsx:to_term(<<"[\"a\",\"list\",\"of\",\"words\"]">>). +[<<"a">>, <<"list">>, <<"of">>, <<"words">>] ``` to convert an erlang term into a utf8 binary containing a json string ```erlang -1> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). -<<"[\"a\",\"list\",\"of\",\"words\"]">> +1> jsx:to_json([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). +<<"{\"library\": \"jsx\", \"awesome\": true}">> +2> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). +<<"[\"a\",\"list\",\"of\",\"words\"]">> ``` to check if a binary or a term is valid json ```erlang -1> jsx:is_json(<<"[1]">>). +1> jsx:is_json(<<"[\"this is json\"]">>). true -2> jsx:is_term(1). +2> jsx:is_json("[\"this is not\"]"). +false +3> jsx:is_term([<<"this is a term">>]). true +4> jsx:is_term(["this is not"]). +false ``` +to minify some json + +```erlang +1> jsx:minify(<<"{ + \"a list\": [ + 1, + 2, + 3 + ] +}">>). +<<"{\"a list\":[1,2,3]}">> +``` + +to prettify some json + +```erlang +1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>). 
+<<"{ + \"a list\": [ + 1, + 2, + 3 + ] +}">> +``` ## api ## From b19c1a4689ec688b0a6829077e41122d7fe4c08c Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 20 May 2012 17:10:07 -0700 Subject: [PATCH 09/52] fix travis ci status indicator --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 091d93d..a3e8b1b 100644 --- a/README.markdown +++ b/README.markdown @@ -8,7 +8,7 @@ jsx is released under the terms of the [MIT][MIT] license jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain -[![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=)](http://travis-ci.org/talentdeficit/jsx) +[![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=develop)](http://travis-ci.org/talentdeficit/jsx) ## index ## From 2d148c423d04039cbf8d586b01b159b53aa79b42 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Tue, 22 May 2012 21:41:07 -0700 Subject: [PATCH 10/52] remove all superfluous anchors --- README.markdown | 81 ++++++++++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/README.markdown b/README.markdown index a3e8b1b..eebdf5f 100644 --- a/README.markdown +++ b/README.markdown @@ -1,4 +1,4 @@ -# jsx (v1.2.1) # +# jsx (v1.2.1) # a sane [json][json] implementation for erlang, inspired by [yajl][yajl] @@ -13,22 +13,40 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain ## index ## -* [introduction](#intro) * [quickstart](#quickstart) -* [the api](#api) - - [json <-> erlang mapping](#mapping) +* [the api](#the-api) + - [json <-> erlang mapping](#json---erlang-mapping) + * [json representation](#json-representation) + * [erlang representation](#erlang-representation) + * [numbers](#numbers) + * [strings](#strings) + * [true, false and null](##true-false-and-null) + * [arrays](#arrays) + * [objects](#objects) - [options](#options) - - [incomplete input](#incompletes) - - [the encoder and decoder](#core) - - [handler callbacks](#handler) - - [converting json to erlang and vice versa](#convert) - - [formatting and minifying json text](#format) - - [verifying json and terms are valid input](#verify) -* [acknowledgments](#thanks) + * [replaced_bad_utf](#replaced_bad_utf8) + * [escaped_forward_slashes](#escaped_forward_slashes) + * [single_quoted_strings](#single_quoted_strings) + * [unescaped_jsonp](#unescaped_jsonp) + * [comments](#comments) + * [escaped_strings](#escaped_strings) + * [dirty_strings](#dirty_strings) + * [ignored_bad_escapes](#ignored_bad_escapes) + * [explicit_end](#explicit_end) + * [relax](#relax) + * [{pre_encode, F}](#pre_encode-f) + - [incomplete input](#incomplete-input) + - [the encoder and decoder](##the-encoder-and-decoder) + - [handler callbacks](#handler-callbacks) + - [converting json to erlang terms](#converting-json-to-erlang-terms) + - [converting erlang terms to json](#converting-erlang-terms-to-json) + - [formatting json texts](#formatting-json-texts) + - [verifying json texts](#verifying-json-texts) + - [verifying terms](#verifying-terms) +* [acknowledgements](#acknowledgements) - -## quickstart ## +## quickstart ## to build the library @@ -110,10 +128,10 @@ to prettify some json }">> ``` -## api ## +## the api ## -### json <-> erlang mapping ### +### json <-> erlang mapping ### **json** | **erlang** --------------------------------|-------------------------------- @@ -141,7 +159,7 @@ when converting from erlang to json, numbers are represented with their shortest #### strings 
#### -the [json spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` +the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates that appear otherwise are an error @@ -166,7 +184,7 @@ json arrays are represented with erlang lists of json values as described in thi json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values -### options ### +### options ### jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. flags are always atoms or `{atom, Term}` tuples. functions may have additional options beyond these, see individual function documentation for details @@ -176,7 +194,7 @@ json text input and json strings SHOULD be utf8 encoded binaries, appropriately #### `escaped_forward_slashes` #### -json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding, you may want to use this if you are embedding JSON directly into a HTML or XML document. See: [html4-non-html-data] +json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding, you may want to use this if you are embedding json directly into a html or xml document #### `single_quoted_strings` #### @@ -221,14 +239,14 @@ relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ign input can be any term, but output from the function must be a valid type for input -### incomplete input ### +### incomplete input ### jsx handles incomplete json texts. 
if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour, see above -### the encoder and decoder ### +### the encoder and decoder ### jsx is built on top of two finite state automata, one that handles json texts and one that handles erlang terms. both take a callback module as an argument that acts similar to a fold over a list of json 'events'. these events and the handler module's callbacks are detailed in the next section @@ -250,7 +268,7 @@ types: decoder returns an anonymous function that handles binary json input and encoder returns an anonymous function that handles erlang term input. these are safe to reuse for multiple inputs -### handler callbacks ### +### handler callbacks ### `Handler` should export the following pair of functions @@ -284,9 +302,7 @@ the event `end_json` will always be the last event emitted, you should take care both `key` and `string` are `utf8` encoded binaries with all escaped values converted into the appropriate codepoints -### converting json to erlang and vice versa ### - -#### converting json to erlang terms #### +### converting json to erlang terms ### `to_term` parses a JSON text (a utf8 encoded binary) and produces an erlang term (see json <-> erlang mapping details above) @@ -320,7 +336,7 @@ the option `labels` controls how keys are converted from json to erlang terms. ` if more than one decoder is declared a badarg exception will result -#### converting erlang terms to json #### +### converting erlang terms to json ### `to_json` parses an erlang term and produces a JSON text (see json <-> erlang mapping details below) @@ -344,9 +360,7 @@ the option `{space, N}` inserts `N` spaces after every comma and colon in your j the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. 
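a sketch combining the two options (assuming the quickstart term from above; the output matches the `prettify/1` example)

```erlang
1> jsx:to_json([{<<"a list">>, [1, 2, 3]}], [space, {indent, 2}]).
<<"{
  \"a list\": [
    1,
    2,
    3
  ]
}">>
```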
the default is `{indent, 0}` -### formatting and minifying json text ### - -#### formatting json texts #### +### formatting json texts ### produces a JSON text from JSON text, reformatted @@ -371,10 +385,8 @@ the option `{indent, N}` inserts a newline and `N` spaces for each level of inde calling `format` with no options results in minified json text -### verifying json and terms are valid input ### +### verifying json texts ### -#### verifying json texts #### - returns true if input is a valid JSON text, false if not `is_json(MaybeJSON)` -> `true` | `false` | `{incomplete, Fun}` @@ -387,7 +399,7 @@ types: * `Opts` = as above -#### verifying terms #### +### verifying terms ### returns true if input is a valid erlang term that represents a JSON text, false if not @@ -401,7 +413,7 @@ types: * `Opts` = as above -## acknowledgements ## +## acknowledgements ## jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides, alex kropivny, steve strong, michael truog and dmitry kolesnikov @@ -411,4 +423,5 @@ jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaie [rebar]: https://github.com/basho/rebar [sinan]: https://github.com/erlware/sinan [meck]: https://github.com/eproxus/meck -[rfc4627]: http://tools.ietf.org/html/rfc4627[html4-non-html-data]: http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.2 +[rfc4627]: http://tools.ietf.org/html/rfc4627 +[html4-non-html-data]: http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.2 From 34fb38e0a572e0c632c847babb950cf5d30d70fe Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Tue, 22 May 2012 21:42:47 -0700 Subject: [PATCH 11/52] remove third level of toc --- README.markdown | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/README.markdown b/README.markdown index eebdf5f..814ddb2 100644 --- a/README.markdown +++ b/README.markdown @@ -16,25 +16,7 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain * [quickstart](#quickstart) * [the api](#the-api) - [json <-> erlang mapping](#json---erlang-mapping) - * [json representation](#json-representation) - * [erlang representation](#erlang-representation) - * [numbers](#numbers) - * [strings](#strings) - * [true, false and null](##true-false-and-null) - * [arrays](#arrays) - * [objects](#objects) - [options](#options) - * [replaced_bad_utf](#replaced_bad_utf8) - * [escaped_forward_slashes](#escaped_forward_slashes) - * [single_quoted_strings](#single_quoted_strings) - * [unescaped_jsonp](#unescaped_jsonp) - * [comments](#comments) - * [escaped_strings](#escaped_strings) - * [dirty_strings](#dirty_strings) - * [ignored_bad_escapes](#ignored_bad_escapes) - * [explicit_end](#explicit_end) - * [relax](#relax) - * [{pre_encode, F}](#pre_encode-f) - [incomplete input](#incomplete-input) - [the encoder and decoder](##the-encoder-and-decoder) - [handler callbacks](#handler-callbacks) From 9b86657951e95d4649c012fa59e506f514ea3dae Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Tue, 22 May 2012 21:44:43 -0700 Subject: [PATCH 12/52] remove unused link and fix toc link to encoder and decoder --- README.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.markdown b/README.markdown index 814ddb2..071b855 100644 --- a/README.markdown +++ b/README.markdown @@ -18,7 +18,7 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain - [json <-> erlang mapping](#json---erlang-mapping) - [options](#options) - [incomplete input](#incomplete-input) - - [the 
encoder and decoder](##the-encoder-and-decoder) + - [the encoder and decoder](#the-encoder-and-decoder) - [handler callbacks](#handler-callbacks) - [converting json to erlang terms](#converting-json-to-erlang-terms) - [converting erlang terms to json](#converting-erlang-terms-to-json) @@ -406,4 +406,3 @@ jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaie [sinan]: https://github.com/erlware/sinan [meck]: https://github.com/eproxus/meck [rfc4627]: http://tools.ietf.org/html/rfc4627 -[html4-non-html-data]: http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.2 From 66304b9f5dc1d5836b3718d154b6e91475d52d65 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Tue, 22 May 2012 21:51:23 -0700 Subject: [PATCH 13/52] fix types and specs in jsx.erl --- src/jsx.erl | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/src/jsx.erl b/src/jsx.erl index ba32b01..b389d8b 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -37,9 +37,18 @@ -endif. +-type json() :: list({binary(), json()}) + | list(json()) + | true + | false + | null + | integer() + | float() + | binary(). --spec to_json(Source::any()) -> binary(). --spec to_json(Source::any(), Opts::jsx_to_json:opts()) -> binary(). + +-spec to_json(Source::json()) -> binary(). +-spec to_json(Source::json(), Opts::jsx_to_json:opts()) -> binary(). to_json(Source) -> to_json(Source, []). @@ -70,22 +79,8 @@ minify(Source) -> format(Source, []). prettify(Source) -> format(Source, [space, {indent, 2}]). --spec to_term(Source::binary()) -> list({binary(), any()}) - | list(any()) - | true - | false - | null - | integer() - | float() - | binary(). --spec to_term(Source::binary(), Opts::jsx_to_term:opts()) -> list({binary(), any()}) - | list(any()) - | true - | false - | null - | integer() - | float() - | binary(). +-spec to_term(Source::binary()) -> json(). +-spec to_term(Source::binary(), Opts::jsx_to_term:opts()) -> json(). to_term(Source) -> to_term(Source, []). @@ -98,8 +93,8 @@ json_to_term(Source) -> to_term(Source, []). json_to_term(Source, Opts) -> to_term(Source, Opts). --spec is_json(Source::binary()) -> true | false. --spec is_json(Source::binary(), Opts::jsx_verify:opts()) -> true | false. +-spec is_json(Source::any()) -> true | false. +-spec is_json(Source::any(), Opts::jsx_verify:opts()) -> true | false. is_json(Source) -> is_json(Source, []). 
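a quick round-trip sketch for the `json()` type the specs above now share (assuming default options and the quickstart data)

```erlang
1> Term = [{<<"library">>, <<"jsx">>}, {<<"awesome">>, true}].
[{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]
2> jsx:to_term(jsx:to_json(Term)) =:= Term.
true
```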
From f44ee7aeab4a49ec0f9e94bba848e4b790a67958 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Tue, 22 May 2012 23:37:37 -0700 Subject: [PATCH 14/52] add jsx_parser, a pda to be used in conjunction with custom tokenizers --- src/jsx_parser.erl | 269 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 269 insertions(+) create mode 100644 src/jsx_parser.erl diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl new file mode 100644 index 0000000..17d728e --- /dev/null +++ b/src/jsx_parser.erl @@ -0,0 +1,269 @@ +%% The MIT License + +%% Copyright (c) 2012 Alisdair Sullivan + +%% Permission is hereby granted, free of charge, to any person obtaining a copy +%% of this software and associated documentation files (the "Software"), to deal +%% in the Software without restriction, including without limitation the rights +%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +%% copies of the Software, and to permit persons to whom the Software is +%% furnished to do so, subject to the following conditions: + +%% The above copyright notice and this permission notice shall be included in +%% all copies or substantial portions of the Software. + +%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +%% THE SOFTWARE. + + +-module(jsx_parser). + +-export([parser/3]). + + +-spec parser(Handler::module(), State::any(), Opts::jsx:opts()) -> jsx:parser(). + +parser(Handler, State, Opts) -> + fun(Tokens) -> value(Tokens, {Handler, Handler:init(State)}, [], jsx_utils:parse_opts(Opts)) end. + + +-include("jsx_opts.hrl"). + + +%% error, incomplete and event macros +-ifndef(error). +-define(error(Args), + erlang:error(badarg, Args) +). +-endif. + + +-ifndef(incomplete). +-define(incomplete(State, Handler, Stack, Opts), + {incomplete, fun(Tokens) -> + State(Tokens, Handler, Stack, Opts) + ; (end_stream) -> + case State([end_json], + Handler, + Stack, + Opts#opts{explicit_end=false}) of + {incomplete, _} -> ?error([Handler, Stack, Opts]) + ; Events -> Events + end + end + } +). +-endif. + + +handle_event([], Handler, _Opts) -> Handler; +handle_event([Event|Rest], Handler, Opts) -> handle_event(Rest, handle_event(Event, Handler, Opts), Opts); +handle_event(Event, {Handler, State}, _Opts) -> {Handler, Handler:handle_event(Event, State)}. 
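%% handle_event/3 above folds one event, or a list of events, through the
%% callback module's handle_event/2, threading the handler state along; the
%% clauses below walk the token stream and emit those events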
+ + +value([start_object|Tokens], Handler, Stack, Opts) -> + object(Tokens, handle_event(start_object, Handler, Opts), [object|Stack], Opts); +value([start_array|Tokens], Handler, Stack, Opts) -> + array(Tokens, handle_event(start_array, Handler, Opts), [array|Stack], Opts); +value([{literal, true}|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, true}, Handler, Opts), Stack, Opts); +value([true|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, true}, Handler, Opts), Stack, Opts); +value([{literal, false}|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, false}, Handler, Opts), Stack, Opts); +value([false|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, false}, Handler, Opts), Stack, Opts); +value([{literal, null}|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, null}, Handler, Opts), Stack, Opts); +value([null|Tokens], Handler, Stack, Opts) -> + maybe_done(Tokens, handle_event({literal, null}, Handler, Opts), Stack, Opts); +value([{integer, Number}|Tokens], Handler, Stack, Opts) when is_integer(Number) -> + maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); +value([{float, Number}|Tokens], Handler, Stack, Opts) when is_float(Number) -> + maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); +value([{number, Number}|Tokens], Handler, Stack, Opts) when is_integer(Number) -> + maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); +value([{number, Number}|Tokens], Handler, Stack, Opts) when is_float(Number) -> + maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); +value([Number|Tokens], Handler, Stack, Opts) when is_integer(Number) -> + maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); +value([Number|Tokens], Handler, Stack, Opts) when is_float(Number) -> + maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); +value([{string, String}|Tokens], Handler, Stack, Opts) when is_binary(String) -> + maybe_done(Tokens, handle_event({string, String}, Handler, Opts), Stack, Opts); +value(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> + ?error([BadTokens, Handler, Stack, Opts]); +value(Token, Handler, Stack, Opts) -> + value([Token], Handler, Stack, Opts). + +object([end_object|Tokens], Handler, [object|Stack], Opts) -> + maybe_done(Tokens, handle_event(end_object, Handler, Opts), Stack, Opts); +object([{key, Key}|Tokens], Handler, Stack, Opts) when is_binary(Key) -> + value(Tokens, handle_event({key, Key}, Handler, Opts), Stack, Opts); +object(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> + ?error([BadTokens, Handler, Stack, Opts]); +object(Token, Handler, Stack, Opts) -> + object([Token], Handler, Stack, Opts). + +array([end_array|Tokens], Handler, [array|Stack], Opts) -> + maybe_done(Tokens, handle_event(end_array, Handler, Opts), Stack, Opts); +array(Tokens, Handler, Stack, Opts) when is_list(Tokens) -> + value(Tokens, Handler, Stack, Opts); +array(Token, Handler, Stack, Opts) -> + array([Token], Handler, Stack, Opts). 
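%% maybe_done/4 below routes control back into the enclosing object or array
%% on the stack and only accepts end_json once the stack is empty; note that
%% value/4 above also accepts bare literals and numbers (true, null, 123, 1.23)
%% alongside the tagged token forms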
+ +maybe_done([end_json], Handler, [], Opts) -> + {_, State} = handle_event(end_json, Handler, Opts), + State; +maybe_done(Tokens, Handler, [object|_] = Stack, Opts) when is_list(Tokens) -> + object(Tokens, Handler, Stack, Opts); +maybe_done(Tokens, Handler, [array|_] = Stack, Opts) when is_list(Tokens) -> + array(Tokens, Handler, Stack, Opts); +maybe_done(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> + ?error([BadTokens, Handler, Stack, Opts]); +maybe_done(Token, Handler, Stack, Opts) -> + maybe_done([Token], Handler, Stack, Opts). + + + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +encode(Term) -> encode(Term, []). + +encode(Term, Opts) -> + try (parser(jsx, [], Opts))(Term) + catch _:_ -> {error, badjson} + end. + + +encode_test_() -> + [ + {"naked string", ?_assertEqual( + encode([{string, <<"a string\n">>}, end_json]), [{string, <<"a string\n">>}, end_json] + )}, + {"naked integer - simple rep", ?_assertEqual( + encode([123, end_json]), [{integer, 123}, end_json] + )}, + {"naked integer - alt rep", ?_assertEqual( + encode([{number, 123}, end_json]), [{integer, 123}, end_json] + )}, + {"naked integer - full rep", ?_assertEqual( + encode([{integer, 123}, end_json]), [{integer, 123}, end_json] + )}, + {"naked float - simple rep", ?_assertEqual( + encode([1.23, end_json]), [{float, 1.23}, end_json] + )}, + {"naked float - alt rep", ?_assertEqual( + encode([{number, 1.23}, end_json]), [{float, 1.23}, end_json] + )}, + {"naked float - full rep", ?_assertEqual( + encode([{float, 1.23}, end_json]), [{float, 1.23}, end_json] + )}, + {"naked literal - simple red", ?_assertEqual( + encode([null, end_json]), [{literal, null}, end_json] + )}, + {"naked literal - full rep", ?_assertEqual( + encode([{literal, null}, end_json]), [{literal, null}, end_json] + )}, + {"empty object", ?_assertEqual( + encode([start_object, end_object, end_json]), [start_object, end_object, end_json] + )}, + {"empty list", ?_assertEqual( + encode([start_array, end_array, end_json]), [start_array, end_array, end_json] + )}, + {"simple list", ?_assertEqual( + encode([ + start_array, + {integer, 1}, + {integer, 2}, + {integer, 3}, + {literal, true}, + {literal, false}, + end_array, + end_json + ]), + [ + start_array, + {integer, 1}, + {integer, 2}, + {integer, 3}, + {literal, true}, + {literal, false}, + end_array, + end_json + ] + ) + }, + {"simple object", ?_assertEqual( + encode([ + start_object, + {key, <<"a">>}, + {literal, true}, + {key, <<"b">>}, + {literal, false}, + end_object, + end_json + ]), + [ + start_object, + {key, <<"a">>}, + {literal, true}, + {key, <<"b">>}, + {literal, false}, + end_object, + end_json + ] + ) + }, + {"complex term", ?_assertEqual( + encode([ + start_object, + {key, <<"a">>}, + {literal, true}, + {key, <<"b">>}, + {literal, false}, + {key, <<"c">>}, + start_array, + {integer, 1}, + {integer, 2}, + {integer, 3}, + end_array, + {key, <<"d">>}, + start_object, + {key, <<"key">>}, + {string, <<"value">>}, + end_object, + end_object, + end_json + ]), + [ + start_object, + {key, <<"a">>}, + {literal, true}, + {key, <<"b">>}, + {literal, false}, + {key, <<"c">>}, + start_array, + {integer, 1}, + {integer, 2}, + {integer, 3}, + end_array, + {key, <<"d">>}, + start_object, + {key, <<"key">>}, + {string, <<"value">>}, + end_object, + end_object, + end_json + ] + ) + } + ]. + +-endif. 
\ No newline at end of file From 44446357d0d141b397cc627e166d5f808c00011e Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 00:52:53 -0700 Subject: [PATCH 15/52] add tests to jsx_parser, add it to app manifest --- src/jsx.app.src | 1 + src/jsx_parser.erl | 825 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 807 insertions(+), 19 deletions(-) diff --git a/src/jsx.app.src b/src/jsx.app.src index e42951a..59cad9b 100644 --- a/src/jsx.app.src +++ b/src/jsx.app.src @@ -6,6 +6,7 @@ jsx, jsx_encoder, jsx_decoder, + jsx_parser, jsx_to_json, jsx_to_term, jsx_utils, diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index 17d728e..9146c67 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -45,16 +45,16 @@ parser(Handler, State, Opts) -> -ifndef(incomplete). -define(incomplete(State, Handler, Stack, Opts), - {incomplete, fun(Tokens) -> - State(Tokens, Handler, Stack, Opts) - ; (end_stream) -> + {incomplete, fun(end_stream) -> case State([end_json], Handler, Stack, Opts#opts{explicit_end=false}) of {incomplete, _} -> ?error([Handler, Stack, Opts]) ; Events -> Events - end + end + ; (Tokens) -> + State(Tokens, Handler, Stack, Opts) end } ). @@ -70,32 +70,44 @@ value([start_object|Tokens], Handler, Stack, Opts) -> object(Tokens, handle_event(start_object, Handler, Opts), [object|Stack], Opts); value([start_array|Tokens], Handler, Stack, Opts) -> array(Tokens, handle_event(start_array, Handler, Opts), [array|Stack], Opts); +value([{literal, true}|Tokens], Handler, [], Opts) -> + done(Tokens, handle_event({literal, true}, Handler, Opts), [], Opts); +value([{literal, false}|Tokens], Handler, [], Opts) -> + done(Tokens, handle_event({literal, false}, Handler, Opts), [], Opts); +value([{literal, null}|Tokens], Handler, [], Opts) -> + done(Tokens, handle_event({literal, null}, Handler, Opts), [], Opts); value([{literal, true}|Tokens], Handler, Stack, Opts) -> maybe_done(Tokens, handle_event({literal, true}, Handler, Opts), Stack, Opts); -value([true|Tokens], Handler, Stack, Opts) -> - maybe_done(Tokens, handle_event({literal, true}, Handler, Opts), Stack, Opts); value([{literal, false}|Tokens], Handler, Stack, Opts) -> maybe_done(Tokens, handle_event({literal, false}, Handler, Opts), Stack, Opts); -value([false|Tokens], Handler, Stack, Opts) -> - maybe_done(Tokens, handle_event({literal, false}, Handler, Opts), Stack, Opts); value([{literal, null}|Tokens], Handler, Stack, Opts) -> maybe_done(Tokens, handle_event({literal, null}, Handler, Opts), Stack, Opts); -value([null|Tokens], Handler, Stack, Opts) -> - maybe_done(Tokens, handle_event({literal, null}, Handler, Opts), Stack, Opts); +value([Literal|Tokens], Handler, Stack, Opts) when Literal == true; Literal == false; Literal == null -> + value([{literal, Literal}] ++ Tokens, Handler, Stack, Opts); +value([{integer, Number}|Tokens], Handler, [], Opts) when is_integer(Number) -> + done(Tokens, handle_event({integer, Number}, Handler, Opts), [], Opts); +value([{float, Number}|Tokens], Handler, [], Opts) when is_float(Number) -> + done(Tokens, handle_event({float, Number}, Handler, Opts), [], Opts); value([{integer, Number}|Tokens], Handler, Stack, Opts) when is_integer(Number) -> maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); value([{float, Number}|Tokens], Handler, Stack, Opts) when is_float(Number) -> maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); value([{number, Number}|Tokens], Handler, Stack, Opts) when is_integer(Number) -> - 
maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); + value([{integer, Number}] ++ Tokens, Handler, Stack, Opts); value([{number, Number}|Tokens], Handler, Stack, Opts) when is_float(Number) -> - maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); + value([{float, Number}] ++ Tokens, Handler, Stack, Opts); value([Number|Tokens], Handler, Stack, Opts) when is_integer(Number) -> - maybe_done(Tokens, handle_event({integer, Number}, Handler, Opts), Stack, Opts); + value([{integer, Number}] ++ Tokens, Handler, Stack, Opts); value([Number|Tokens], Handler, Stack, Opts) when is_float(Number) -> - maybe_done(Tokens, handle_event({float, Number}, Handler, Opts), Stack, Opts); + value([{float, Number}] ++ Tokens, Handler, Stack, Opts); +value([{string, String}|Tokens], Handler, [], Opts) when is_binary(String) -> + done(Tokens, handle_event({string, clean_string(String, Opts)}, Handler, Opts), [], Opts); value([{string, String}|Tokens], Handler, Stack, Opts) when is_binary(String) -> - maybe_done(Tokens, handle_event({string, String}, Handler, Opts), Stack, Opts); + maybe_done(Tokens, handle_event({string, clean_string(String, Opts)}, Handler, Opts), Stack, Opts); +value([String|Tokens], Handler, Stack, Opts) when is_binary(String) -> + value([{string, String}] ++ Tokens, Handler, Stack, Opts); +value([], Handler, Stack, Opts) -> + ?incomplete(value, Handler, Stack, Opts); value(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> ?error([BadTokens, Handler, Stack, Opts]); value(Token, Handler, Stack, Opts) -> @@ -103,8 +115,12 @@ value(Token, Handler, Stack, Opts) -> object([end_object|Tokens], Handler, [object|Stack], Opts) -> maybe_done(Tokens, handle_event(end_object, Handler, Opts), Stack, Opts); -object([{key, Key}|Tokens], Handler, Stack, Opts) when is_binary(Key) -> - value(Tokens, handle_event({key, Key}, Handler, Opts), Stack, Opts); +object([{key, Key}|Tokens], Handler, Stack, Opts) when is_atom(Key); is_binary(Key) -> + value(Tokens, handle_event({key, clean_string(fix_key(Key), Opts)}, Handler, Opts), Stack, Opts); +object([Key|Tokens], Handler, Stack, Opts) when is_atom(Key); is_binary(Key) -> + value(Tokens, handle_event({key, clean_string(fix_key(Key), Opts)}, Handler, Opts), Stack, Opts); +object([], Handler, Stack, Opts) -> + ?incomplete(object, Handler, Stack, Opts); object(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> ?error([BadTokens, Handler, Stack, Opts]); object(Token, Handler, Stack, Opts) -> @@ -112,23 +128,428 @@ object(Token, Handler, Stack, Opts) -> array([end_array|Tokens], Handler, [array|Stack], Opts) -> maybe_done(Tokens, handle_event(end_array, Handler, Opts), Stack, Opts); +array([], Handler, Stack, Opts) -> + ?incomplete(array, Handler, Stack, Opts); array(Tokens, Handler, Stack, Opts) when is_list(Tokens) -> value(Tokens, Handler, Stack, Opts); array(Token, Handler, Stack, Opts) -> array([Token], Handler, Stack, Opts). 
maybe_done([end_json], Handler, [], Opts) -> - {_, State} = handle_event(end_json, Handler, Opts), - State; + done([], Handler, [], Opts); maybe_done(Tokens, Handler, [object|_] = Stack, Opts) when is_list(Tokens) -> object(Tokens, Handler, Stack, Opts); maybe_done(Tokens, Handler, [array|_] = Stack, Opts) when is_list(Tokens) -> array(Tokens, Handler, Stack, Opts); +maybe_done([], Handler, Stack, Opts) -> + ?incomplete(maybe_done, Handler, Stack, Opts); maybe_done(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> ?error([BadTokens, Handler, Stack, Opts]); maybe_done(Token, Handler, Stack, Opts) -> maybe_done([Token], Handler, Stack, Opts). +done(Tokens, Handler, [], Opts) when Tokens == [end_json]; Tokens == [] -> + {_, State} = handle_event(end_json, Handler, Opts), + State; +done(BadTokens, Handler, Stack, Opts) when is_list(BadTokens) -> + ?error([BadTokens, Handler, Stack, Opts]); +done(Token, Handler, Stack, Opts) -> + done([Token], Handler, Stack, Opts). + + +fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); +fix_key(Key) when is_binary(Key) -> Key. + + +clean_string(Bin, Opts) -> + case Opts#opts.replaced_bad_utf8 orelse Opts#opts.escaped_strings of + true -> clean(Bin, [], Opts) + ; false -> ensure_clean(Bin), Bin + end. + + +%% fast path for no escaping and no correcting, throws error if string is 'bad' +ensure_clean(<<>>) -> ok; +ensure_clean(<<0, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<1, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<2, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<3, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<4, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<5, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<6, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<7, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<8, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<9, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<10, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<11, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<12, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<13, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<14, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<15, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<16, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<17, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<18, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<19, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<20, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<21, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<22, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<23, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<24, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<25, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<26, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<27, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<28, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<29, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<30, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<31, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<32, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<33, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<34, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<35, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<36, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<37, 
Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<38, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<39, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<40, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<41, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<42, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<43, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<44, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<45, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<46, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<47, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<48, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<49, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<50, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<51, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<52, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<53, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<54, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<55, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<56, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<57, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<58, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<59, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<60, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<61, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<62, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<63, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<64, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<65, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<66, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<67, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<68, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<69, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<70, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<71, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<72, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<73, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<74, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<75, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<76, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<77, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<78, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<79, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<80, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<81, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<82, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<83, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<84, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<85, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<86, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<87, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<88, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<89, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<90, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<91, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<92, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<93, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<94, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<95, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<96, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<97, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<98, Rest/binary>>) 
-> ensure_clean(Rest); +ensure_clean(<<99, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<100, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<101, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<102, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<103, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<104, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<105, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<106, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<107, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<108, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<109, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<110, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<111, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<112, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<113, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<114, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<115, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<116, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<117, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<118, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<119, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<120, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<121, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<122, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<123, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<124, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<125, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<126, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<<127, Rest/binary>>) -> ensure_clean(Rest); +ensure_clean(<>) when X < 16#800 -> ensure_clean(Rest); +ensure_clean(<>) when X < 16#dcff -> ensure_clean(Rest); +ensure_clean(<>) when X > 16#dfff, X < 16#fdd0 -> ensure_clean(Rest); +ensure_clean(<>) when X > 16#fdef, X < 16#fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#10000, X < 16#1fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#20000, X < 16#2fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#30000, X < 16#3fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#40000, X < 16#4fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#50000, X < 16#5fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#60000, X < 16#6fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#70000, X < 16#7fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#80000, X < 16#8fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#90000, X < 16#9fffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#a0000, X < 16#afffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#b0000, X < 16#bfffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#c0000, X < 16#cfffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#d0000, X < 16#dfffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#e0000, X < 16#efffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#f0000, X < 16#ffffe -> ensure_clean(Rest); +ensure_clean(<>) when X >= 16#100000, X < 16#10fffe -> ensure_clean(Rest); +ensure_clean(Bin) -> erlang:error(badarg, [Bin]). 
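%% ensure_clean/1 above is the validation-only fast path taken by
%% clean_string/2 when neither replaced_bad_utf8 nor escaped_strings is set:
%% it walks the binary without building a copy and raises badarg on anything
%% that is not well formed utf8 or that falls in the excluded codepoint ranges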
+ + +%% escape and/or replace bad codepoints if requested +clean(<<>>, Acc, _Opts) -> unicode:characters_to_binary(lists:reverse(Acc)); +clean(<<0, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(0, Opts) ++ Acc, Opts); +clean(<<1, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(1, Opts) ++ Acc, Opts); +clean(<<2, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(2, Opts) ++ Acc, Opts); +clean(<<3, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(3, Opts) ++ Acc, Opts); +clean(<<4, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(4, Opts) ++ Acc, Opts); +clean(<<5, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(5, Opts) ++ Acc, Opts); +clean(<<6, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(6, Opts) ++ Acc, Opts); +clean(<<7, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(7, Opts) ++ Acc, Opts); +clean(<<8, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(8, Opts) ++ Acc, Opts); +clean(<<9, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(9, Opts) ++ Acc, Opts); +clean(<<10, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(10, Opts) ++ Acc, Opts); +clean(<<11, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(11, Opts) ++ Acc, Opts); +clean(<<12, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(12, Opts) ++ Acc, Opts); +clean(<<13, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(13, Opts) ++ Acc, Opts); +clean(<<14, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(14, Opts) ++ Acc, Opts); +clean(<<15, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(15, Opts) ++ Acc, Opts); +clean(<<16, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(16, Opts) ++ Acc, Opts); +clean(<<17, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(17, Opts) ++ Acc, Opts); +clean(<<18, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(18, Opts) ++ Acc, Opts); +clean(<<19, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(19, Opts) ++ Acc, Opts); +clean(<<20, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(20, Opts) ++ Acc, Opts); +clean(<<21, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(21, Opts) ++ Acc, Opts); +clean(<<22, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(22, Opts) ++ Acc, Opts); +clean(<<23, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(23, Opts) ++ Acc, Opts); +clean(<<24, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(24, Opts) ++ Acc, Opts); +clean(<<25, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(25, Opts) ++ Acc, Opts); +clean(<<26, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(26, Opts) ++ Acc, Opts); +clean(<<27, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(27, Opts) ++ Acc, Opts); +clean(<<28, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(28, Opts) ++ Acc, Opts); +clean(<<29, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(29, Opts) ++ Acc, Opts); +clean(<<30, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(30, Opts) ++ Acc, Opts); +clean(<<31, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(31, Opts) ++ Acc, Opts); +clean(<<32, Rest/binary>>, Acc, Opts) -> clean(Rest, [32] ++ Acc, Opts); +clean(<<33, Rest/binary>>, Acc, Opts) -> clean(Rest, [33] ++ Acc, Opts); +clean(<<34, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(34, Opts) ++ Acc, Opts); +clean(<<35, Rest/binary>>, Acc, Opts) -> clean(Rest, [35] ++ Acc, Opts); +clean(<<36, Rest/binary>>, Acc, Opts) -> clean(Rest, [36] ++ Acc, Opts); +clean(<<37, Rest/binary>>, Acc, Opts) -> clean(Rest, [37] ++ Acc, Opts); 
+clean(<<38, Rest/binary>>, Acc, Opts) -> clean(Rest, [38] ++ Acc, Opts); +clean(<<39, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(39, Opts) ++ Acc, Opts); +clean(<<40, Rest/binary>>, Acc, Opts) -> clean(Rest, [40] ++ Acc, Opts); +clean(<<41, Rest/binary>>, Acc, Opts) -> clean(Rest, [41] ++ Acc, Opts); +clean(<<42, Rest/binary>>, Acc, Opts) -> clean(Rest, [42] ++ Acc, Opts); +clean(<<43, Rest/binary>>, Acc, Opts) -> clean(Rest, [43] ++ Acc, Opts); +clean(<<44, Rest/binary>>, Acc, Opts) -> clean(Rest, [44] ++ Acc, Opts); +clean(<<45, Rest/binary>>, Acc, Opts) -> clean(Rest, [45] ++ Acc, Opts); +clean(<<46, Rest/binary>>, Acc, Opts) -> clean(Rest, [46] ++ Acc, Opts); +clean(<<47, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(47, Opts) ++ Acc, Opts); +clean(<<48, Rest/binary>>, Acc, Opts) -> clean(Rest, [48] ++ Acc, Opts); +clean(<<49, Rest/binary>>, Acc, Opts) -> clean(Rest, [49] ++ Acc, Opts); +clean(<<50, Rest/binary>>, Acc, Opts) -> clean(Rest, [50] ++ Acc, Opts); +clean(<<51, Rest/binary>>, Acc, Opts) -> clean(Rest, [51] ++ Acc, Opts); +clean(<<52, Rest/binary>>, Acc, Opts) -> clean(Rest, [52] ++ Acc, Opts); +clean(<<53, Rest/binary>>, Acc, Opts) -> clean(Rest, [53] ++ Acc, Opts); +clean(<<54, Rest/binary>>, Acc, Opts) -> clean(Rest, [54] ++ Acc, Opts); +clean(<<55, Rest/binary>>, Acc, Opts) -> clean(Rest, [55] ++ Acc, Opts); +clean(<<56, Rest/binary>>, Acc, Opts) -> clean(Rest, [56] ++ Acc, Opts); +clean(<<57, Rest/binary>>, Acc, Opts) -> clean(Rest, [57] ++ Acc, Opts); +clean(<<58, Rest/binary>>, Acc, Opts) -> clean(Rest, [58] ++ Acc, Opts); +clean(<<59, Rest/binary>>, Acc, Opts) -> clean(Rest, [59] ++ Acc, Opts); +clean(<<60, Rest/binary>>, Acc, Opts) -> clean(Rest, [60] ++ Acc, Opts); +clean(<<61, Rest/binary>>, Acc, Opts) -> clean(Rest, [61] ++ Acc, Opts); +clean(<<62, Rest/binary>>, Acc, Opts) -> clean(Rest, [62] ++ Acc, Opts); +clean(<<63, Rest/binary>>, Acc, Opts) -> clean(Rest, [63] ++ Acc, Opts); +clean(<<64, Rest/binary>>, Acc, Opts) -> clean(Rest, [64] ++ Acc, Opts); +clean(<<65, Rest/binary>>, Acc, Opts) -> clean(Rest, [65] ++ Acc, Opts); +clean(<<66, Rest/binary>>, Acc, Opts) -> clean(Rest, [66] ++ Acc, Opts); +clean(<<67, Rest/binary>>, Acc, Opts) -> clean(Rest, [67] ++ Acc, Opts); +clean(<<68, Rest/binary>>, Acc, Opts) -> clean(Rest, [68] ++ Acc, Opts); +clean(<<69, Rest/binary>>, Acc, Opts) -> clean(Rest, [69] ++ Acc, Opts); +clean(<<70, Rest/binary>>, Acc, Opts) -> clean(Rest, [70] ++ Acc, Opts); +clean(<<71, Rest/binary>>, Acc, Opts) -> clean(Rest, [71] ++ Acc, Opts); +clean(<<72, Rest/binary>>, Acc, Opts) -> clean(Rest, [72] ++ Acc, Opts); +clean(<<73, Rest/binary>>, Acc, Opts) -> clean(Rest, [73] ++ Acc, Opts); +clean(<<74, Rest/binary>>, Acc, Opts) -> clean(Rest, [74] ++ Acc, Opts); +clean(<<75, Rest/binary>>, Acc, Opts) -> clean(Rest, [75] ++ Acc, Opts); +clean(<<76, Rest/binary>>, Acc, Opts) -> clean(Rest, [76] ++ Acc, Opts); +clean(<<77, Rest/binary>>, Acc, Opts) -> clean(Rest, [77] ++ Acc, Opts); +clean(<<78, Rest/binary>>, Acc, Opts) -> clean(Rest, [78] ++ Acc, Opts); +clean(<<79, Rest/binary>>, Acc, Opts) -> clean(Rest, [79] ++ Acc, Opts); +clean(<<80, Rest/binary>>, Acc, Opts) -> clean(Rest, [80] ++ Acc, Opts); +clean(<<81, Rest/binary>>, Acc, Opts) -> clean(Rest, [81] ++ Acc, Opts); +clean(<<82, Rest/binary>>, Acc, Opts) -> clean(Rest, [82] ++ Acc, Opts); +clean(<<83, Rest/binary>>, Acc, Opts) -> clean(Rest, [83] ++ Acc, Opts); +clean(<<84, Rest/binary>>, Acc, Opts) -> clean(Rest, [84] ++ Acc, Opts); +clean(<<85, Rest/binary>>, Acc, Opts) 
-> clean(Rest, [85] ++ Acc, Opts);
+clean(<<86, Rest/binary>>, Acc, Opts) -> clean(Rest, [86] ++ Acc, Opts);
+clean(<<87, Rest/binary>>, Acc, Opts) -> clean(Rest, [87] ++ Acc, Opts);
+clean(<<88, Rest/binary>>, Acc, Opts) -> clean(Rest, [88] ++ Acc, Opts);
+clean(<<89, Rest/binary>>, Acc, Opts) -> clean(Rest, [89] ++ Acc, Opts);
+clean(<<90, Rest/binary>>, Acc, Opts) -> clean(Rest, [90] ++ Acc, Opts);
+clean(<<91, Rest/binary>>, Acc, Opts) -> clean(Rest, [91] ++ Acc, Opts);
+clean(<<92, Rest/binary>>, Acc, Opts) -> clean(Rest, maybe_replace(92, Opts) ++ Acc, Opts);
+clean(<<93, Rest/binary>>, Acc, Opts) -> clean(Rest, [93] ++ Acc, Opts);
+clean(<<94, Rest/binary>>, Acc, Opts) -> clean(Rest, [94] ++ Acc, Opts);
+clean(<<95, Rest/binary>>, Acc, Opts) -> clean(Rest, [95] ++ Acc, Opts);
+clean(<<96, Rest/binary>>, Acc, Opts) -> clean(Rest, [96] ++ Acc, Opts);
+clean(<<97, Rest/binary>>, Acc, Opts) -> clean(Rest, [97] ++ Acc, Opts);
+clean(<<98, Rest/binary>>, Acc, Opts) -> clean(Rest, [98] ++ Acc, Opts);
+clean(<<99, Rest/binary>>, Acc, Opts) -> clean(Rest, [99] ++ Acc, Opts);
+clean(<<100, Rest/binary>>, Acc, Opts) -> clean(Rest, [100] ++ Acc, Opts);
+clean(<<101, Rest/binary>>, Acc, Opts) -> clean(Rest, [101] ++ Acc, Opts);
+clean(<<102, Rest/binary>>, Acc, Opts) -> clean(Rest, [102] ++ Acc, Opts);
+clean(<<103, Rest/binary>>, Acc, Opts) -> clean(Rest, [103] ++ Acc, Opts);
+clean(<<104, Rest/binary>>, Acc, Opts) -> clean(Rest, [104] ++ Acc, Opts);
+clean(<<105, Rest/binary>>, Acc, Opts) -> clean(Rest, [105] ++ Acc, Opts);
+clean(<<106, Rest/binary>>, Acc, Opts) -> clean(Rest, [106] ++ Acc, Opts);
+clean(<<107, Rest/binary>>, Acc, Opts) -> clean(Rest, [107] ++ Acc, Opts);
+clean(<<108, Rest/binary>>, Acc, Opts) -> clean(Rest, [108] ++ Acc, Opts);
+clean(<<109, Rest/binary>>, Acc, Opts) -> clean(Rest, [109] ++ Acc, Opts);
+clean(<<110, Rest/binary>>, Acc, Opts) -> clean(Rest, [110] ++ Acc, Opts);
+clean(<<111, Rest/binary>>, Acc, Opts) -> clean(Rest, [111] ++ Acc, Opts);
+clean(<<112, Rest/binary>>, Acc, Opts) -> clean(Rest, [112] ++ Acc, Opts);
+clean(<<113, Rest/binary>>, Acc, Opts) -> clean(Rest, [113] ++ Acc, Opts);
+clean(<<114, Rest/binary>>, Acc, Opts) -> clean(Rest, [114] ++ Acc, Opts);
+clean(<<115, Rest/binary>>, Acc, Opts) -> clean(Rest, [115] ++ Acc, Opts);
+clean(<<116, Rest/binary>>, Acc, Opts) -> clean(Rest, [116] ++ Acc, Opts);
+clean(<<117, Rest/binary>>, Acc, Opts) -> clean(Rest, [117] ++ Acc, Opts);
+clean(<<118, Rest/binary>>, Acc, Opts) -> clean(Rest, [118] ++ Acc, Opts);
+clean(<<119, Rest/binary>>, Acc, Opts) -> clean(Rest, [119] ++ Acc, Opts);
+clean(<<120, Rest/binary>>, Acc, Opts) -> clean(Rest, [120] ++ Acc, Opts);
+clean(<<121, Rest/binary>>, Acc, Opts) -> clean(Rest, [121] ++ Acc, Opts);
+clean(<<122, Rest/binary>>, Acc, Opts) -> clean(Rest, [122] ++ Acc, Opts);
+clean(<<123, Rest/binary>>, Acc, Opts) -> clean(Rest, [123] ++ Acc, Opts);
+clean(<<124, Rest/binary>>, Acc, Opts) -> clean(Rest, [124] ++ Acc, Opts);
+clean(<<125, Rest/binary>>, Acc, Opts) -> clean(Rest, [125] ++ Acc, Opts);
+clean(<<126, Rest/binary>>, Acc, Opts) -> clean(Rest, [126] ++ Acc, Opts);
+clean(<<127, Rest/binary>>, Acc, Opts) -> clean(Rest, [127] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X < 16#800 ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X == 16#2028; X == 16#2029 ->
+    clean(Rest, maybe_replace(X, Opts) ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X < 16#dcff ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X > 16#dfff, X < 16#fdd0 ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X > 16#fdef, X < 16#fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#10000, X < 16#1fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#20000, X < 16#2fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#30000, X < 16#3fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#40000, X < 16#4fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#50000, X < 16#5fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#60000, X < 16#6fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#70000, X < 16#7fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#80000, X < 16#8fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#90000, X < 16#9fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#a0000, X < 16#afffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#b0000, X < 16#bfffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#c0000, X < 16#cfffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#d0000, X < 16#dfffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#e0000, X < 16#efffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#f0000, X < 16#ffffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+clean(<<X/utf8, Rest/binary>>, Acc, Opts) when X >= 16#100000, X < 16#10fffe ->
+    clean(Rest, [X] ++ Acc, Opts);
+%% noncharacters
+clean(<<_/utf8, Rest/binary>>, Acc, Opts) ->
+    clean(Rest, maybe_replace(noncharacter, Opts) ++ Acc, Opts);
+%% surrogates
+clean(<<237, X, _, Rest/binary>>, Acc, Opts) when X >= 160 ->
+    clean(Rest, maybe_replace(surrogate, Opts) ++ Acc, Opts);
+%% u+fffe and u+ffff for R14BXX
+clean(<<239, 191, X, Rest/binary>>, Acc, Opts) when X == 190; X == 191 ->
+    clean(Rest, maybe_replace(noncharacter, Opts) ++ Acc, Opts);
+%% overlong encodings and missing continuations of a 2 byte sequence
+clean(<<X, Rest/binary>>, Acc, Opts) when X >= 192, X =< 223 ->
+    clean(strip_continuations(Rest, 1), maybe_replace(badutf, Opts) ++ Acc, Opts);
+%% overlong encodings and missing continuations of a 3 byte sequence
+clean(<<X, Rest/binary>>, Acc, Opts) when X >= 224, X =< 239 ->
+    clean(strip_continuations(Rest, 2), maybe_replace(badutf, Opts) ++ Acc, Opts);
+%% overlong encodings and missing continuations of a 4 byte sequence
+clean(<<X, Rest/binary>>, Acc, Opts) when X >= 240, X =< 247 ->
+    clean(strip_continuations(Rest, 3), maybe_replace(badutf, Opts) ++ Acc, Opts);
+clean(<<_, Rest/binary>>, Acc, Opts) ->
+    clean(Rest, maybe_replace(badutf, Opts) ++ Acc, Opts).
+
+
+strip_continuations(Bin, 0) -> Bin;
+strip_continuations(<<X, Rest/binary>>, N) when X >= 128, X =< 191 ->
+    strip_continuations(Rest, N - 1);
+%% not a continuation byte
+strip_continuations(Bin, _) -> Bin.
+ + +maybe_replace(X, #opts{dirty_strings=true}) when is_integer(X) -> [X]; +maybe_replace($\b, #opts{escaped_strings=true}) -> [$b, $\\]; +maybe_replace($\t, #opts{escaped_strings=true}) -> [$t, $\\]; +maybe_replace($\n, #opts{escaped_strings=true}) -> [$n, $\\]; +maybe_replace($\f, #opts{escaped_strings=true}) -> [$f, $\\]; +maybe_replace($\r, #opts{escaped_strings=true}) -> [$r, $\\]; +maybe_replace($\", #opts{escaped_strings=true}) -> [$\", $\\]; +maybe_replace($', Opts=#opts{escaped_strings=true}) -> + case Opts#opts.single_quoted_strings of + true -> [$', $\\] + ; false -> [$'] + end; +maybe_replace($/, Opts=#opts{escaped_strings=true}) -> + case Opts#opts.escaped_forward_slashes of + true -> [$/, $\\] + ; false -> [$/] + end; +maybe_replace($\\, #opts{escaped_strings=true}) -> [$\\, $\\]; +maybe_replace(X, Opts=#opts{escaped_strings=true}) when X == 16#2028; X == 16#2029 -> + case Opts#opts.unescaped_jsonp of + true -> [X] + ; false -> lists:reverse(jsx_utils:json_escape_sequence(X)) + end; +maybe_replace(X, #opts{escaped_strings=true}) when X < 32 -> + lists:reverse(jsx_utils:json_escape_sequence(X)); +maybe_replace(noncharacter, #opts{replaced_bad_utf8=true}) -> [16#fffd]; +maybe_replace(surrogate, #opts{replaced_bad_utf8=true}) -> [16#fffd]; +maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd]. -ifdef(TEST). @@ -263,7 +684,373 @@ encode_test_() -> end_json ] ) + }, + {"atom keys", ?_assertEqual( + encode([start_object, {key, key}, {string, <<"value">>}, end_object, end_json]), + [start_object, {key, <<"key">>}, {string, <<"value">>}, end_object, end_json] + )} + ]. + + +xcode(Bin) -> xcode(Bin, #opts{}). + +xcode(Bin, [replaced_bad_utf8]) -> xcode(Bin, #opts{replaced_bad_utf8=true}); +xcode(Bin, Opts) -> + try clean_string(Bin, Opts) + catch error:badarg -> {error, badarg} + end. + + +is_bad({error, badarg}) -> true; +is_bad(_) -> false. 
+ + +bad_utf8_test_() -> + [ + {"orphan continuation byte u+0080", + ?_assert(is_bad(xcode(<<16#0080>>))) + }, + {"orphan continuation byte u+0080 replaced", + ?_assertEqual(xcode(<<16#0080>>, [replaced_bad_utf8]), <<16#fffd/utf8>>) + }, + {"orphan continuation byte u+00bf", + ?_assert(is_bad(xcode(<<16#00bf>>))) + }, + {"orphan continuation byte u+00bf replaced", + ?_assertEqual(xcode(<<16#00bf>>, [replaced_bad_utf8]), <<16#fffd/utf8>>) + }, + {"2 continuation bytes", + ?_assert(is_bad(xcode(<<(binary:copy(<<16#0080>>, 2))/binary>>))) + }, + {"2 continuation bytes replaced", + ?_assertEqual( + xcode(<<(binary:copy(<<16#0080>>, 2))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, 2) + ) + }, + {"3 continuation bytes", + ?_assert(is_bad(xcode(<<(binary:copy(<<16#0080>>, 3))/binary>>))) + }, + {"3 continuation bytes replaced", + ?_assertEqual( + xcode(<<(binary:copy(<<16#0080>>, 3))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, 3) + ) + }, + {"4 continuation bytes", + ?_assert(is_bad(xcode(<<(binary:copy(<<16#0080>>, 4))/binary>>))) + }, + {"4 continuation bytes replaced", + ?_assertEqual( + xcode(<<(binary:copy(<<16#0080>>, 4))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, 4) + ) + }, + {"5 continuation bytes", + ?_assert(is_bad(xcode(<<(binary:copy(<<16#0080>>, 5))/binary>>))) + }, + {"5 continuation bytes replaced", + ?_assertEqual( + xcode(<<(binary:copy(<<16#0080>>, 5))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, 5) + ) + }, + {"6 continuation bytes", + ?_assert(is_bad(xcode(<<(binary:copy(<<16#0080>>, 6))/binary>>))) + }, + {"6 continuation bytes replaced", + ?_assertEqual( + xcode(<<(binary:copy(<<16#0080>>, 6))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, 6) + ) + }, + {"all continuation bytes", + ?_assert(is_bad(xcode(<<(list_to_binary(lists:seq(16#0080, 16#00bf)))/binary>>))) + }, + {"all continuation bytes replaced", + ?_assertEqual( + xcode(<<(list_to_binary(lists:seq(16#0080, 16#00bf)))/binary>>, [replaced_bad_utf8]), + binary:copy(<<16#fffd/utf8>>, length(lists:seq(16#0080, 16#00bf))) + ) + }, + {"lonely start byte", + ?_assert(is_bad(xcode(<<16#00c0>>))) + }, + {"lonely start byte replaced", + ?_assertEqual( + xcode(<<16#00c0>>, [replaced_bad_utf8]), + <<16#fffd/utf8>> + ) + }, + {"lonely start bytes (2 byte)", + ?_assert(is_bad(xcode(<<16#00c0, 32, 16#00df>>))) + }, + {"lonely start bytes (2 byte) replaced", + ?_assertEqual( + xcode(<<16#00c0, 32, 16#00df>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32, 16#fffd/utf8>> + ) + }, + {"lonely start bytes (3 byte)", + ?_assert(is_bad(xcode(<<16#00e0, 32, 16#00ef>>))) + }, + {"lonely start bytes (3 byte) replaced", + ?_assertEqual( + xcode(<<16#00e0, 32, 16#00ef>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32, 16#fffd/utf8>> + ) + }, + {"lonely start bytes (4 byte)", + ?_assert(is_bad(xcode(<<16#00f0, 32, 16#00f7>>))) + }, + {"lonely start bytes (4 byte) replaced", + ?_assertEqual( + xcode(<<16#00f0, 32, 16#00f7>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32, 16#fffd/utf8>> + ) + }, + {"missing continuation byte (3 byte)", + ?_assert(is_bad(xcode(<<224, 160, 32>>))) + }, + {"missing continuation byte (3 byte) replaced", + ?_assertEqual( + xcode(<<224, 160, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"missing continuation byte (4 byte missing one)", + ?_assert(is_bad(xcode(<<240, 144, 128, 32>>))) + }, + {"missing continuation byte (4 byte missing one) replaced", + ?_assertEqual( + xcode(<<240, 144, 128, 32>>, [replaced_bad_utf8]), 
+ <<16#fffd/utf8, 32>> + ) + }, + {"missing continuation byte (4 byte missing two)", + ?_assert(is_bad(xcode(<<240, 144, 32>>))) + }, + {"missing continuation byte (4 byte missing two) replaced", + ?_assertEqual( + xcode(<<240, 144, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"overlong encoding of u+002f (2 byte)", + ?_assert(is_bad(xcode(<<16#c0, 16#af, 32>>))) + }, + {"overlong encoding of u+002f (2 byte) replaced", + ?_assertEqual( + xcode(<<16#c0, 16#af, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"overlong encoding of u+002f (3 byte)", + ?_assert(is_bad(xcode(<<16#e0, 16#80, 16#af, 32>>))) + }, + {"overlong encoding of u+002f (3 byte) replaced", + ?_assertEqual( + xcode(<<16#e0, 16#80, 16#af, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"overlong encoding of u+002f (4 byte)", + ?_assert(is_bad(xcode(<<16#f0, 16#80, 16#80, 16#af, 32>>))) + }, + {"overlong encoding of u+002f (4 byte) replaced", + ?_assertEqual( + xcode(<<16#f0, 16#80, 16#80, 16#af, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"highest overlong 2 byte sequence", + ?_assert(is_bad(xcode(<<16#c1, 16#bf, 32>>))) + }, + {"highest overlong 2 byte sequence replaced", + ?_assertEqual( + xcode(<<16#c1, 16#bf, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"highest overlong 3 byte sequence", + ?_assert(is_bad(xcode(<<16#e0, 16#9f, 16#bf, 32>>))) + }, + {"highest overlong 3 byte sequence replaced", + ?_assertEqual( + xcode(<<16#e0, 16#9f, 16#bf, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) + }, + {"highest overlong 4 byte sequence", + ?_assert(is_bad(xcode(<<16#f0, 16#8f, 16#bf, 16#bf, 32>>))) + }, + {"highest overlong 4 byte sequence replaced", + ?_assertEqual( + xcode(<<16#f0, 16#8f, 16#bf, 16#bf, 32>>, [replaced_bad_utf8]), + <<16#fffd/utf8, 32>> + ) } ]. 
+ +escapes_test_() -> + [ + {"backspace escape", ?_assertEqual(encode(<<"\b">>, [escaped_strings]), [{string, <<"\\b">>}, end_json])}, + {"formfeed escape", ?_assertEqual(encode(<<"\f">>, [escaped_strings]), [{string, <<"\\f">>}, end_json])}, + {"newline escape", ?_assertEqual(encode(<<"\n">>, [escaped_strings]), [{string, <<"\\n">>}, end_json])}, + {"carriage return escape", ?_assertEqual(encode(<<"\r">>, [escaped_strings]), [{string, <<"\\r">>}, end_json])}, + {"tab escape", ?_assertEqual(encode(<<"\t">>, [escaped_strings]), [{string, <<"\\t">>}, end_json])}, + {"quote escape", ?_assertEqual(encode(<<"\"">>, [escaped_strings]), [{string, <<"\\\"">>}, end_json])}, + {"single quote escape", ?_assertEqual(encode(<<"'">>, [escaped_strings, single_quoted_strings]), [{string, <<"\\'">>}, end_json])}, + {"no single quote escape", ?_assertEqual(encode(<<"'">>, [escaped_strings]), [{string, <<"'">>}, end_json])}, + {"forward slash escape", ?_assertEqual(encode(<<"/">>, [escaped_strings, escaped_forward_slashes]), [{string, <<"\\/">>}, end_json])}, + {"no forward slash escape", ?_assertEqual(encode(<<"/">>, [escaped_strings]), [{string, <<"/">>}, end_json])}, + {"back slash escape", ?_assertEqual(encode(<<"\\">>, [escaped_strings]), [{string, <<"\\\\">>}, end_json])}, + {"jsonp escape", ?_assertEqual( + encode(<<16#2028/utf8, 16#2029/utf8>>, [escaped_strings]), + [{string, <<"\\u2028\\u2029">>}, end_json] + )}, + {"no jsonp escape", ?_assertEqual( + encode(<<16#2028/utf8, 16#2029/utf8>>, [escaped_strings, unescaped_jsonp]), + [{string, <<16#2028/utf8, 16#2029/utf8>>}, end_json] + )}, + {"control escape", ?_assertEqual(encode(<<0>>, [escaped_strings]), [{string, <<"\\u0000">>}, end_json])}, + {"dirty strings", ?_assertEqual(encode(<<"\n">>, [escaped_strings, dirty_strings]), [{string, <<"\n">>}, end_json])}, + {"ignore bad escapes", ?_assertEqual(encode(<<"\\x25">>, [escaped_strings, ignored_bad_escapes]), [{string, <<"\\\\x25">>}, end_json])} + ]. + + +surrogates_test_() -> + [ + {"surrogates - badjson", + ?_assert(check_bad(surrogates())) + }, + {"surrogates - replaced", + ?_assert(check_replaced(surrogates())) + } + ]. + + +good_characters_test_() -> + [ + {"acceptable codepoints", + ?_assert(check_good(good())) + }, + {"acceptable codepoints - escaped_strings", + ?_assert(check_good(good(), [escaped_strings])) + }, + {"acceptable codepoints - replaced_bad_utf8", + ?_assert(check_good(good(), [escaped_strings])) + }, + {"acceptable codepoints - escaped_strings + replaced_bad_utf8", + ?_assert(check_good(good(), [escaped_strings, replaced_bad_utf8])) + }, + {"acceptable extended", + ?_assert(check_good(good_extended())) + }, + {"acceptable extended - escaped_strings", + ?_assert(check_good(good_extended(), [escaped_strings])) + }, + {"acceptable extended - escaped_strings", + ?_assert(check_good(good_extended(), [replaced_bad_utf8])) + } + ]. + + +reserved_test_() -> + [ + {"reserved noncharacters - badjson", + ?_assert(check_bad(reserved_space())) + }, + {"reserved noncharacters - replaced", + ?_assert(check_replaced(reserved_space())) + } + ]. + + +noncharacters_test_() -> + [ + {"noncharacters - badjson", + ?_assert(check_bad(noncharacters())) + }, + {"noncharacters - replaced", + ?_assert(check_replaced(noncharacters())) + } + ]. + + +extended_noncharacters_test_() -> + [ + {"extended noncharacters - badjson", + ?_assert(check_bad(extended_noncharacters())) + }, + {"extended noncharacters - replaced", + ?_assert(check_replaced(extended_noncharacters())) + } + ]. 
+
+
+check_bad(List) ->
+    [] == lists:dropwhile(fun({_, {error, badjson}}) -> true ; (_) -> false end,
+        check(List, [], [])
+    ).
+
+
+check_replaced(List) ->
+    [] == lists:dropwhile(fun({_, [{string, <<16#fffd/utf8>>}|_]}) -> true ; (_) -> false
+        end,
+        check(List, [replaced_bad_utf8], [])
+    ).
+
+
+check_good(List) -> check_good(List, []).
+
+check_good(List, Opts) ->
+    [] == lists:dropwhile(fun({_, [{string, _}|_]}) -> true ; (_) -> false end,
+        check(List, Opts, [])
+    ).
+
+
+check([], _Opts, Acc) -> Acc;
+check([H|T], Opts, Acc) ->
+    R = encode(to_fake_utf(H, utf8), Opts),
+    check(T, Opts, [{H, R}] ++ Acc).
+
+
+noncharacters() -> lists:seq(16#fffe, 16#ffff).
+
+extended_noncharacters() ->
+    [16#1fffe, 16#1ffff, 16#2fffe, 16#2ffff]
+        ++ [16#3fffe, 16#3ffff, 16#4fffe, 16#4ffff]
+        ++ [16#5fffe, 16#5ffff, 16#6fffe, 16#6ffff]
+        ++ [16#7fffe, 16#7ffff, 16#8fffe, 16#8ffff]
+        ++ [16#9fffe, 16#9ffff, 16#afffe, 16#affff]
+        ++ [16#bfffe, 16#bffff, 16#cfffe, 16#cffff]
+        ++ [16#dfffe, 16#dffff, 16#efffe, 16#effff]
+        ++ [16#ffffe, 16#fffff, 16#10fffe, 16#10ffff].
+
+surrogates() -> lists:seq(16#d800, 16#dfff).
+
+reserved_space() -> lists:seq(16#fdd0, 16#fdef).
+
+good() -> lists:seq(16#0000, 16#d7ff) ++ lists:seq(16#e000, 16#fdcf) ++ lists:seq(16#fdf0, 16#fffd).
+
+good_extended() -> [16#10000, 16#20000, 16#30000, 16#40000, 16#50000,
+        16#60000, 16#70000, 16#80000, 16#90000, 16#a0000,
+        16#b0000, 16#c0000, 16#d0000, 16#e0000, 16#f0000
+    ] ++ lists:seq(16#100000, 16#10fffd).
+
+
+%% erlang refuses to encode certain codepoints, so fake them all
+to_fake_utf(N, utf8) when N < 16#0080 -> <<N:8>>;
+to_fake_utf(N, utf8) when N < 16#0800 ->
+    <<0:5, Y:5, X:6>> = <<N:16>>,
+    <<2#110:3, Y:5, 2#10:2, X:6>>;
+to_fake_utf(N, utf8) when N < 16#10000 ->
+    <<Z:4, Y:6, X:6>> = <<N:16>>,
+    <<2#1110:4, Z:4, 2#10:2, Y:6, 2#10:2, X:6>>;
+to_fake_utf(N, utf8) ->
+    <<0:3, W:3, Z:6, Y:6, X:6>> = <<N:24>>,
+    <<2#11110:5, W:3, 2#10:2, Z:6, 2#10:2, Y:6, 2#10:2, X:6>>.
+
 -endif.
\ No newline at end of file

From cb39d33edac626128ed6b295fae6401712e9179e Mon Sep 17 00:00:00 2001
From: alisdair sullivan
Date: Wed, 23 May 2012 00:57:13 -0700
Subject: [PATCH 16/52] add incomplete test to jsx_parser

---
 src/jsx_parser.erl | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl
index 9146c67..d6b0ca2 100644
--- a/src/jsx_parser.erl
+++ b/src/jsx_parser.erl
@@ -555,6 +555,22 @@ maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd].
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
+
+incomplete_test_() ->
+    F = parser(jsx, [], []),
+    [
+        {"incomplete test", ?_assertEqual(
+            begin
+                {incomplete, A} = F(start_object),
+                {incomplete, B} = A(key),
+                {incomplete, C} = B(true),
+                {incomplete, D} = C(end_object),
+                D(end_json)
+            end,
+            [start_object, {key, <<"key">>}, {literal, true}, end_object, end_json]
+        )}
+    ].
+
 encode(Term) -> encode(Term, []).
encode(Term, Opts) -> From 9d34818d52adb0aa7d42b2ffa3eba5238f278a14 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 01:00:37 -0700 Subject: [PATCH 17/52] remove unused explicit_end check --- src/jsx_parser.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index d6b0ca2..75f9ed9 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -49,7 +49,7 @@ parser(Handler, State, Opts) -> case State([end_json], Handler, Stack, - Opts#opts{explicit_end=false}) of + Opts) of {incomplete, _} -> ?error([Handler, Stack, Opts]) ; Events -> Events end From 608f2437cfd2a29a332cc1b5bfdfdeb65509abdf Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 01:07:54 -0700 Subject: [PATCH 18/52] change behaviour of dirty_strings option, dont attempt to escape or ensure clean at all --- src/jsx_encoder.erl | 2 +- src/jsx_parser.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/jsx_encoder.erl b/src/jsx_encoder.erl index 5302712..d587725 100644 --- a/src/jsx_encoder.erl +++ b/src/jsx_encoder.erl @@ -107,6 +107,7 @@ fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); fix_key(Key) when is_binary(Key) -> Key. +clean_string(Bin, Opts=#opts{dirty_strings=true}) -> Bin; clean_string(Bin, Opts) -> case Opts#opts.replaced_bad_utf8 orelse Opts#opts.escaped_strings of true -> clean(Bin, [], Opts) @@ -468,7 +469,6 @@ strip_continuations(<>, N) when X >= 128, X =< 191 -> strip_continuations(Bin, _) -> Bin. -maybe_replace(X, #opts{dirty_strings=true}) when is_integer(X) -> [X]; maybe_replace($\b, #opts{escaped_strings=true}) -> [$b, $\\]; maybe_replace($\t, #opts{escaped_strings=true}) -> [$t, $\\]; maybe_replace($\n, #opts{escaped_strings=true}) -> [$n, $\\]; diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index 75f9ed9..a290e3b 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -161,6 +161,7 @@ fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); fix_key(Key) when is_binary(Key) -> Key. +clean_string(Bin, Opts=#opts{dirty_strings=true}) -> Bin; clean_string(Bin, Opts) -> case Opts#opts.replaced_bad_utf8 orelse Opts#opts.escaped_strings of true -> clean(Bin, [], Opts) @@ -522,7 +523,6 @@ strip_continuations(<>, N) when X >= 128, X =< 191 -> strip_continuations(Bin, _) -> Bin. -maybe_replace(X, #opts{dirty_strings=true}) when is_integer(X) -> [X]; maybe_replace($\b, #opts{escaped_strings=true}) -> [$b, $\\]; maybe_replace($\t, #opts{escaped_strings=true}) -> [$t, $\\]; maybe_replace($\n, #opts{escaped_strings=true}) -> [$n, $\\]; From 57f067aaef883ad108480661bfca1ba951f7f054 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 06:47:58 -0700 Subject: [PATCH 19/52] better types and specs --- src/jsx.erl | 61 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/src/jsx.erl b/src/jsx.erl index b389d8b..7d87092 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -27,7 +27,7 @@ -export([to_term/1, to_term/2]). -export([is_json/1, is_json/2, is_term/1, is_term/2]). -export([format/1, format/2, minify/1, prettify/1]). --export([encoder/3, decoder/3]). +-export([encoder/3, decoder/3, parser/3]). %% old api -export([term_to_json/1, term_to_json/2, json_to_term/1, json_to_term/2]). @@ -37,8 +37,8 @@ -endif. 
--type json() :: list({binary(), json()}) - | list(json()) +-type json_term() :: list({binary(), json_term()}) + | list(json_term()) | true | false | null @@ -46,9 +46,11 @@ | float() | binary(). +-type json_text() :: binary(). --spec to_json(Source::json()) -> binary(). --spec to_json(Source::json(), Opts::jsx_to_json:opts()) -> binary(). + +-spec to_json(Source::json_term()) -> json_text(). +-spec to_json(Source::json_term(), Opts::jsx_to_json:opts()) -> json_text(). to_json(Source) -> to_json(Source, []). @@ -61,26 +63,26 @@ term_to_json(Source) -> to_json(Source, []). term_to_json(Source, Opts) -> to_json(Source, Opts). --spec format(Source::binary()) -> binary(). --spec format(Source::binary(), Opts::jsx_to_json:opts()) -> binary(). +-spec format(Source::json_text()) -> json_text(). +-spec format(Source::json_text(), Opts::jsx_to_json:opts()) -> json_text(). format(Source) -> format(Source, []). format(Source, Opts) -> jsx_to_json:format(Source, Opts). --spec minify(Source::binary()) -> binary(). +-spec minify(Source::json_text()) -> json_text(). minify(Source) -> format(Source, []). --spec prettify(Source::binary()) -> binary(). +-spec prettify(Source::json_text()) -> json_text(). prettify(Source) -> format(Source, [space, {indent, 2}]). --spec to_term(Source::binary()) -> json(). --spec to_term(Source::binary(), Opts::jsx_to_term:opts()) -> json(). +-spec to_term(Source::json_text()) -> json_term(). +-spec to_term(Source::json_text(), Opts::jsx_to_term:opts()) -> json_term(). to_term(Source) -> to_term(Source, []). @@ -109,16 +111,49 @@ is_term(Source) -> is_term(Source, []). is_term(Source, Opts) -> jsx_verify:is_term(Source, Opts). --spec decoder(Handler::module(), State::any(), Opts::list()) -> fun(). +-type decoder() :: fun((json_text() | end_stream) -> any()). + +-spec decoder(Handler::module(), State::any(), Opts::list()) -> decoder(). decoder(Handler, State, Opts) -> jsx_decoder:decoder(Handler, State, Opts). --spec encoder(Handler::module(), State::any(), Opts::list()) -> fun(). +-type encoder() :: fun((json_term() | end_stream) -> any()). + +-spec encoder(Handler::module(), State::any(), Opts::list()) -> encoder(). encoder(Handler, State, Opts) -> jsx_encoder:encoder(Handler, State, Opts). +-type token() :: [token()] + | start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | binary() + | {number, integer() | float()} + | {integer, integer()} + | {float, float()} + | integer() + | float() + | {literal, true} + | {literal, false} + | {literal, null} + | true + | false + | null + | end_json. + + +-type parser() :: fun((token() | end_stream) -> any()). + +-spec parser(Handler::module(), State::any(), Opts::list()) -> parser(). + +parser(Handler, State, Opts) -> jsx_parser:parser(Handler, State, Opts). + + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). 
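the `parser/3` entry point added in this patch expects a callback module that exports `init/1` and `handle_event/2` (the same contract the built in decoder and encoder use). a minimal sketch of such a handler follows; the module name `event_collector` is purely illustrative and not part of jsx

```erlang
%% collects every event emitted during syntactic analysis and returns
%% the whole list once end_json is seen
-module(event_collector).
-export([init/1, handle_event/2]).

%% Args from jsx:parser/3 is passed through unchanged; it is ignored here
init(_Args) -> [].

%% end_json is always the last event; reverse the accumulator and return
%% it as the final result of the anonymous parsing function
handle_event(end_json, State) -> lists:reverse([end_json] ++ State);
handle_event(Event, State) -> [Event] ++ State.
```

with a handler like this, calling `(jsx:parser(event_collector, [], []))([start_array, {integer, 1}, end_array, end_json])` should simply hand back the event stream, `[start_array, {integer, 1}, end_array, end_json]`
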
From 2aa8cd2894a69ac3ebb1f8d53ee17f3c6f1fde25 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 21:57:01 -0700 Subject: [PATCH 20/52] README updated for v1.3, to_term and to_json replaced by decode and encode respectively --- README.markdown | 508 ++++++++++++++++++++++++++---------------------- src/jsx.erl | 41 ++-- 2 files changed, 295 insertions(+), 254 deletions(-) diff --git a/README.markdown b/README.markdown index 071b855..6700f8f 100644 --- a/README.markdown +++ b/README.markdown @@ -1,6 +1,6 @@ # jsx (v1.2.1) # -a sane [json][json] implementation for erlang, inspired by [yajl][yajl] +an erlang application for consuming, producing and manipulating [json][json]. inspired by [yajl][yajl] copyright 2011, 2012 alisdair sullivan @@ -14,17 +14,12 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain ## index ## * [quickstart](#quickstart) -* [the api](#the-api) +* [description](#description) - [json <-> erlang mapping](#json---erlang-mapping) - - [options](#options) - [incomplete input](#incomplete-input) - - [the encoder and decoder](#the-encoder-and-decoder) - - [handler callbacks](#handler-callbacks) - - [converting json to erlang terms](#converting-json-to-erlang-terms) - - [converting erlang terms to json](#converting-erlang-terms-to-json) - - [formatting json texts](#formatting-json-texts) - - [verifying json texts](#verifying-json-texts) - - [verifying terms](#verifying-terms) +* [data types](#data-types) +* [exports](#exports) +* [callback exports](#callback_exports) * [acknowledgements](#acknowledgements) @@ -110,7 +105,13 @@ to prettify some json }">> ``` -## the api ## + +## description ## + + +jsx is an erlang application for consuming, producing and manipulating [json][json] +jsx strives to be quick but complete, correct but pragmatic, and approachable but powerful. it handles json as encountered in common use with extensions to handle even less common usage. comments, strings quoted with `'` instead of `"`, json fragments and json streams, and invalid utf8 are all supported +jsx is a collection of functions useful when dealing with json in erlang. jsx is also a json compiler with separate parsing and semantic analysis stages. new, custom, semantic analysis steps are relatively simple to add. the syntactic analysis stage is also exposed separately for use with user defined tokenizers ### json <-> erlang mapping ### @@ -123,276 +124,313 @@ to prettify some json `array` | `[]` and `[JSON]` `object` | `[{}]` and `[{binary() OR atom(), JSON}]` -#### json #### - +* json json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that, they are detailed in the options section below - jsx also supports json fragments; valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint - -#### erlang #### - +* erlang only the erlang terms in the table above are supported. non supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly - -#### numbers #### - +* numbers javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. 
whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors - when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` - -#### strings #### - +* strings the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` - the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates that appear otherwise are an error - json string escapes of the form `\uXXXX` will be converted to their equivalent codepoint during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be - in the interests of pragmatism, there is an option for looser parsing, see options below - all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see options below) - this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings - -#### true, false and null #### - +* true, false and null the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise - -#### arrays #### - +* arrays json arrays are represented with erlang lists of json values as described in this section - -#### objects #### - +* objects json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values -### options ### - -jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. flags are always atoms or `{atom, Term}` tuples. 
functions may have additional options beyond these, see individual function documentation for details - -#### `replaced_bad_utf8` #### - -json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. if this option is present attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec. this applies both to malformed unicode and disallowed codepoints - -#### `escaped_forward_slashes` #### - -json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding, you may want to use this if you are embedding json directly into a html or xml document - -#### `single_quoted_strings` #### - -some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to deliminate keys and strings. this option allows json containing single quotes as structural (deliminator) characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings deliminated by single quotes - -double quotes must ALWAYS be escaped, regardless of what kind of quotes deliminate the string they are found in - -the parser will never emit json with keys or strings deliminated by single quotes - -#### `unescaped_jsonp` #### - -javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping if, for some reason, you object to this - -#### `comments` #### - -json has no official comments but some parsers allow c style comments. this flag allows comments (both `// ...` and `/* ... */` style) anywhere whitespace is allowed - -#### `escaped_strings` #### - -by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint and encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes - -#### `dirty_strings` #### - -json escaping is lossy, it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping - -#### `ignored_bad_escapes` #### - -during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that if you combine this option with `escaped_strings` the escape character itself will be escaped - -#### `explicit_end` #### - -this option treats all exhausted inputs as incomplete, as explained below. the parser will not attempt to return a final state until the function is called with the value `end_stream` - -#### `relax` #### - -relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can - -#### `{pre_encode, F}` #### - -`F` is a function of arity 1 that pre-process input to the encoder. 
only input evaluated in a *value* context is pre-processed in this manner (so keys are not pre-processed, but objects and arrays are). if more than one pre encoder is declared, a `badarg` exception will occur - -input can be any term, but output from the function must be a valid type for input - - ### incomplete input ### -jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory - -however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour, see above +jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory +however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour, see below -### the encoder and decoder ### - -jsx is built on top of two finite state automata, one that handles json texts and one that handles erlang terms. both take a callback module as an argument that acts similar to a fold over a list of json 'events'. these events and the handler module's callbacks are detailed in the next section - -`jsx:decoder/3` and `jsx:encoder/3` are the entry points for the decoder and encoder, respectively - -`decoder(Handler, InitialState, Opts)` -> `Fun((JSON) -> Any)` - -`encoder(Handler, InitialState, Opts)` -> `Fun((Term) -> Any)` - -types: - -- `Handler` = `atom()`, should be the name of a callback module, see below -- `InitialState` = `term()`, passed as is to `Handler:init/1` -- `Opts` = see above -- `JSON` = `utf8` encoded json text -- `Term` = an erlang term as specified above in the mapping section -- `Any` = `term()` - -decoder returns an anonymous function that handles binary json input and encoder returns an anonymous function that handles erlang term input. 
these are safe to reuse for multiple inputs - - -### handler callbacks ### - -`Handler` should export the following pair of functions - -`Handler:init(InitialState)` -> `State` - -`Handler:handle_event(Event, State)` -> `NewState` - -types: - -- `InitialState`, `State`, `NewState` = any erlang term -- `Event` = - * `start_object` - * `end_object` - * `start_array` - * `end_array` - * `end_json` - * `{key, binary()}` - * `{string, binary()}` - * `{integer, integer()}` - * `{float, float()}` - * `{literal, true}` - * `{literal, false}` - * `{literal, null}` - -`init/1` is called with the `initialState` argument from `decoder/3` or `encoder/3` and should take care of any initialization your handler requires and return a new state - -`handle_event/2` is called for each `Event` emitted by the decoder/encoder with the output of the previous `handle_event/2` call (or `init/1` call, if `handle_event/2` has not yet been called) - -the event `end_json` will always be the last event emitted, you should take care of any cleanup in `handle_event/2` when encountering `end_json`. the state returned from this call will be returned as the final result of the anonymous function - -both `key` and `string` are `utf8` encoded binaries with all escaped values converted into the appropriate codepoints - - -### converting json to erlang terms ### - -`to_term` parses a JSON text (a utf8 encoded binary) and produces an erlang term (see json <-> erlang mapping details above) - -`to_term(JSON)` -> `Term` - -`to_term(JSON, Opts)` -> `Term` - -types: - -* `JSON` = as above in the mapping section -* `Term` = as above in the mapping section -* `Opts` = as above in the opts section, but see also additional opts below -* `Opt` = - - `labels` - - `{labels, Label}` - - `Label` = - * `binary` - * `atom` - * `existing_atom` - - `{post_decode, F}` - -the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms, and results in a badarg error if keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom`, except it will not add new atoms to the atom table - -`{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: +## data types ## ```erlang - 1> F = fun(V) when is_list(V) -> V; (V) -> false end. - 2> jsx:to_term(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). - [{<<"a list">>, [false, false, false]}] +json_term() = [json_term()] + | [{binary() | atom(), json_term()}] + | true + | false + | null + | integer() + | float() + | binary() +``` +the erlang representation of json. 
binaries should be `utf8` encoded (but see below in options) + +```erlang +json_text() = binary() +``` +a utf8 encoded binary containing a json string + +```erlang +tokens() = token() | [token()] + +token() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | binary() + | {number, integer() | float()} + | {integer, integer()} + | {float, float()} + | integer() + | float() + | {literal, true} + | {literal, false} + | {literal, null} + | true + | false + | null + | end_json +``` +the internal representation used during syntactic analysis + +```erlang +event() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | {integer, integer()} + | {float, float()} + | {literal, true} + | {literal, false} + | {literal, null} + | end_json +``` +the internal representation used during semantic analysis + +```erlang +options() = [option()] + +option() = replaced_bad_utf8 + | escaped_forward_slashes + | single_quoted_strings + | unescaped_jsonp + | comments + | escaped_strings + | dirty_strings + | ignored_bad_escapes + | relax + | explicit_end +``` +jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these, see individual function documentation for details +* `replaced_bad_utf8` +json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. if this option is present attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec. this applies both to malformed unicode and disallowed codepoints +* `escaped_forward_slashes` +json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding, you may want to use this if you are embedding json directly into a html or xml document +* `single_quoted_strings` +some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to deliminate keys and strings. this option allows json containing single quotes as structural (deliminator) characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings deliminated by single quotes +double quotes must ALWAYS be escaped, regardless of what kind of quotes deliminate the string they are found in +the parser will never emit json with keys or strings deliminated by single quotes +* `unescaped_jsonp` +javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping +* `comments` +json has no official comments but some parsers allow c style comments. this flag allows comments (both `// ...` and `/* ... */` style) anywhere whitespace is allowed +* `escaped_strings` +by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint and encoded terms are unaltered. 
this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes +* `dirty_strings` +json escaping is lossy, it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping +* `ignored_bad_escapes` +during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that if you combine this option with `escaped_strings` the escape character itself will be escaped +* `explicit_end` +this option treats all exhausted inputs as incomplete, as explained below. the parser will not attempt to return a final state until the function is called with the value `end_stream` +* `relax` +relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can + + +## exports ## + +### encoder/3, decoder/3 and parser/3 ### + +```erlang +decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) +encoder(Module, Args, Opts) -> Fun((JSONTerm) -> any()) +parser(Module, Args, Opts) -> Fun((Tokens) -> any()) + + Module = atom() + Args = any() + Opts = options() + JSONText = json_text() + JSONTerm = json_term() + Tokens = tokens() ``` -if more than one decoder is declared a badarg exception will result +jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages. (actually, semantic analysis takes place during syntactic analysis, for efficiency) included are two tokenizers, one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user defined tokenizers (`parser/3`) +all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails +`Module` is the name of the callback module +`Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state +`Opts` are detailed [above](#data_types) +see [below](#callback_exports) for details on the callback module -### converting erlang terms to json ### - -`to_json` parses an erlang term and produces a JSON text (see json <-> erlang mapping details below) +### decode/1,2 ### -`to_json(Term)` -> `JSON` +```erlang +decode(JSON) -> Term +decode(JSON, Opts) -> Term -`to_json(Term, Opts)` -> `JSON` + JSON = json_text() + Term = json_term() + Opts = [option() | labels | {labels, Label} | {post_decode, F}] + Label = binary | atom | existing_atom + F = fun((any()) -> any()) +``` +`decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) +the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms, and results in a badarg error if keys fall outside the range of erlang atoms. 
`existing_atom` is identical to `atom`, except it will not add new atoms to the atom table
+`{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example:
+```erlang
+1> F = fun(V) when is_list(V) -> V; (V) -> false end.
+2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]).
+[{<<"a list">>, [false, false, false]}]
+```
+if more than one post decoder is declared a `badarg` error exception will result
+raises a `badarg` error exception if input is not valid json

### encode/1,2 ###

+```erlang
+encode(Term) -> JSON
+encode(Term, Opts) -> JSON
+
+    Term = json_term()
+    JSON = json_text()
+    Opts = [option() | {pre_encode, F} | space | {space, N} | indent | {indent, N}]
+    F = fun((any()) -> any())
+    N = pos_integer()
+```
+`encode` takes the erlang representation of a json value and produces a json text (a `utf8` encoded binary) (see [json <-> erlang mapping](#json---erlang-mapping))
+`{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. it may return any valid json value to be substituted in the returned json. for example:
+```erlang
+1> F = fun(V) when is_list(V) -> V; (V) -> false end.
+2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]).
+<<"{\"a list\": [false, false, false]}">>
+```
+if more than one pre encoder is declared a `badarg` error exception will result
 the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}`
-
 the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`.
the default is `{indent, 0}` +raises a `badarg` error exception if input is not valid json -calling `format` with no options results in minified json text +### minify/1 ### + +```erlang +minify(JSON) -> JSON + + JSON = json_text() +``` +`minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace +raises a `badarg` error exception if input is not valid json + +### prettify/1 ### + +```erlang +prettify(JSON) -> JSON + + JSON = json_text() +``` +`prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` +raises a `badarg` error exception if input is not valid json + +### is_json/1,2 ### + +```erlang +is_json(MaybeJSON) -> true | false +is_json(MaybeJSON, Opts) -> true | false + + MaybeJSON = any() + Opts = options() +``` +returns true if input is a valid json text, false if not +what exactly constitutes valid json may be altered per [options](#data_types) + +### is_term/1,2 ### + +```erlang +is_term(MaybeJSON) -> true | false +is_term(MaybeJSON, Opts) -> true | false + + MaybeJSON = any() + Opts = options() +``` +returns true if input is a valid erlang representation of json, false if not +what exactly constitutes valid json may be altered per [options](#data_types) -### verifying json texts ### +## callback exports ## -returns true if input is a valid JSON text, false if not +the following functions should be exported from a `jsx` callback module -`is_json(MaybeJSON)` -> `true` | `false` | `{incomplete, Fun}` +### Module:init/1 ### -`is_json(MaybeJSON, Opts)` -> `true` | `false` | `{incomplete, Fun}` +```erlang +Module:init(Args) -> InitialState -types: + Args = any() + InitialState = any() +``` +whenever `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` -* `MaybeJSON` = `any()` -* `Opts` = as above +### Module:handle_event/2 ### +```erlang +Module:handle_event(Event, State) -> NewState -### verifying terms ### - -returns true if input is a valid erlang term that represents a JSON text, false if not - -`is_term(MaybeJSON)` -> `true` | `false` - -`is_term(MaybeJSON, Opts)` -> `true` | `false` - -types: - -* `MaybeJSON` = `any()` -* `Opts` = as above + Event = events() + State = any() + NewState = any() +``` +semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: +* `start_object` +the start of a json object +* `end_object` +the end of a json object +* `start_array` +the start of a json array +* `end_array` +the end of a json array +* `{key, binary()}` +a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions +* `{string, binary()}` +a json string. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions +* `{integer, integer()}` +an erlang integer (bignum) +* `{float, float()}` +an erlang float +* `{literal, true}` +the atom `true` +* `{literal, false}` +the atom `false` +* `{literal, null}` +the atom `null` +* `end_json` +this event is emitted when syntactic analysis is completed. 
you should do any cleanup and return the result of your semantic analysis ## acknowledgements ## diff --git a/src/jsx.erl b/src/jsx.erl index 7d87092..56d3e3f 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -23,13 +23,14 @@ -module(jsx). --export([to_json/1, to_json/2]). --export([to_term/1, to_term/2]). +-export([encode/1, encode/2, decode/1, decode/2]). -export([is_json/1, is_json/2, is_term/1, is_term/2]). -export([format/1, format/2, minify/1, prettify/1]). -export([encoder/3, decoder/3, parser/3]). %% old api -export([term_to_json/1, term_to_json/2, json_to_term/1, json_to_term/2]). +-export([to_json/1, to_json/2]). +-export([to_term/1, to_term/2]). %% test handler -ifdef(TEST). @@ -49,18 +50,19 @@ -type json_text() :: binary(). --spec to_json(Source::json_term()) -> json_text(). --spec to_json(Source::json_term(), Opts::jsx_to_json:opts()) -> json_text(). +-spec encode(Source::json_term()) -> json_text(). +-spec encode(Source::json_term(), Opts::jsx_to_json:opts()) -> json_text(). -to_json(Source) -> to_json(Source, []). +encode(Source) -> encode(Source, []). -to_json(Source, Opts) -> jsx_to_json:to_json(Source, Opts). +encode(Source, Opts) -> jsx_to_json:to_json(Source, Opts). -%% old api, alias for to_json/x +%% old api, alias for encode/x -term_to_json(Source) -> to_json(Source, []). - -term_to_json(Source, Opts) -> to_json(Source, Opts). +to_json(Source) -> encode(Source, []). +to_json(Source, Opts) -> encode(Source, Opts). +term_to_json(Source) -> encode(Source, []). +term_to_json(Source, Opts) -> encode(Source, Opts). -spec format(Source::json_text()) -> json_text(). @@ -81,18 +83,19 @@ minify(Source) -> format(Source, []). prettify(Source) -> format(Source, [space, {indent, 2}]). --spec to_term(Source::json_text()) -> json_term(). --spec to_term(Source::json_text(), Opts::jsx_to_term:opts()) -> json_term(). +-spec decode(Source::json_text()) -> json_term(). +-spec decode(Source::json_text(), Opts::jsx_to_term:opts()) -> json_term(). -to_term(Source) -> to_term(Source, []). +decode(Source) -> decode(Source, []). -to_term(Source, Opts) -> jsx_to_term:to_term(Source, Opts). +decode(Source, Opts) -> jsx_to_term:to_term(Source, Opts). %% old api, alias for to_term/x -json_to_term(Source) -> to_term(Source, []). - -json_to_term(Source, Opts) -> to_term(Source, Opts). +to_term(Source) -> decode(Source, []). +to_term(Source, Opts) -> decode(Source, Opts). +json_to_term(Source) -> decode(Source, []). +json_to_term(Source, Opts) -> decode(Source, Opts). -spec is_json(Source::any()) -> true | false. @@ -242,7 +245,7 @@ jsx_decoder_gen([Test|Rest]) -> JSX = proplists:get_value(jsx, Test), Flags = proplists:get_value(jsx_flags, Test, []), {generator, fun() -> - [{Name, ?_assertEqual(decode(JSON, Flags), JSX)}, + [{Name, ?_assertEqual(test_decode(JSON, Flags), JSX)}, {Name ++ " (incremental)", ?_assertEqual(incremental_decode(JSON, Flags), JSX) } @@ -282,7 +285,7 @@ parse_tests([], _Dir, Acc) -> Acc. 
-decode(JSON, Flags) -> +test_decode(JSON, Flags) -> try case (jsx_decoder:decoder(?MODULE, [], Flags))(JSON) of {incomplete, More} -> From d25ff624f7a9d8e40d0219f82e84bb1a5a41cff1 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:16:44 -0700 Subject: [PATCH 21/52] add CHANGES --- CHANGES.markdown | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 CHANGES.markdown diff --git a/CHANGES.markdown b/CHANGES.markdown new file mode 100644 index 0000000..5533ae9 --- /dev/null +++ b/CHANGES.markdown @@ -0,0 +1,39 @@ +v1.3 + +* introduces `prettify/1` and `minify/1`, shortcuts for `format/2` +* introduce `encode/1,2` and `decode/1,2` as primary interface to built in tokenizers. `to_json/1,2` and `to_term/1,2` remain accessible but not advertised +* new `parser/3` function exposes syntactic analysis stage for use with user defined tokenizers +* improved documentation + +v1.2.1 + +* fixes incorrect handling of escaped forward slashes, thanks bob ippolito + +v1.2 + +* rewritten handling of string escaping to improve performance +* `pre_encode` and `post_decode` hooks, see README +* `relax` option + +v1.1.2 + +* add `dirty_strings` option +* more fixes for invalid unicode in strings + +v1.1.1 + +* fixes bug regarding handling of invalid unicode in R14Bxx + +v1.1 + +* improvements to string escaping and json generation performance + +v1.0.2 + +* fixes to function specs +* rewritten README +* `comments` option + +v1.0.1 + +* rebar fix \ No newline at end of file From 44d1c196a2b567ae14cab971ba99575003bdd922 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:17:36 -0700 Subject: [PATCH 22/52] bump to v1.3 --- README.markdown | 2 +- src/jsx.app.src | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.markdown b/README.markdown index 6700f8f..8e45d8a 100644 --- a/README.markdown +++ b/README.markdown @@ -1,4 +1,4 @@ -# jsx (v1.2.1) # +# jsx (v1.3) # an erlang application for consuming, producing and manipulating [json][json]. inspired by [yajl][yajl] diff --git a/src/jsx.app.src b/src/jsx.app.src index 59cad9b..ccbcc93 100644 --- a/src/jsx.app.src +++ b/src/jsx.app.src @@ -1,7 +1,7 @@ {application, jsx, [ {description, "a streaming, evented json parsing toolkit"}, - {vsn, "1.2.1"}, + {vsn, "1.3"}, {modules, [ jsx, jsx_encoder, From 656a2777d8fe49d78ef33a7ee027aa0f81b7596c Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:25:07 -0700 Subject: [PATCH 23/52] README formatting --- README.markdown | 115 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 8e45d8a..750c566 100644 --- a/README.markdown +++ b/README.markdown @@ -110,7 +110,9 @@ to prettify some json jsx is an erlang application for consuming, producing and manipulating [json][json] + jsx strives to be quick but complete, correct but pragmatic, and approachable but powerful. it handles json as encountered in common use with extensions to handle even less common usage. comments, strings quoted with `'` instead of `"`, json fragments and json streams, and invalid utf8 are all supported + jsx is a collection of functions useful when dealing with json in erlang. jsx is also a json compiler with separate parsing and semantic analysis stages. new, custom, semantic analysis steps are relatively simple to add. 
the syntactic analysis stage is also exposed separately for use with user defined tokenizers @@ -125,36 +127,57 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is `object` | `[{}]` and `[{binary() OR atom(), JSON}]` * json + json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that, they are detailed in the options section below + jsx also supports json fragments; valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint + * erlang + only the erlang terms in the table above are supported. non supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly + * numbers + javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors + when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` * strings + the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` + the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates that appear otherwise are an error + json string escapes of the form `\uXXXX` will be converted to their equivalent codepoint during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be + in the interests of pragmatism, there is an option for looser parsing, see options below + all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. 
noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see options below) + this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings + * true, false and null + the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise + * arrays + json arrays are represented with erlang lists of json values as described in this section + * objects + json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values ### incomplete input ### jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory + however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour, see below ## data types ## + ```erlang json_term() = [json_term()] | [{binary() | atom(), json_term()}] @@ -165,11 +188,13 @@ json_term() = [json_term()] | float() | binary() ``` + the erlang representation of json. binaries should be `utf8` encoded (but see below in options) ```erlang json_text() = binary() ``` + a utf8 encoded binary containing a json string ```erlang @@ -195,6 +220,7 @@ token() = start_object | null | end_json ``` + the internal representation used during syntactic analysis ```erlang @@ -211,6 +237,7 @@ event() = start_object | {literal, null} | end_json ``` + the internal representation used during semantic analysis ```erlang @@ -227,33 +254,57 @@ option() = replaced_bad_utf8 | relax | explicit_end ``` + jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these, see individual function documentation for details + * `replaced_bad_utf8` + json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. if this option is present attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec. this applies both to malformed unicode and disallowed codepoints + * `escaped_forward_slashes` + json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. 
this option is only relevant for encoding, you may want to use this if you are embedding json directly into a html or xml document + * `single_quoted_strings` + some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to deliminate keys and strings. this option allows json containing single quotes as structural (deliminator) characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings deliminated by single quotes + double quotes must ALWAYS be escaped, regardless of what kind of quotes deliminate the string they are found in + the parser will never emit json with keys or strings deliminated by single quotes + * `unescaped_jsonp` + javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping + * `comments` + json has no official comments but some parsers allow c style comments. this flag allows comments (both `// ...` and `/* ... */` style) anywhere whitespace is allowed + * `escaped_strings` + by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint and encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes + * `dirty_strings` + json escaping is lossy, it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping + * `ignored_bad_escapes` + during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that if you combine this option with `escaped_strings` the escape character itself will be escaped + * `explicit_end` + this option treats all exhausted inputs as incomplete, as explained below. the parser will not attempt to return a final state until the function is called with the value `end_stream` + * `relax` + relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can ## exports ## + ### encoder/3, decoder/3 and parser/3 ### ```erlang @@ -270,12 +321,18 @@ parser(Module, Args, Opts) -> Fun((Tokens) -> any()) ``` jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages. (actually, semantic analysis takes place during syntactic analysis, for efficiency) included are two tokenizers, one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). 
there is also an entry point to the syntactic analysis stage for use with user defined tokenizers (`parser/3`) + all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails + `Module` is the name of the callback module + `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state + `Opts` are detailed [above](#data_types) + see [below](#callback_exports) for details on the callback module + ### decode/1,2 ### ```erlang @@ -288,17 +345,23 @@ decode(JSON, Opts) -> Term Label = binary | atom | existing_atom F = fun((any()) -> any()) ``` + `decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) + the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms, and results in a badarg error if keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom`, except it will not add new atoms to the atom table + `{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: + ```erlang 1> F = fun(V) when is_list(V) -> V; (V) -> false end. 2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). [{<<"a list">>, [false, false, false]}] ``` + if more than one decoder is declared a `badarg` error exception will result raises a `badarg` error exception if input is not valid json + ### encode/1,2 ### ```erlang @@ -311,18 +374,26 @@ encode(Term, Opts) -> JSON F = fun((any()) -> any()) N = pos_integer() ``` + `encode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) + `{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. it may return any valid json value to be substituted in the returned json. for example: + ```erlang 1> F = fun(V) when is_list(V) -> V; (V) -> false end. 2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]. <<"{\"a list\": [false, false, false]}">> ``` + if more than one decoder is declared a `badarg` error exception will result + the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` + the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` + raises a `badarg` error exception if input is not a valid erlang representation of json + ### format/1,2 ### ```erlang @@ -333,11 +404,16 @@ format(JSON, Opts) -> JSON Opts = [option() | space | {space, N} | indent | {indent, N}] N = pos_integer() ``` + `format` parses a json text (a `utf8` encoded binary) and produces a new json text according to the format rules specified by `Opts` + the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. 
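a rough sketch of the whitespace options in action (the exact binaries below are indicative only and assume the compact default described next):

```erlang
1> jsx:encode([{<<"a list">>, [1,2,3]}]).
<<"{\"a list\":[1,2,3]}">>
2> jsx:encode([{<<"a list">>, [1,2,3]}], [space]).
<<"{\"a list\": [1, 2, 3]}">>
```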
the default is `{space, 0}` + the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` + raises a `badarg` error exception if input is not valid json + ### minify/1 ### ```erlang @@ -345,9 +421,12 @@ minify(JSON) -> JSON JSON = json_text() ``` + `minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace + raises a `badarg` error exception if input is not valid json + ### prettify/1 ### ```erlang @@ -355,9 +434,12 @@ prettify(JSON) -> JSON JSON = json_text() ``` + `prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` + raises a `badarg` error exception if input is not valid json + ### is_json/1,2 ### ```erlang @@ -367,9 +449,12 @@ is_json(MaybeJSON, Opts) -> true | false MaybeJSON = any() Opts = options() ``` + returns true if input is a valid json text, false if not + what exactly constitutes valid json may be altered per [options](#data_types) + ### is_term/1,2 ### ```erlang @@ -379,7 +464,9 @@ is_term(MaybeJSON, Opts) -> true | false MaybeJSON = any() Opts = options() ``` + returns true if input is a valid erlang representation of json, false if not + what exactly constitutes valid json may be altered per [options](#data_types) @@ -395,6 +482,7 @@ Module:init(Args) -> InitialState Args = any() InitialState = any() ``` + whenever `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` ### Module:handle_event/2 ### @@ -406,30 +494,55 @@ Module:handle_event(Event, State) -> NewState State = any() NewState = any() ``` + semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: + * `start_object` + the start of a json object + * `end_object` + the end of a json object + * `start_array` + the start of a json array + * `end_array` + the end of a json array + * `{key, binary()}` + a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions + * `{string, binary()}` + a json string. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions + * `{integer, integer()}` + an erlang integer (bignum) + * `{float, float()}` + an erlang float + * `{literal, true}` + the atom `true` + * `{literal, false}` + the atom `false` + * `{literal, null}` + the atom `null` + * `end_json` + this event is emitted when syntactic analysis is completed. 
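to make the callback contract above concrete, here is a minimal sketch of a handler that just counts the events it receives; the module name `event_counter` is hypothetical and the shell output is indicative

```erlang
-module(event_counter).
-export([init/1, handle_event/2]).

%% initial state: nothing counted yet
init(_Args) -> 0.

%% end_json signals the end of syntactic analysis, so return the result
handle_event(end_json, Count) -> Count;
%% every other event just bumps the counter
handle_event(_Event, Count) -> Count + 1.
```

used with `decoder/3` it might look like

```erlang
1> F = jsx:decoder(event_counter, [], []).
2> F(<<"{\"awesome\": true}">>).
4
```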
you should do any cleanup and return the result of your semantic analysis @@ -443,4 +556,4 @@ jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaie [rebar]: https://github.com/basho/rebar [sinan]: https://github.com/erlware/sinan [meck]: https://github.com/eproxus/meck -[rfc4627]: http://tools.ietf.org/html/rfc4627 +[rfc4627]: http://tools.ietf.org/html/rfc4627 \ No newline at end of file From c15623a767e655dfb119e1441077417958587530 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:30:02 -0700 Subject: [PATCH 24/52] remove unused variables --- src/jsx_encoder.erl | 2 +- src/jsx_parser.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/jsx_encoder.erl b/src/jsx_encoder.erl index d587725..3b52f67 100644 --- a/src/jsx_encoder.erl +++ b/src/jsx_encoder.erl @@ -107,7 +107,7 @@ fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); fix_key(Key) when is_binary(Key) -> Key. -clean_string(Bin, Opts=#opts{dirty_strings=true}) -> Bin; +clean_string(Bin, #opts{dirty_strings=true}) -> Bin; clean_string(Bin, Opts) -> case Opts#opts.replaced_bad_utf8 orelse Opts#opts.escaped_strings of true -> clean(Bin, [], Opts) diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index a290e3b..5b898bc 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -161,7 +161,7 @@ fix_key(Key) when is_atom(Key) -> fix_key(atom_to_binary(Key, utf8)); fix_key(Key) when is_binary(Key) -> Key. -clean_string(Bin, Opts=#opts{dirty_strings=true}) -> Bin; +clean_string(Bin, #opts{dirty_strings=true}) -> Bin; clean_string(Bin, Opts) -> case Opts#opts.replaced_bad_utf8 orelse Opts#opts.escaped_strings of true -> clean(Bin, [], Opts) From a46d25129a9c0a5914d73b3bd29d45a9659db620 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:46:17 -0700 Subject: [PATCH 25/52] fix typo in README example --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 750c566..677ee4a 100644 --- a/README.markdown +++ b/README.markdown @@ -381,7 +381,7 @@ encode(Term, Opts) -> JSON ```erlang 1> F = fun(V) when is_list(V) -> V; (V) -> false end. -2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]. +2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]). <<"{\"a list\": [false, false, false]}">> ``` From 85debfd1cae323fb0ba005e41959ac74cacba3b4 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 22:51:23 -0700 Subject: [PATCH 26/52] minor README tweaks --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 677ee4a..48fdcee 100644 --- a/README.markdown +++ b/README.markdown @@ -472,7 +472,7 @@ what exactly constitutes valid json may be altered per [options](#data_types) ## callback exports ## -the following functions should be exported from a `jsx` callback module +the following functions should be exported from a jsx callback module ### Module:init/1 ### From b594499e896d847f0d4e48359875c9d57694c7cd Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Wed, 23 May 2012 23:04:21 -0700 Subject: [PATCH 27/52] more minor README tweaks --- README.markdown | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.markdown b/README.markdown index 48fdcee..15ec200 100644 --- a/README.markdown +++ b/README.markdown @@ -358,7 +358,8 @@ the option `labels` controls how keys are converted from json to erlang terms. 
` [{<<"a list">>, [false, false, false]}] ``` -if more than one decoder is declared a `badarg` error exception will result +if more than one post decoder is declared a `badarg` error exception will result + raises a `badarg` error exception if input is not valid json @@ -385,7 +386,7 @@ encode(Term, Opts) -> JSON <<"{\"a list\": [false, false, false]}">> ``` -if more than one decoder is declared a `badarg` error exception will result +if more than one pre encoder is declared a `badarg` error exception will result the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` From fe0ba760695f4bf10a7dc8aa6d3c292820f67e30 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 00:33:45 -0700 Subject: [PATCH 28/52] technical proofreading courtesy minn --- README.markdown | 61 +++++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/README.markdown b/README.markdown index 15ec200..dace707 100644 --- a/README.markdown +++ b/README.markdown @@ -128,30 +128,31 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is * json -json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that, they are detailed in the options section below +json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that. they are detailed in the [options](#data_types) section below -jsx also supports json fragments; valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint +jsx also supports json fragments: valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint * erlang -only the erlang terms in the table above are supported. non supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly +only the erlang terms in the table above are supported. non-supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly * numbers javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. 
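a short sketch of the number handling described here (shell output is indicative):

```erlang
1> jsx:decode(<<"[1, 2.0, 123456789123456789123456789]">>).
[1,2.0,123456789123456789123456789]
```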
this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` + * strings -the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings and other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` +the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings. other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` -the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates that appear otherwise are an error +the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors -json string escapes of the form `\uXXXX` will be converted to their equivalent codepoint during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be +json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be -in the interests of pragmatism, there is an option for looser parsing, see options below +in the interest of pragmatism there is an option for looser parsing. see [options](#data_types) below -all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see options below) +all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see [options](#data_types) below) this implementation performs no normalization on strings beyond that detailed here. 
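as a hedged sketch of what the lack of normalization means in practice, the two binaries below are both valid `utf8` encodings of the same visible character, but jsx leaves them untouched so they do not compare equal after a round trip

```erlang
1> Precomposed = <<16#00e9/utf8>>.       %% "é" as a single codepoint
2> Decomposed = <<$e, 16#0301/utf8>>.    %% "e" followed by a combining acute accent
3> jsx:decode(jsx:encode([Precomposed])) =:= jsx:decode(jsx:encode([Decomposed])).
false
```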
be careful when comparing strings as equivalent strings may have different `utf8` encodings @@ -165,14 +166,14 @@ json arrays are represented with erlang lists of json values as described in thi * objects -json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent using the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values +json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values ### incomplete input ### -jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor or to parse large json texts without needing to keep them entirely in memory +jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory -however, it is important to recognize that jsx is greedy by default. if input is exhausted and the json text is not unambiguously incomplete jsx will consider the parsing complete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour, see below +however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see [options](#data_types) ## data types ## @@ -255,23 +256,23 @@ option() = replaced_bad_utf8 | explicit_end ``` -jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these, see individual function documentation for details +jsx functions all take a common set of options. 
not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details * `replaced_bad_utf8` -json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. if this option is present attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec. this applies both to malformed unicode and disallowed codepoints +json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints * `escaped_forward_slashes` -json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding, you may want to use this if you are embedding json directly into a html or xml document +json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document * `single_quoted_strings` -some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to deliminate keys and strings. this option allows json containing single quotes as structural (deliminator) characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings deliminated by single quotes +some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes -double quotes must ALWAYS be escaped, regardless of what kind of quotes deliminate the string they are found in +double quotes must ALWAYS be escaped, regardless of what kind of quotes delimit the string they are found in -the parser will never emit json with keys or strings deliminated by single quotes +the parser will never emit json with keys or strings delimited by single quotes * `unescaped_jsonp` @@ -279,23 +280,23 @@ javascript interpreters treat the codepoints `u+2028` and `u+2029` as significan * `comments` -json has no official comments but some parsers allow c style comments. this flag allows comments (both `// ...` and `/* ... */` style) anywhere whitespace is allowed +json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) * `escaped_strings` -by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint and encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes +by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. 
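an indicative sketch of the default behaviour next to the `escaped_strings` flag this bullet goes on to describe (outputs are illustrative):

```erlang
1> jsx:decode(<<"[\"line\\nbreak\"]">>).
[<<"line\nbreak">>]
2> jsx:decode(<<"[\"line\\nbreak\"]">>, [escaped_strings]).
[<<"line\\nbreak">>]
```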
escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes * `dirty_strings` -json escaping is lossy, it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping +json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping * `ignored_bad_escapes` -during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that if you combine this option with `escaped_strings` the escape character itself will be escaped +during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped * `explicit_end` -this option treats all exhausted inputs as incomplete, as explained below. the parser will not attempt to return a final state until the function is called with the value `end_stream` +this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` * `relax` @@ -320,7 +321,7 @@ parser(Module, Args, Opts) -> Fun((Tokens) -> any()) Tokens = tokens() ``` -jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages. (actually, semantic analysis takes place during syntactic analysis, for efficiency) included are two tokenizers, one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user defined tokenizers (`parser/3`) +jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages (actually, semantic analysis takes place during syntactic analysis, for efficiency). included are two tokenizers; one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user-defined tokenizers (`parser/3`) all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails @@ -348,7 +349,7 @@ decode(JSON, Opts) -> Term `decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) -the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms, and results in a badarg error if keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom`, except it will not add new atoms to the atom table +the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. 
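an indicative sketch of the default `binary` labels next to the `{labels, atom}` form described in the rest of this paragraph:

```erlang
1> jsx:decode(<<"{\"awesome\": true}">>).
[{<<"awesome">>,true}]
2> jsx:decode(<<"{\"awesome\": true}">>, [{labels, atom}]).
[{awesome,true}]
```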
`atom` converts keys to erlang atoms and results in a badarg error if the keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom` except it will not add new atoms to the atom table `{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: @@ -358,7 +359,7 @@ the option `labels` controls how keys are converted from json to erlang terms. ` [{<<"a list">>, [false, false, false]}] ``` -if more than one post decoder is declared a `badarg` error exception will result +declaring more than one post-decoder will result in a `badarg` error exception raises a `badarg` error exception if input is not valid json @@ -386,7 +387,7 @@ encode(Term, Opts) -> JSON <<"{\"a list\": [false, false, false]}">> ``` -if more than one pre encoder is declared a `badarg` error exception will result +declaring more than one pre-encoder will result in a `badarg` error exception the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` @@ -484,7 +485,7 @@ Module:init(Args) -> InitialState InitialState = any() ``` -whenever `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` +whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` ### Module:handle_event/2 ### @@ -516,11 +517,11 @@ the end of a json array * `{key, binary()}` -a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions +a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions * `{string, binary()}` -a json string. it will usually be a `utf8` encoded binary, see [options](#data_types) for possible exceptions +a json string. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions * `{integer, integer()}` @@ -549,7 +550,7 @@ this event is emitted when syntactic analysis is completed. 
you should do any cl ## acknowledgements ## -jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides, alex kropivny, steve strong, michael truog and dmitry kolesnikov +jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides, alex kropivny, steve strong, michael truog, dmitry kolesnikov and minn thant [json]: http://json.org [yajl]: http://lloyd.github.com/yajl From 227a7868999bcf7a86f5f31fab637aa0f895467d Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 08:33:30 -0700 Subject: [PATCH 29/52] clearer language regarding sinan/rebar --- README.markdown | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/README.markdown b/README.markdown index dace707..2973151 100644 --- a/README.markdown +++ b/README.markdown @@ -6,7 +6,7 @@ copyright 2011, 2012 alisdair sullivan jsx is released under the terms of the [MIT][MIT] license -jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain +jsx may be built using either [sinan][sinan] or [rebar][rebar] [![Build Status](https://secure.travis-ci.org/talentdeficit/jsx.png?branch=develop)](http://travis-ci.org/talentdeficit/jsx) @@ -25,26 +25,16 @@ jsx uses [sinan][sinan] or [rebar][rebar] for it's build chain ## quickstart ## -to build the library +to build the library and run tests ```bash tanga:jsx alisdair$ sinan build +tanga:jsx alisdair$ sinan -r tests eunit ``` or ```bash tanga:jsx alisdair$ rebar compile -``` - -to run tests - -```bash -tanga:jsx alisdair$ sinan -r tests eunit -``` - -or - -```bash tanga:jsx alisdair$ rebar eunit ``` From aa3c8acf03ce7502f481c800bcdb503b91a7e19c Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 08:34:52 -0700 Subject: [PATCH 30/52] replace all incidences of `to_json` and `to_term` with `encode` and `decode` --- README.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.markdown b/README.markdown index 2973151..59a45b8 100644 --- a/README.markdown +++ b/README.markdown @@ -41,18 +41,18 @@ tanga:jsx alisdair$ rebar eunit to convert a utf8 binary containing a json string into an erlang term ```erlang -1> jsx:to_term(<<"{\"library\": \"jsx\", \"awesome\": true}">>). +1> jsx:decode(<<"{\"library\": \"jsx\", \"awesome\": true}">>). [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] -2> jsx:to_term(<<"[\"a\",\"list\",\"of\",\"words\"]">>). +2> jsx:decode(<<"[\"a\",\"list\",\"of\",\"words\"]">>). [<<"a">>, <<"list">>, <<"of">>, <<"words">>] ``` to convert an erlang term into a utf8 binary containing a json string ```erlang -1> jsx:to_json([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). +1> jsx:encode([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). <<"{\"library\": \"jsx\", \"awesome\": true}">> -2> jsx:to_json([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). +2> jsx:encode([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). <<"[\"a\",\"list\",\"of\",\"words\"]">> ``` From 782e6573ef24e81cc6003bbb46359b0788c25c3c Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 22:05:21 -0700 Subject: [PATCH 31/52] add links to contributors github accounts --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 59a45b8..0981177 100644 --- a/README.markdown +++ b/README.markdown @@ -540,7 +540,7 @@ this event is emitted when syntactic analysis is completed. 
you should do any cl ## acknowledgements ## -jsx wouldn't be what it is without the contributions of paul davis, lloyd hilaiel, john engelhart, bob ippolito, fernando benavides, alex kropivny, steve strong, michael truog, dmitry kolesnikov and minn thant +jsx wouldn't be what it is without the contributions of [paul davis](https://github.com/davisp), [lloyd hilaiel](https://github.com/lloyd), [john engelhart](https://github.com/johnezang), [bob ippolito](https://github.com/etrepum), [fernando benavides](https://github.com/elbrujohalcon), [alex kropivny](https://github.com/amtal), [steve strong](https://github.com/srstrong), [michael truog](https://github.com/okeuday), [dmitry kolesnikov](https://github.com/fogfish) and [minn thant](https://github.com/emptytea) [json]: http://json.org [yajl]: http://lloyd.github.com/yajl From 192d4757d312487aaf7a8afe3f39c9ab2d27b026 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 22:30:37 -0700 Subject: [PATCH 32/52] fix typo in contributors --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 0981177..c78c0b8 100644 --- a/README.markdown +++ b/README.markdown @@ -540,7 +540,7 @@ this event is emitted when syntactic analysis is completed. you should do any cl ## acknowledgements ## -jsx wouldn't be what it is without the contributions of [paul davis](https://github.com/davisp), [lloyd hilaiel](https://github.com/lloyd), [john engelhart](https://github.com/johnezang), [bob ippolito](https://github.com/etrepum), [fernando benavides](https://github.com/elbrujohalcon), [alex kropivny](https://github.com/amtal), [steve strong](https://github.com/srstrong), [michael truog](https://github.com/okeuday), [dmitry kolesnikov](https://github.com/fogfish) and [minn thant](https://github.com/emptytea) +jsx wouldn't be what it is without the contributions of [paul davis](https://github.com/davisp), [lloyd hilaiel](https://github.com/lloyd), [john engelhart](https://github.com/johnezang), [bob ippolito](https://github.com/etrepum), [fernando benavides](https://github.com/elbrujohalcon), [alex kropivny](https://github.com/amtal), [steve strong](https://github.com/srstrong), [michael truog](https://github.com/okeuday), [dmitry kolesnikov](https://github.com/fogfish) and [emptytea](https://github.com/emptytea) [json]: http://json.org [yajl]: http://lloyd.github.com/yajl From f0950dc4c9577ff30f869a5b18f2a47a142130ee Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 22:34:30 -0700 Subject: [PATCH 33/52] typo in parser tests --- src/jsx_parser.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index 5b898bc..af549e5 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -602,7 +602,7 @@ encode_test_() -> {"naked float - full rep", ?_assertEqual( encode([{float, 1.23}, end_json]), [{float, 1.23}, end_json] )}, - {"naked literal - simple red", ?_assertEqual( + {"naked literal - simple rep", ?_assertEqual( encode([null, end_json]), [{literal, null}, end_json] )}, {"naked literal - full rep", ?_assertEqual( From d081bd2539b51b7c7ffafe84e3ffdd55ec3dc692 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Thu, 24 May 2012 23:07:03 -0700 Subject: [PATCH 34/52] change all stray badjson to badarg --- priv/test_cases/bad_low_surrogate.test | 2 +- priv/test_cases/bad_naked_number.test | 2 +- priv/test_cases/unbalanced_array.test | 2 +- priv/test_cases/unpaired_surrogate.test | 2 +- src/jsx.erl | 6 ++-- 
src/jsx_decoder.erl | 14 +++++----- src/jsx_encoder.erl | 15 +++++----- src/jsx_parser.erl | 37 +++++++++++++++++++------ 8 files changed, 51 insertions(+), 29 deletions(-) diff --git a/priv/test_cases/bad_low_surrogate.test b/priv/test_cases/bad_low_surrogate.test index 9f36e55..7d85b62 100644 --- a/priv/test_cases/bad_low_surrogate.test +++ b/priv/test_cases/bad_low_surrogate.test @@ -1,3 +1,3 @@ {name, "bad_low_surrogate"}. -{jsx, {error, badjson}}. +{jsx, {error, badarg}}. {json, "bad_low_surrogate.json"}. diff --git a/priv/test_cases/bad_naked_number.test b/priv/test_cases/bad_naked_number.test index 2781ebb..b620a6e 100644 --- a/priv/test_cases/bad_naked_number.test +++ b/priv/test_cases/bad_naked_number.test @@ -1,3 +1,3 @@ {name, "bad naked number"}. -{jsx, {error, badjson}}. +{jsx, {error, badarg}}. {json, "bad_naked_number.json"}. diff --git a/priv/test_cases/unbalanced_array.test b/priv/test_cases/unbalanced_array.test index c271c0d..41a2a5e 100644 --- a/priv/test_cases/unbalanced_array.test +++ b/priv/test_cases/unbalanced_array.test @@ -1,3 +1,3 @@ {name, "unbalanced array"}. -{jsx, {error, badjson}}. +{jsx, {error, badarg}}. {json, "unbalanced_array.json"}. diff --git a/priv/test_cases/unpaired_surrogate.test b/priv/test_cases/unpaired_surrogate.test index e2da5c1..972a34e 100644 --- a/priv/test_cases/unpaired_surrogate.test +++ b/priv/test_cases/unpaired_surrogate.test @@ -1,3 +1,3 @@ {name, "unpaired_surrogate"}. -{jsx, {error, badjson}}. +{jsx, {error, badarg}}. {json, "unpaired_surrogate.json"}. diff --git a/src/jsx.erl b/src/jsx.erl index 56d3e3f..cb389d8 100644 --- a/src/jsx.erl +++ b/src/jsx.erl @@ -290,20 +290,20 @@ test_decode(JSON, Flags) -> case (jsx_decoder:decoder(?MODULE, [], Flags))(JSON) of {incomplete, More} -> case More(<<" ">>) of - {incomplete, _} -> {error, badjson} + {incomplete, _} -> {error, badarg} ; Events -> Events end ; Events -> Events end catch - error:badarg -> {error, badjson} + error:badarg -> {error, badarg} end. incremental_decode(<>, Flags) -> P = jsx_decoder:decoder(?MODULE, [], Flags ++ [explicit_end]), try incremental_decode_loop(P(C), Rest) - catch error:badarg -> {error, badjson} + catch error:badarg -> {error, badarg} end. incremental_decode_loop({incomplete, More}, <<>>) -> diff --git a/src/jsx_decoder.erl b/src/jsx_decoder.erl index 5e72394..e42cff7 100644 --- a/src/jsx_decoder.erl +++ b/src/jsx_decoder.erl @@ -1245,7 +1245,7 @@ decode(JSON, Opts) -> try (decoder(jsx, [], Opts))(JSON) catch - error:badarg -> {error, badjson} + error:badarg -> {error, badarg} end. @@ -1482,7 +1482,7 @@ escapes_test_() -> noncharacters_test_() -> [ - {"noncharacters - badjson", + {"noncharacters - badarg", ?_assert(check_bad(noncharacters())) }, {"noncharacters - replaced", @@ -1493,7 +1493,7 @@ noncharacters_test_() -> extended_noncharacters_test_() -> [ - {"extended noncharacters - badjson", + {"extended noncharacters - badarg", ?_assert(check_bad(extended_noncharacters())) }, {"extended noncharacters - replaced", @@ -1504,7 +1504,7 @@ extended_noncharacters_test_() -> surrogates_test_() -> [ - {"surrogates - badjson", + {"surrogates - badarg", ?_assert(check_bad(surrogates())) }, {"surrogates - replaced", @@ -1515,7 +1515,7 @@ surrogates_test_() -> control_test_() -> [ - {"control characters - badjson", + {"control characters - badarg", ?_assert(check_bad(control_characters())) } ]. 
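a rough sketch of the behaviour these tests pin down, seen from the public api (the invalid input is an arbitrary example):

```erlang
1> jsx:is_json(<<"{]">>).
false
2> try jsx:decode(<<"{]">>) catch error:badarg -> invalid end.
invalid
```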
@@ -1523,7 +1523,7 @@ control_test_() -> reserved_test_() -> [ - {"reserved noncharacters - badjson", + {"reserved noncharacters - badarg", ?_assert(check_bad(reserved_space())) }, {"reserved noncharacters - replaced", @@ -1553,7 +1553,7 @@ good_characters_test_() -> check_bad(List) -> - [] == lists:dropwhile(fun({_, {error, badjson}}) -> true ; (_) -> false end, + [] == lists:dropwhile(fun({_, {error, badarg}}) -> true ; (_) -> false end, check(List, [], []) ). diff --git a/src/jsx_encoder.erl b/src/jsx_encoder.erl index 3b52f67..67e2642 100644 --- a/src/jsx_encoder.erl +++ b/src/jsx_encoder.erl @@ -495,7 +495,8 @@ maybe_replace(X, #opts{escaped_strings=true}) when X < 32 -> lists:reverse(jsx_utils:json_escape_sequence(X)); maybe_replace(noncharacter, #opts{replaced_bad_utf8=true}) -> [16#fffd]; maybe_replace(surrogate, #opts{replaced_bad_utf8=true}) -> [16#fffd]; -maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd]. +maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd]; +maybe_replace(_, _) -> erlang:error(badarg). -ifdef(TEST). @@ -707,7 +708,7 @@ encode(Term) -> encode(Term, []). encode(Term, Opts) -> try (encoder(jsx, [], Opts))(Term) - catch _:_ -> {error, badjson} + catch _:_ -> {error, badarg} end. @@ -903,7 +904,7 @@ escapes_test_() -> surrogates_test_() -> [ - {"surrogates - badjson", + {"surrogates - badarg", ?_assert(check_bad(surrogates())) }, {"surrogates - replaced", @@ -940,7 +941,7 @@ good_characters_test_() -> reserved_test_() -> [ - {"reserved noncharacters - badjson", + {"reserved noncharacters - badarg", ?_assert(check_bad(reserved_space())) }, {"reserved noncharacters - replaced", @@ -951,7 +952,7 @@ reserved_test_() -> noncharacters_test_() -> [ - {"noncharacters - badjson", + {"noncharacters - badarg", ?_assert(check_bad(noncharacters())) }, {"noncharacters - replaced", @@ -962,7 +963,7 @@ noncharacters_test_() -> extended_noncharacters_test_() -> [ - {"extended noncharacters - badjson", + {"extended noncharacters - badarg", ?_assert(check_bad(extended_noncharacters())) }, {"extended noncharacters - replaced", @@ -972,7 +973,7 @@ extended_noncharacters_test_() -> check_bad(List) -> - [] == lists:dropwhile(fun({_, {error, badjson}}) -> true ; (_) -> false end, + [] == lists:dropwhile(fun({_, {error, badarg}}) -> true ; (_) -> false end, check(List, [], []) ). diff --git a/src/jsx_parser.erl b/src/jsx_parser.erl index af549e5..eef70c1 100644 --- a/src/jsx_parser.erl +++ b/src/jsx_parser.erl @@ -549,7 +549,8 @@ maybe_replace(X, #opts{escaped_strings=true}) when X < 32 -> lists:reverse(jsx_utils:json_escape_sequence(X)); maybe_replace(noncharacter, #opts{replaced_bad_utf8=true}) -> [16#fffd]; maybe_replace(surrogate, #opts{replaced_bad_utf8=true}) -> [16#fffd]; -maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd]. +maybe_replace(badutf, #opts{replaced_bad_utf8=true}) -> [16#fffd]; +maybe_replace(_, _) -> erlang:error(badarg). -ifdef(TEST). @@ -573,9 +574,9 @@ incomplete_test_() -> encode(Term) -> encode(Term, []). -encode(Term, Opts) -> +encode(Term, Opts) -> try (parser(jsx, [], Opts))(Term) - catch _:_ -> {error, badjson} + catch error:badarg -> {error, badarg} end. @@ -707,6 +708,26 @@ encode_test_() -> )} ]. 
+encode_failures_test_() -> + [ + {"unwrapped values", ?_assertEqual( + {error, badarg}, + encode([{string, <<"a string\n">>}, {string, <<"a string\n">>}, end_json]) + )}, + {"unbalanced array", ?_assertEqual( + {error, badarg}, + encode([start_array, end_array, end_array, end_json]) + )}, + {"premature finish", ?_assertEqual( + {error, badarg}, + encode([start_object, {key, <<"key">>, start_array, end_json}]) + )}, + {"really premature finish", ?_assertEqual( + {error, badarg}, + encode([end_json]) + )} + ]. + xcode(Bin) -> xcode(Bin, #opts{}). @@ -938,7 +959,7 @@ escapes_test_() -> surrogates_test_() -> [ - {"surrogates - badjson", + {"surrogates - badarg", ?_assert(check_bad(surrogates())) }, {"surrogates - replaced", @@ -975,7 +996,7 @@ good_characters_test_() -> reserved_test_() -> [ - {"reserved noncharacters - badjson", + {"reserved noncharacters - badarg", ?_assert(check_bad(reserved_space())) }, {"reserved noncharacters - replaced", @@ -986,7 +1007,7 @@ reserved_test_() -> noncharacters_test_() -> [ - {"noncharacters - badjson", + {"noncharacters - badarg", ?_assert(check_bad(noncharacters())) }, {"noncharacters - replaced", @@ -997,7 +1018,7 @@ noncharacters_test_() -> extended_noncharacters_test_() -> [ - {"extended noncharacters - badjson", + {"extended noncharacters - badarg", ?_assert(check_bad(extended_noncharacters())) }, {"extended noncharacters - replaced", @@ -1007,7 +1028,7 @@ extended_noncharacters_test_() -> check_bad(List) -> - [] == lists:dropwhile(fun({_, {error, badjson}}) -> true ; (_) -> false end, + [] == lists:dropwhile(fun({_, {error, badarg}}) -> true ; (_) -> false end, check(List, [], []) ). From a78a6e28d8026b91cd03c4a860ce46dd1a8e5d08 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 07:12:43 -0700 Subject: [PATCH 35/52] experiment for readme formatting --- README.markdown | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/README.markdown b/README.markdown index c78c0b8..8a09152 100644 --- a/README.markdown +++ b/README.markdown @@ -489,13 +489,9 @@ Module:handle_event(Event, State) -> NewState semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: -* `start_object` +* `start_object`

the start of a json object

-the start of a json object
-
-* `end_object`
-
-the end of a json object
+* `end_object`

the end of a json object

* `start_array` From 228910caa97b7ccadd4c103d6faf61588d8f99e7 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 09:19:32 -0700 Subject: [PATCH 36/52] attempt #2 at better list formatting --- README.markdown | 115 ++++++++++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 58 deletions(-) diff --git a/README.markdown b/README.markdown index 8a09152..509f4ae 100644 --- a/README.markdown +++ b/README.markdown @@ -25,75 +25,74 @@ jsx may be built using either [sinan][sinan] or [rebar][rebar] ## quickstart ## -to build the library and run tests +* to build the library and run tests -```bash -tanga:jsx alisdair$ sinan build -tanga:jsx alisdair$ sinan -r tests eunit -``` -or + ```bash + tanga:jsx alisdair$ sinan build + tanga:jsx alisdair$ sinan -r tests eunit + ``` + or + ```bash + tanga:jsx alisdair$ rebar compile + tanga:jsx alisdair$ rebar eunit + ``` -```bash -tanga:jsx alisdair$ rebar compile -tanga:jsx alisdair$ rebar eunit -``` +* to convert a utf8 binary containing a json string into an erlang term -to convert a utf8 binary containing a json string into an erlang term + ```erlang + 1> jsx:decode(<<"{\"library\": \"jsx\", \"awesome\": true}">>). + [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] + 2> jsx:decode(<<"[\"a\",\"list\",\"of\",\"words\"]">>). + [<<"a">>, <<"list">>, <<"of">>, <<"words">>] + ``` -```erlang -1> jsx:decode(<<"{\"library\": \"jsx\", \"awesome\": true}">>). -[{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] -2> jsx:decode(<<"[\"a\",\"list\",\"of\",\"words\"]">>). -[<<"a">>, <<"list">>, <<"of">>, <<"words">>] -``` +* to convert an erlang term into a utf8 binary containing a json string -to convert an erlang term into a utf8 binary containing a json string + ```erlang + 1> jsx:encode([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). + <<"{\"library\": \"jsx\", \"awesome\": true}">> + 2> jsx:encode([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). + <<"[\"a\",\"list\",\"of\",\"words\"]">> + ``` -```erlang -1> jsx:encode([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). -<<"{\"library\": \"jsx\", \"awesome\": true}">> -2> jsx:encode([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). -<<"[\"a\",\"list\",\"of\",\"words\"]">> -``` +* to check if a binary or a term is valid json -to check if a binary or a term is valid json + ```erlang + 1> jsx:is_json(<<"[\"this is json\"]">>). + true + 2> jsx:is_json("[\"this is not\"]"). + false + 3> jsx:is_term([<<"this is a term">>]). + true + 4> jsx:is_term(["this is not"]). + false + ``` -```erlang -1> jsx:is_json(<<"[\"this is json\"]">>). -true -2> jsx:is_json("[\"this is not\"]"). -false -3> jsx:is_term([<<"this is a term">>]). -true -4> jsx:is_term(["this is not"]). -false -``` +* to minify some json -to minify some json + ```erlang + 1> jsx:minify(<<"{ + \"a list\": [ + 1, + 2, + 3 + ] + }">>). + <<"{\"a list\":[1,2,3]}">> + ``` -```erlang -1> jsx:minify(<<"{ - \"a list\": [ - 1, - 2, - 3 - ] -}">>). -<<"{\"a list\":[1,2,3]}">> -``` +* to prettify some json -to prettify some json - -```erlang -1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>). -<<"{ - \"a list\": [ - 1, - 2, - 3 - ] -}">> -``` + ```erlang + 1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>). 
+ <<"{ + \"a list\": [ + 1, + 2, + 3 + ] + }">> + ``` ## description ## From 9db3c0a71d5b48eb6a983e947b9d2477cb5c5429 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 09:30:54 -0700 Subject: [PATCH 37/52] more reformatting README --- README.markdown | 223 ++++++++++++++++++++++++------------------------ 1 file changed, 112 insertions(+), 111 deletions(-) diff --git a/README.markdown b/README.markdown index 509f4ae..fb5364e 100644 --- a/README.markdown +++ b/README.markdown @@ -115,47 +115,47 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is `array` | `[]` and `[JSON]` `object` | `[{}]` and `[{binary() OR atom(), JSON}]` -* json +* json -json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that. they are detailed in the [options](#data_types) section below + json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that. they are detailed in the [options](#data_types) section below -jsx also supports json fragments: valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint + jsx also supports json fragments: valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint -* erlang +* erlang -only the erlang terms in the table above are supported. non-supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly + only the erlang terms in the table above are supported. non-supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly -* numbers +* numbers -javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors + javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. 
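the integer/float mapping described in this bullet (the discussion of unrepresentable numbers continues below) might look like this in a hypothetical shell session; the values are only illustrative:

```erlang
1> jsx:decode(<<"[1, 2.0, 123456789123456789123456789]">>).
[1,2.0,123456789123456789123456789]
2> jsx:encode([1.0000000000000001]).
<<"[1.0]">>
```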
numbers not representable are beyond the concern of this implementation, and will result in parsing errors -when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` + when converting from erlang to json, numbers are represented with their shortest representation that will round trip without loss of precision. this means that some floats may be superficially dissimilar (although functionally equivalent). for example, `1.0000000000000001` will be represented by `1.0` -* strings +* strings -the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings. other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` + the json [spec][rfc4627] is frustratingly vague on the exact details of json strings. json must be unicode, but no encoding is specified. javascript explicitly allows strings containing codepoints explicitly disallowed by unicode. json allows implementations to set limits on the content of strings. other implementations attempt to resolve this in various ways. this implementation, in default operation, only accepts strings that meet the constraints set out in the json spec (strings are sequences of unicode codepoints deliminated by `"` (`u+0022`) that may not contain control codes unless properly escaped with `\` (`u+005c`)) and that are encoded in `utf8` -the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors + the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors -json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be + json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be -in the interest of pragmatism there is an option for looser parsing. see [options](#data_types) below + in the interest of pragmatism there is an option for looser parsing. see [options](#data_types) below -all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. 
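as a sketch of the conformance checking mentioned here, a hypothetical session contrasting the default behaviour with the `replaced_bad_utf8` option described later; the byte sequence is a utf8-style encoding of a lone surrogate, which is not valid utf8:

```erlang
1> BadString = <<237, 160, 128>>.   %% u+d800 as a lone, utf8-encoded surrogate
2> jsx:is_term([BadString]).
false
3> jsx:is_term([BadString], [replaced_bad_utf8]).
true
```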
noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see [options](#data_types) below) + all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see [options](#data_types) below) -this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings + this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings -* true, false and null +* true, false and null -the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise + the json primitives `true`, `false` and `null` are represented by the erlang atoms `true`, `false` and `null`. surprise -* arrays +* arrays -json arrays are represented with erlang lists of json values as described in this section + json arrays are represented with erlang lists of json values as described in this section -* objects +* objects -json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values + json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values ### incomplete input ### @@ -168,128 +168,129 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## data types ## -```erlang -json_term() = [json_term()] - | [{binary() | atom(), json_term()}] - | true - | false - | null - | integer() - | float() - | binary() -``` +* + ```erlang + json_term() = [json_term()] + | [{binary() | atom(), json_term()}] + | true + | false + | null + | integer() + | float() + | binary() + ``` -the erlang representation of json. binaries should be `utf8` encoded (but see below in options) +* the erlang representation of json. 
binaries should be `utf8` encoded (but see below in options) -```erlang -json_text() = binary() -``` + ```erlang + json_text() = binary() + ``` -a utf8 encoded binary containing a json string +* a utf8 encoded binary containing a json string -```erlang -tokens() = token() | [token()] + ```erlang + tokens() = token() | [token()] + + token() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | binary() + | {number, integer() | float()} + | {integer, integer()} + | {float, float()} + | integer() + | float() + | {literal, true} + | {literal, false} + | {literal, null} + | true + | false + | null + | end_json + ``` -token() = start_object - | end_object - | start_array - | end_array - | {key, binary()} - | {string, binary()} - | binary() - | {number, integer() | float()} - | {integer, integer()} - | {float, float()} - | integer() - | float() - | {literal, true} - | {literal, false} - | {literal, null} - | true - | false - | null - | end_json -``` +* the internal representation used during syntactic analysis -the internal representation used during syntactic analysis + ```erlang + event() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | {integer, integer()} + | {float, float()} + | {literal, true} + | {literal, false} + | {literal, null} + | end_json + ``` -```erlang -event() = start_object - | end_object - | start_array - | end_array - | {key, binary()} - | {string, binary()} - | {integer, integer()} - | {float, float()} - | {literal, true} - | {literal, false} - | {literal, null} - | end_json -``` +* the internal representation used during semantic analysis -the internal representation used during semantic analysis + ```erlang + options() = [option()] -```erlang -options() = [option()] + option() = replaced_bad_utf8 + | escaped_forward_slashes + | single_quoted_strings + | unescaped_jsonp + | comments + | escaped_strings + | dirty_strings + | ignored_bad_escapes + | relax + | explicit_end + ``` -option() = replaced_bad_utf8 - | escaped_forward_slashes - | single_quoted_strings - | unescaped_jsonp - | comments - | escaped_strings - | dirty_strings - | ignored_bad_escapes - | relax - | explicit_end -``` + jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details -jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details + - `replaced_bad_utf8` -* `replaced_bad_utf8` + json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints -json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints + - `escaped_forward_slashes` -* `escaped_forward_slashes` + json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. 
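a hypothetical pair of calls showing the effect of this flag (the description of the option continues below); the date string is just an example value:

```erlang
1> jsx:encode([<<"1/1/2012">>]).
<<"[\"1/1/2012\"]">>
2> jsx:encode([<<"1/1/2012">>], [escaped_forward_slashes]).
<<"[\"1\\/1\\/2012\"]">>
```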
this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document -json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document + - `single_quoted_strings` -* `single_quoted_strings` + some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes -some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes + double quotes must ALWAYS be escaped, regardless of what kind of quotes delimit the string they are found in -double quotes must ALWAYS be escaped, regardless of what kind of quotes delimit the string they are found in + the parser will never emit json with keys or strings delimited by single quotes -the parser will never emit json with keys or strings delimited by single quotes + - `unescaped_jsonp` -* `unescaped_jsonp` + javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping -javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping + - `comments` -* `comments` + json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) -json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) + - `escaped_strings` -* `escaped_strings` + by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes -by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. 
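a hypothetical pair of decodes illustrating the difference this flag makes (its description continues below):

```erlang
1> jsx:decode(<<"[\"a\\nnewline\"]">>).
[<<"a\nnewline">>]
2> jsx:decode(<<"[\"a\\nnewline\"]">>, [escaped_strings]).
[<<"a\\nnewline">>]
```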
this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes + - `dirty_strings` -* `dirty_strings` + json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping -json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping + - `ignored_bad_escapes` -* `ignored_bad_escapes` + during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped -during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped + - `explicit_end` -* `explicit_end` + this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` -this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` + - `relax` -* `relax` - -relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can + relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can ## exports ## From abe3e1958a3b1c8dda97088cbac10c0b0f396cfc Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 09:36:30 -0700 Subject: [PATCH 38/52] minor reformat fix in README --- README.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.markdown b/README.markdown index fb5364e..30c237b 100644 --- a/README.markdown +++ b/README.markdown @@ -168,8 +168,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## data types ## -* - ```erlang +* ```erlang json_term() = [json_term()] | [{binary() | atom(), json_term()}] | true From efcea75c437a998e41c8d2e14f51038830bc5a9e Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 10:14:20 -0700 Subject: [PATCH 39/52] moooore format fixes --- README.markdown | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/README.markdown b/README.markdown index 30c237b..8aee87f 100644 --- a/README.markdown +++ b/README.markdown @@ -167,8 +167,9 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## data types ## +* `json_term()` -* ```erlang + ```erlang json_term() = [json_term()] | [{binary() | atom(), json_term()}] | true @@ -179,13 +180,17 @@ however, it is important to recognize that jsx is greedy by default. jsx will co | binary() ``` -* the erlang representation of json. binaries should be `utf8` encoded (but see below in options) + the erlang representation of json. 
binaries should be `utf8` encoded (but see below in options) + +* `json_text()` ```erlang json_text() = binary() ``` -* a utf8 encoded binary containing a json string + a utf8 encoded binary containing a json string + +* `tokens()` & `token()` ```erlang tokens() = token() | [token()] @@ -211,7 +216,9 @@ however, it is important to recognize that jsx is greedy by default. jsx will co | end_json ``` -* the internal representation used during syntactic analysis + the internal representation used during syntactic analysis + +* `event()` ```erlang event() = start_object @@ -228,7 +235,9 @@ however, it is important to recognize that jsx is greedy by default. jsx will co | end_json ``` -* the internal representation used during semantic analysis + the internal representation used during semantic analysis + +* `options()` & `option()` ```erlang options() = [option()] From 608b05876c5d72281091668c710972104656c79b Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 11:55:26 -0700 Subject: [PATCH 40/52] more list formatting adjustments --- README.markdown | 307 ++++++++++++++++++++++++------------------------ 1 file changed, 155 insertions(+), 152 deletions(-) diff --git a/README.markdown b/README.markdown index 8aee87f..6ed06c2 100644 --- a/README.markdown +++ b/README.markdown @@ -304,242 +304,245 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## exports ## -### encoder/3, decoder/3 and parser/3 ### +* ### encoder/3, decoder/3 and parser/3 ### -```erlang -decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) -encoder(Module, Args, Opts) -> Fun((JSONTerm) -> any()) -parser(Module, Args, Opts) -> Fun((Tokens) -> any()) + ```erlang + decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) + encoder(Module, Args, Opts) -> Fun((JSONTerm) -> any()) + parser(Module, Args, Opts) -> Fun((Tokens) -> any()) - Module = atom() - Args = any() - Opts = options() - JSONText = json_text() - JSONTerm = json_term() - Tokens = tokens() -``` + Module = atom() + Args = any() + Opts = options() + JSONText = json_text() + JSONTerm = json_term() + Tokens = tokens() + ``` -jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages (actually, semantic analysis takes place during syntactic analysis, for efficiency). included are two tokenizers; one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user-defined tokenizers (`parser/3`) + jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages (actually, semantic analysis takes place during syntactic analysis, for efficiency). included are two tokenizers; one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). 
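purely as an illustration of the split described here, a made-up callback module (it is not part of jsx or of this patch) that counts the events emitted while decoding, followed by a hypothetical session wiring it into `decoder/3`:

```erlang
%% hypothetical handler: init/1 seeds the state, handle_event/2 folds over
%% the event stream and returns the final count when end_json is seen
-module(event_counter).
-export([init/1, handle_event/2]).

init([]) -> 0.

handle_event(end_json, Count) -> Count;
handle_event(_Event, Count) -> Count + 1.
```

```erlang
1> F = jsx:decoder(event_counter, [], []).
2> F(<<"[true, false, null]">>).
5
```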
there is also an entry point to the syntactic analysis stage for use with user-defined tokenizers (`parser/3`) -all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails + all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails -`Module` is the name of the callback module + `Module` is the name of the callback module -`Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state + `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state -`Opts` are detailed [above](#data_types) + `Opts` are detailed [above](#data_types) -see [below](#callback_exports) for details on the callback module + see [below](#callback_exports) for details on the callback module + +* ### decode/1,2 ### + + ```erlang + decode(JSON) -> Term + decode(JSON, Opts) -> Term + + JSON = json_text() + Term = json_term() + Opts = [option() | labels | {labels, Label} | {post_decode, F}] + Label = binary | atom | existing_atom + F = fun((any()) -> any()) + ``` + + `decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) + + the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms and results in a badarg error if the keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom` except it will not add new atoms to the atom table + + `{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: + + ```erlang + 1> F = fun(V) when is_list(V) -> V; (V) -> false end. + 2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). + [{<<"a list">>, [false, false, false]}] + ``` + + declaring more than one post-decoder will result in a `badarg` error exception + + raises a `badarg` error exception if input is not valid json -### decode/1,2 ### +* ### encode/1,2 ### -```erlang -decode(JSON) -> Term -decode(JSON, Opts) -> Term + ```erlang + encode(Term) -> JSON + encode(Term, Opts) -> JSON - JSON = json_text() - Term = json_term() - Opts = [option() | labels | {labels, Label} | {post_decode, F}] - Label = binary | atom | existing_atom - F = fun((any()) -> any()) -``` + Term = json_term() + JSON = json_text() + Opts = [option() | {pre_encode, F} | space | {space, N} | indent | {indent, N}] + F = fun((any()) -> any()) + N = pos_integer() + ``` -`decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) + `encode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) -the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. 
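a hypothetical call showing the `labels` conversion sketched here (the description of `atom` and `existing_atom` continues below):

```erlang
1> jsx:decode(<<"{\"library\": \"jsx\"}">>).
[{<<"library">>,<<"jsx">>}]
2> jsx:decode(<<"{\"library\": \"jsx\"}">>, [{labels, atom}]).
[{library,<<"jsx">>}]
```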
`atom` converts keys to erlang atoms and results in a badarg error if the keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom` except it will not add new atoms to the atom table + the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` -`{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: + the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` -```erlang -1> F = fun(V) when is_list(V) -> V; (V) -> false end. -2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). -[{<<"a list">>, [false, false, false]}] -``` + `{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. it may return any valid json value to be substituted in the returned json. for example: -declaring more than one post-decoder will result in a `badarg` error exception + ```erlang + 1> F = fun(V) when is_list(V) -> V; (V) -> false end. + 2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]). + <<"{\"a list\": [false, false, false]}">> + ``` -raises a `badarg` error exception if input is not valid json + declaring more than one pre-encoder will result in a `badarg` error exception + + raises a `badarg` error exception if input is not a valid erlang representation of json -### encode/1,2 ### +* ### format/1,2 ### -```erlang -encode(Term) -> JSON -encode(Term, Opts) -> JSON + ```erlang + format(JSON) -> JSON + format(JSON, Opts) -> JSON - Term = json_term() - JSON = json_text() - Opts = [option() | {pre_encode, F} | space | {space, N} | indent | {indent, N}] - F = fun((any()) -> any()) - N = pos_integer() -``` + JSON = json_text() + Opts = [option() | space | {space, N} | indent | {indent, N}] + N = pos_integer() + ``` -`encode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) + `format` parses a json text (a `utf8` encoded binary) and produces a new json text according to the format rules specified by `Opts` -`{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. it may return any valid json value to be substituted in the returned json. for example: + the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` + + the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` -```erlang -1> F = fun(V) when is_list(V) -> V; (V) -> false end. -2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]). -<<"{\"a list\": [false, false, false]}">> -``` - -declaring more than one pre-encoder will result in a `badarg` error exception - -the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. 
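a hypothetical before/after for `{space, N}` (the default and the companion `{indent, N}` option are described just below):

```erlang
1> jsx:encode([{<<"a">>, true}, {<<"b">>, false}]).
<<"{\"a\":true,\"b\":false}">>
2> jsx:encode([{<<"a">>, true}, {<<"b">>, false}], [{space, 1}]).
<<"{\"a\": true, \"b\": false}">>
```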
the default is `{space, 0}` - -the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` - -raises a `badarg` error exception if input is not a valid erlang representation of json + raises a `badarg` error exception if input is not valid json -### format/1,2 ### +* ### minify/1 ### -```erlang -format(JSON) -> JSON -format(JSON, Opts) -> JSON + ```erlang + minify(JSON) -> JSON - JSON = json_text() - Opts = [option() | space | {space, N} | indent | {indent, N}] - N = pos_integer() -``` + JSON = json_text() + ``` -`format` parses a json text (a `utf8` encoded binary) and produces a new json text according to the format rules specified by `Opts` + `minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace -the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` - -the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` - -raises a `badarg` error exception if input is not valid json + raises a `badarg` error exception if input is not valid json -### minify/1 ### +* ### prettify/1 ### -```erlang -minify(JSON) -> JSON + ```erlang + prettify(JSON) -> JSON - JSON = json_text() -``` + JSON = json_text() + ``` -`minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace + `prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` -raises a `badarg` error exception if input is not valid json + raises a `badarg` error exception if input is not valid json -### prettify/1 ### +* ### is_json/1,2 ### -```erlang -prettify(JSON) -> JSON + ```erlang + is_json(MaybeJSON) -> true | false + is_json(MaybeJSON, Opts) -> true | false - JSON = json_text() -``` + MaybeJSON = any() + Opts = options() + ``` -`prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` + returns true if input is a valid json text, false if not -raises a `badarg` error exception if input is not valid json + what exactly constitutes valid json may be altered per [options](#data_types) -### is_json/1,2 ### +* ### is_term/1,2 ### -```erlang -is_json(MaybeJSON) -> true | false -is_json(MaybeJSON, Opts) -> true | false + ```erlang + is_term(MaybeJSON) -> true | false + is_term(MaybeJSON, Opts) -> true | false - MaybeJSON = any() - Opts = options() -``` + MaybeJSON = any() + Opts = options() + ``` -returns true if input is a valid json text, false if not + returns true if input is a valid erlang representation of json, false if not -what exactly constitutes valid json may be altered per [options](#data_types) - - -### is_term/1,2 ### - -```erlang -is_term(MaybeJSON) -> true | false -is_term(MaybeJSON, Opts) -> true | false - - MaybeJSON = any() - Opts = options() -``` - -returns true if input is a valid erlang representation of json, false if not - -what exactly constitutes valid json may be altered per [options](#data_types) + what exactly constitutes valid json may be altered per [options](#data_types) ## callback exports ## the following 
functions should be exported from a jsx callback module -### Module:init/1 ### +* ### Module:init/1 ### -```erlang -Module:init(Args) -> InitialState + ```erlang + Module:init(Args) -> InitialState - Args = any() - InitialState = any() -``` + Args = any() + InitialState = any() + ``` -whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` + whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` -### Module:handle_event/2 ### +* ### Module:handle_event/2 ### -```erlang -Module:handle_event(Event, State) -> NewState + ```erlang + Module:handle_event(Event, State) -> NewState - Event = events() - State = any() - NewState = any() -``` + Event = events() + State = any() + NewState = any() + ``` -semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: + semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: -* `start_object`

the start of a json object

+ - `start_object`
+
+ the start of a json object

-* `end_object`

the end of a json object

+ - `end_object` + + the end of a json object -* `start_array` + - `start_array` -the start of a json array + the start of a json array -* `end_array` + - `end_array` -the end of a json array + the end of a json array -* `{key, binary()}` + - `{key, binary()}` -a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions + a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions -* `{string, binary()}` + - `{string, binary()}` -a json string. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions + a json string. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions -* `{integer, integer()}` + - `{integer, integer()}` -an erlang integer (bignum) + an erlang integer (bignum) -* `{float, float()}` + - `{float, float()}` -an erlang float + an erlang float -* `{literal, true}` + - `{literal, true}` -the atom `true` + the atom `true` -* `{literal, false}` + - `{literal, false}` -the atom `false` + the atom `false` -* `{literal, null}` + - `{literal, null}` -the atom `null` + the atom `null` -* `end_json` + - `end_json` -this event is emitted when syntactic analysis is completed. you should do any cleanup and return the result of your semantic analysis + this event is emitted when syntactic analysis is completed. you should do any cleanup and return the result of your semantic analysis ## acknowledgements ## From b9b4472cf5fe4b6b97543936ecb84e873745c8ac Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 12:00:19 -0700 Subject: [PATCH 41/52] we'll just edit it live --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 6ed06c2..e7c2684 100644 --- a/README.markdown +++ b/README.markdown @@ -484,7 +484,7 @@ the following functions should be exported from a jsx callback module whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` -* ### Module:handle_event/2 ### +### * Module:handle_event/2 ### ```erlang Module:handle_event(Event, State) -> NewState From e97234c84103913c4eb3d148ba6f6347752a6b64 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 12:04:44 -0700 Subject: [PATCH 42/52] that didn't work, this might --- README.markdown | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.markdown b/README.markdown index e7c2684..78f9cdf 100644 --- a/README.markdown +++ b/README.markdown @@ -304,7 +304,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## exports ## -* ### encoder/3, decoder/3 and parser/3 ### +* `encoder/3`, `decoder/3` & `parser/3` ```erlang decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) @@ -331,7 +331,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co see [below](#callback_exports) for details on the callback module -* ### decode/1,2 ### +* `decode/1,2` ```erlang decode(JSON) -> Term @@ -361,7 +361,7 @@ however, it is important to recognize that jsx is greedy by default. 
jsx will co raises a `badarg` error exception if input is not valid json -* ### encode/1,2 ### +* `encode/1,2` ```erlang encode(Term) -> JSON @@ -393,7 +393,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co raises a `badarg` error exception if input is not a valid erlang representation of json -* ### format/1,2 ### +* `format/1,2` ```erlang format(JSON) -> JSON @@ -413,7 +413,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co raises a `badarg` error exception if input is not valid json -* ### minify/1 ### +* `minify/1` ```erlang minify(JSON) -> JSON @@ -426,7 +426,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co raises a `badarg` error exception if input is not valid json -* ### prettify/1 ### +* `prettify/1` ```erlang prettify(JSON) -> JSON @@ -439,7 +439,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co raises a `badarg` error exception if input is not valid json -* ### is_json/1,2 ### +* `is_json/1,2` ```erlang is_json(MaybeJSON) -> true | false @@ -454,7 +454,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co what exactly constitutes valid json may be altered per [options](#data_types) -* ### is_term/1,2 ### +* `is_term/1,2` ```erlang is_term(MaybeJSON) -> true | false @@ -473,7 +473,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co the following functions should be exported from a jsx callback module -* ### Module:init/1 ### +* `Module:init/1 ```erlang Module:init(Args) -> InitialState @@ -484,7 +484,7 @@ the following functions should be exported from a jsx callback module whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` -### * Module:handle_event/2 ### +* `Module:handle_event/2` ```erlang Module:handle_event(Event, State) -> NewState From 9d1b933297bb325fe0de8b326219f24b4449b655 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 12:05:41 -0700 Subject: [PATCH 43/52] tiniest of fixes, almost miniscule --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 78f9cdf..18e51f9 100644 --- a/README.markdown +++ b/README.markdown @@ -473,7 +473,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co the following functions should be exported from a jsx callback module -* `Module:init/1 +* `Module:init/1` ```erlang Module:init(Args) -> InitialState From cc582a2b9775b759eea799419418c27a36935565 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 12:57:05 -0700 Subject: [PATCH 44/52] conversational README --- README.markdown | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/README.markdown b/README.markdown index 18e51f9..4a1f787 100644 --- a/README.markdown +++ b/README.markdown @@ -100,12 +100,15 @@ jsx may be built using either [sinan][sinan] or [rebar][rebar] jsx is an erlang application for consuming, producing and manipulating [json][json] -jsx strives to be quick but complete, correct but pragmatic, and approachable but powerful. it handles json as encountered in common use with extensions to handle even less common usage. 
comments, strings quoted with `'` instead of `"`, json fragments and json streams, and invalid utf8 are all supported +json has a [spec][rfc4627] but common usage differs subtly. it's common usage jsx attempts to address, with guidance from the spec -jsx is a collection of functions useful when dealing with json in erlang. jsx is also a json compiler with separate parsing and semantic analysis stages. new, custom, semantic analysis steps are relatively simple to add. the syntactic analysis stage is also exposed separately for use with user defined tokenizers +all json produced and consumed by jsx should be `utf8` encoded text or a reasonable approximation thereof. ascii works too, but anything beyond that i'm not going to make any promises +the [spec][rfc4627] thinks json values must be wrapped in a json array or object but everyone else disagrees so jsx allows naked json values by default. if you're a curmudgeon who's offended by this deviation, you can just check that all values returned by jsx functions are lists, alright? -### json <-> erlang mapping ### +here is a table of how various json values map to erlang: + +#### json <-> erlang mapping #### **json** | **erlang** --------------------------------|-------------------------------- @@ -115,16 +118,6 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is `array` | `[]` and `[JSON]` `object` | `[{}]` and `[{binary() OR atom(), JSON}]` -* json - - json must be a binary encoded in `utf8`. if it's invalid `utf8` or invalid json, it probably won't parse without errors. there are a few non-standard extensions to the parser available that may change that. they are detailed in the [options](#data_types) section below - - jsx also supports json fragments: valid json values that are not complete json. that means jsx will parse things like `<<"1">>`, `<<"true">>` and `<<"\"hello world\"">>` without complaint - -* erlang - - only the erlang terms in the table above are supported. non-supported terms result in badarg errors. jsx is never going to support erlang lists instead of binaries, mostly because you can't discriminate between lists of integers and strings without hinting, and hinting is silly - * numbers javascript and thus json represent all numeric values with floats. as this is woefully insufficient for many uses, **jsx**, just like erlang, supports bigints. whenever possible, this library will interpret json numbers that look like integers as integers. other numbers will be converted to erlang's floating point type, which is nearly but not quite iee754. negative zero is not representable in erlang (zero is unsigned in erlang and `0` is equivalent to `-0`) and will be interpreted as regular zero. numbers not representable are beyond the concern of this implementation, and will result in parsing errors @@ -137,11 +130,9 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors - json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. 
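a hypothetical decode showing the escape conversion just described; the result is the utf8 encoding of `café`, which the shell prints as raw bytes:

```erlang
1> jsx:decode(<<"[\"caf\\u00e9\"]">>).
[<<99,97,102,195,169>>]
```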
this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec (like the two cases above) will not be + json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec will not be. in the interest of pragmatism there is an option for looser parsing. see the options section in [data types](#data_types) - in the interest of pragmatism there is an option for looser parsing. see [options](#data_types) below - - all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although see [options](#data_types) below) + all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although, again, see the options section in [data types](#data_types)) this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings @@ -155,10 +146,10 @@ jsx is a collection of functions useful when dealing with json in erlang. jsx is * objects - json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string`, above, or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values + json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string` or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values -### incomplete input ### +#### incomplete input #### jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory @@ -180,7 +171,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co | binary() ``` - the erlang representation of json. binaries should be `utf8` encoded (but see below in options) + the erlang representation of json. binaries should be `utf8` encoded, or close at least * `json_text()` @@ -327,9 +318,9 @@ however, it is important to recognize that jsx is greedy by default. 
jsx will co `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state - `Opts` are detailed [above](#data_types) + `Opts` are detailed in [options](#data_types) - see [below](#callback_exports) for details on the callback module + see [callback exports](#callback_exports) for details on the callback module * `decode/1,2` From e6ec02b71babe03d573a067ac77a2c74aca7c98f Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 13:03:06 -0700 Subject: [PATCH 45/52] better internal linkage --- README.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.markdown b/README.markdown index 4a1f787..712974a 100644 --- a/README.markdown +++ b/README.markdown @@ -153,7 +153,7 @@ here is a table of how various json values map to erlang: jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory -however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see [options](#data_types) +however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see the options, in [data types](#data_types) ## data types ## @@ -209,7 +209,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co the internal representation used during syntactic analysis -* `event()` +* `events()` & `event()` ```erlang event() = start_object @@ -442,7 +442,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co returns true if input is a valid json text, false if not - what exactly constitutes valid json may be altered per [options](#data_types) + what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) * `is_term/1,2` @@ -457,7 +457,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co returns true if input is a valid erlang representation of json, false if not - what exactly constitutes valid json may be altered per [options](#data_types) + what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) ## callback exports ## @@ -505,11 +505,11 @@ the following functions should be exported from a jsx callback module - `{key, binary()}` - a key in a json object. 
this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions + a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions - `{string, binary()}` - a json string. it will usually be a `utf8` encoded binary. see [options](#data_types) for possible exceptions + a json string. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions - `{integer, integer()}` From 0e53893fcec00d3050197e2e8c6a651225556bcb Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Fri, 25 May 2012 13:05:28 -0700 Subject: [PATCH 46/52] missed one --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 712974a..3203c23 100644 --- a/README.markdown +++ b/README.markdown @@ -318,7 +318,7 @@ however, it is important to recognize that jsx is greedy by default. jsx will co `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state - `Opts` are detailed in [options](#data_types) + `Opts` are detailed in [data types](#data_types) see [callback exports](#callback_exports) for details on the callback module From 56ff3b914eb3584f52ea4d8a24111f1eb02de85f Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sat, 26 May 2012 18:21:50 -0700 Subject: [PATCH 47/52] delistify README --- README.markdown | 572 ++++++++++++++++++++++++------------------------ 1 file changed, 284 insertions(+), 288 deletions(-) diff --git a/README.markdown b/README.markdown index 3203c23..20472ae 100644 --- a/README.markdown +++ b/README.markdown @@ -25,74 +25,74 @@ jsx may be built using either [sinan][sinan] or [rebar][rebar] ## quickstart ## -* to build the library and run tests +#### to build the library and run tests #### - ```bash - tanga:jsx alisdair$ sinan build - tanga:jsx alisdair$ sinan -r tests eunit - ``` - or - ```bash - tanga:jsx alisdair$ rebar compile - tanga:jsx alisdair$ rebar eunit - ``` +```bash +tanga:jsx alisdair$ sinan build +tanga:jsx alisdair$ sinan -r tests eunit +``` +or +```bash +tanga:jsx alisdair$ rebar compile +tanga:jsx alisdair$ rebar eunit +``` -* to convert a utf8 binary containing a json string into an erlang term +#### to convert a utf8 binary containing a json string into an erlang term #### - ```erlang - 1> jsx:decode(<<"{\"library\": \"jsx\", \"awesome\": true}">>). - [{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] - 2> jsx:decode(<<"[\"a\",\"list\",\"of\",\"words\"]">>). - [<<"a">>, <<"list">>, <<"of">>, <<"words">>] - ``` +```erlang +1> jsx:decode(<<"{\"library\": \"jsx\", \"awesome\": true}">>). +[{<<"library">>,<<"jsx">>},{<<"awesome">>,true}] +2> jsx:decode(<<"[\"a\",\"list\",\"of\",\"words\"]">>). +[<<"a">>, <<"list">>, <<"of">>, <<"words">>] +``` -* to convert an erlang term into a utf8 binary containing a json string +#### to convert an erlang term into a utf8 binary containing a json string #### - ```erlang - 1> jsx:encode([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). - <<"{\"library\": \"jsx\", \"awesome\": true}">> - 2> jsx:encode([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). - <<"[\"a\",\"list\",\"of\",\"words\"]">> - ``` +```erlang +1> jsx:encode([{<<"library">>,<<"jsx">>},{<<"awesome">>,true}]). 
+<<"{\"library\": \"jsx\", \"awesome\": true}">> +2> jsx:encode([<<"a">>, <<"list">>, <<"of">>, <<"words">>]). +<<"[\"a\",\"list\",\"of\",\"words\"]">> +``` -* to check if a binary or a term is valid json +#### to check if a binary or a term is valid json #### - ```erlang - 1> jsx:is_json(<<"[\"this is json\"]">>). - true - 2> jsx:is_json("[\"this is not\"]"). - false - 3> jsx:is_term([<<"this is a term">>]). - true - 4> jsx:is_term(["this is not"]). - false - ``` +```erlang +1> jsx:is_json(<<"[\"this is json\"]">>). +true +2> jsx:is_json("[\"this is not\"]"). +false +3> jsx:is_term([<<"this is a term">>]). +true +4> jsx:is_term(["this is not"]). +false +``` -* to minify some json +#### to minify some json #### - ```erlang - 1> jsx:minify(<<"{ - \"a list\": [ - 1, - 2, - 3 - ] - }">>). - <<"{\"a list\":[1,2,3]}">> - ``` +```erlang +1> jsx:minify(<<"{ + \"a list\": [ + 1, + 2, + 3 + ] +}">>). +<<"{\"a list\":[1,2,3]}">> +``` -* to prettify some json +#### to prettify some json #### - ```erlang - 1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>). - <<"{ - \"a list\": [ - 1, - 2, - 3 - ] - }">> - ``` +```erlang +1> jsx:prettify(<<"{\"a list\":[1,2,3]}">>). +<<"{ + \"a list\": [ + 1, + 2, + 3 + ] +}">> +``` ## description ## @@ -108,7 +108,7 @@ the [spec][rfc4627] thinks json values must be wrapped in a json array or object here is a table of how various json values map to erlang: -#### json <-> erlang mapping #### +### json <-> erlang mapping ### **json** | **erlang** --------------------------------|-------------------------------- @@ -149,7 +149,7 @@ here is a table of how various json values map to erlang: json objects are represented by erlang proplists. the empty object has the special representation `[{}]` to differentiate it from the empty list. ambiguities like `[true, false]` prevent the use of the shorthand form of property lists using atoms as properties so all properties must be tuples. all keys must be encoded as in `string` or as atoms (which will be escaped and converted to binaries for presentation to handlers). values should be valid json values -#### incomplete input #### +### incomplete input ### jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory @@ -158,104 +158,100 @@ however, it is important to recognize that jsx is greedy by default. jsx will co ## data types ## -* `json_term()` +#### `json_term()` #### - ```erlang - json_term() = [json_term()] - | [{binary() | atom(), json_term()}] - | true - | false - | null - | integer() - | float() - | binary() - ``` +```erlang +json_term() = [json_term()] + | [{binary() | atom(), json_term()}] + | true + | false + | null + | integer() + | float() + | binary() +``` - the erlang representation of json. binaries should be `utf8` encoded, or close at least +the erlang representation of json. 
binaries should be `utf8` encoded, or close at least -* `json_text()` +#### `json_text()` #### - ```erlang - json_text() = binary() - ``` +```erlang +json_text() = binary() +``` - a utf8 encoded binary containing a json string +a utf8 encoded binary containing a json string -* `tokens()` & `token()` +#### `token()` #### - ```erlang - tokens() = token() | [token()] - - token() = start_object - | end_object - | start_array - | end_array - | {key, binary()} - | {string, binary()} - | binary() - | {number, integer() | float()} - | {integer, integer()} - | {float, float()} - | integer() - | float() - | {literal, true} - | {literal, false} - | {literal, null} - | true - | false - | null - | end_json - ``` +```erlang +token() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | binary() + | {number, integer() | float()} + | {integer, integer()} + | {float, float()} + | integer() + | float() + | {literal, true} + | {literal, false} + | {literal, null} + | true + | false + | null + | end_json +``` - the internal representation used during syntactic analysis +the internal representation used during syntactic analysis -* `events()` & `event()` +#### `event()` #### - ```erlang - event() = start_object - | end_object - | start_array - | end_array - | {key, binary()} - | {string, binary()} - | {integer, integer()} - | {float, float()} - | {literal, true} - | {literal, false} - | {literal, null} - | end_json - ``` +```erlang +event() = start_object + | end_object + | start_array + | end_array + | {key, binary()} + | {string, binary()} + | {integer, integer()} + | {float, float()} + | {literal, true} + | {literal, false} + | {literal, null} + | end_json +``` - the internal representation used during semantic analysis +the internal representation used during semantic analysis -* `options()` & `option()` +#### `option()` #### - ```erlang - options() = [option()] +```erlang +option() = replaced_bad_utf8 + | escaped_forward_slashes + | single_quoted_strings + | unescaped_jsonp + | comments + | escaped_strings + | dirty_strings + | ignored_bad_escapes + | relax + | explicit_end +``` - option() = replaced_bad_utf8 - | escaped_forward_slashes - | single_quoted_strings - | unescaped_jsonp - | comments - | escaped_strings - | dirty_strings - | ignored_bad_escapes - | relax - | explicit_end - ``` +jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details - jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details - - - `replaced_bad_utf8` + - `replaced_bad_utf8` json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints - - `escaped_forward_slashes` + - `escaped_forward_slashes` json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. 
this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document - - `single_quoted_strings` + - `single_quoted_strings` some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes @@ -263,31 +259,31 @@ however, it is important to recognize that jsx is greedy by default. jsx will co the parser will never emit json with keys or strings delimited by single quotes - - `unescaped_jsonp` + - `unescaped_jsonp` javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping - - `comments` + - `comments` json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) - - `escaped_strings` + - `escaped_strings` by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes - - `dirty_strings` + - `dirty_strings` json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping - - `ignored_bad_escapes` + - `ignored_bad_escapes` during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped - - `explicit_end` + - `explicit_end` this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` - - `relax` + - `relax` relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can @@ -295,245 +291,245 @@ however, it is important to recognize that jsx is greedy by default. 
jsx will co ## exports ## -* `encoder/3`, `decoder/3` & `parser/3` +#### `encoder/3`, `decoder/3` & `parser/3` #### - ```erlang - decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) - encoder(Module, Args, Opts) -> Fun((JSONTerm) -> any()) - parser(Module, Args, Opts) -> Fun((Tokens) -> any()) +```erlang +decoder(Module, Args, Opts) -> Fun((JSONText) -> any()) +encoder(Module, Args, Opts) -> Fun((JSONTerm) -> any()) +parser(Module, Args, Opts) -> Fun((Tokens) -> any()) - Module = atom() - Args = any() - Opts = options() - JSONText = json_text() - JSONTerm = json_term() - Tokens = tokens() - ``` + Module = atom() + Args = any() + Opts = [option()] + JSONText = json_text() + JSONTerm = json_term() + Tokens = token() | [token()] +``` - jsx is a json compiler with distinct tokenizing, syntactic analysis and semantic analysis stages (actually, semantic analysis takes place during syntactic analysis, for efficiency). included are two tokenizers; one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user-defined tokenizers (`parser/3`) +jsx is a json compiler with interleaved tokenizing, syntactic analysis and semantic analysis stages. included are two tokenizers; one that handles json texts (`decoder/3`) and one that handles erlang terms (`encoder/3`). there is also an entry point to the syntactic analysis stage for use with user-defined tokenizers (`parser/3`) - all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails +all three functions return an anonymous function that takes the appropriate type of input and returns the result of performing semantic analysis, the tuple `{incomplete, F}` where `F` is a new anonymous function (see [incomplete input](#incomplete_input)) or a `badarg` error exception if syntactic analysis fails - `Module` is the name of the callback module +`Module` is the name of the callback module - `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state +`Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state - `Opts` are detailed in [data types](#data_types) +`Opts` are detailed in [data types](#data_types) - see [callback exports](#callback_exports) for details on the callback module +check out [callback module documentation](#callback_exports) for details of the callback module interface -* `decode/1,2` +#### `decode/1,2` #### - ```erlang - decode(JSON) -> Term - decode(JSON, Opts) -> Term +```erlang +decode(JSON) -> Term +decode(JSON, Opts) -> Term - JSON = json_text() - Term = json_term() - Opts = [option() | labels | {labels, Label} | {post_decode, F}] - Label = binary | atom | existing_atom - F = fun((any()) -> any()) - ``` + JSON = json_text() + Term = json_term() + Opts = [option() | labels | {labels, Label} | {post_decode, F}] + Label = binary | atom | existing_atom + F = fun((any()) -> any()) +``` - `decode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) +`decode` parses a json text (a `utf8` encoded binary) and produces an erlang term - the option `labels` controls how keys are converted from json to 
erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms and results in a badarg error if the keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom` except it will not add new atoms to the atom table +the option `labels` controls how keys are converted from json to erlang terms. `binary` does no conversion beyond normal escaping. `atom` converts keys to erlang atoms and results in a badarg error if the keys fall outside the range of erlang atoms. `existing_atom` is identical to `atom` except it will not add new atoms to the atom table - `{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: +`{post_decode, F}` is a user defined function of arity 1 that is called on each output value (objects, arrays, strings, numbers and literals). it may return any value to be substituted in the returned term. for example: - ```erlang - 1> F = fun(V) when is_list(V) -> V; (V) -> false end. - 2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). - [{<<"a list">>, [false, false, false]}] - ``` +```erlang +1> F = fun(V) when is_list(V) -> V; (V) -> false end. +2> jsx:decode(<<"{\"a list\": [true, \"a string\", 1]}">>, [{post_decode, F}]). +[{<<"a list">>, [false, false, false]}] +``` - declaring more than one post-decoder will result in a `badarg` error exception +declaring more than one post-decoder will result in a `badarg` error exception - raises a `badarg` error exception if input is not valid json +raises a `badarg` error exception if input is not valid json -* `encode/1,2` +#### `encode/1,2` #### - ```erlang - encode(Term) -> JSON - encode(Term, Opts) -> JSON +```erlang +encode(Term) -> JSON +encode(Term, Opts) -> JSON - Term = json_term() - JSON = json_text() - Opts = [option() | {pre_encode, F} | space | {space, N} | indent | {indent, N}] - F = fun((any()) -> any()) - N = pos_integer() - ``` + Term = json_term() + JSON = json_text() + Opts = [option() | {pre_encode, F} | space | {space, N} | indent | {indent, N}] + F = fun((any()) -> any()) + N = pos_integer() +``` - `encode` parses a json text (a `utf8` encoded binary) and produces an erlang term (see [json <-> erlang mapping](#json---erlang-mapping)) +`encode` parses a json text (a `utf8` encoded binary) and produces an erlang term - the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` +the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` - the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` +the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` - `{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. it may return any valid json value to be substituted in the returned json. for example: +`{pre_encode, F}` is a user defined function of arity 1 that is called on each input value. 
it may return any valid json value to be substituted in the returned json. for example: - ```erlang - 1> F = fun(V) when is_list(V) -> V; (V) -> false end. - 2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]). - <<"{\"a list\": [false, false, false]}">> - ``` +```erlang +1> F = fun(V) when is_list(V) -> V; (V) -> false end. +2> jsx:encode([{<<"a list">>, [true, <<"a string">>, 1]}], [{pre_encode, F}]). +<<"{\"a list\": [false, false, false]}">> +``` - declaring more than one pre-encoder will result in a `badarg` error exception +declaring more than one pre-encoder will result in a `badarg` error exception - raises a `badarg` error exception if input is not a valid erlang representation of json +raises a `badarg` error exception if input is not a valid [erlang representation of json](#json---erlang-mapping) -* `format/1,2` +#### `format/1,2` #### - ```erlang - format(JSON) -> JSON - format(JSON, Opts) -> JSON +```erlang +format(JSON) -> JSON +format(JSON, Opts) -> JSON - JSON = json_text() - Opts = [option() | space | {space, N} | indent | {indent, N}] - N = pos_integer() - ``` + JSON = json_text() + Opts = [option() | space | {space, N} | indent | {indent, N}] + N = pos_integer() +``` - `format` parses a json text (a `utf8` encoded binary) and produces a new json text according to the format rules specified by `Opts` +`format` parses a json text (a `utf8` encoded binary) and produces a new json text according to the format rules specified by `Opts` - the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` - - the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. the default is `{indent, 0}` +the option `{space, N}` inserts `N` spaces after every comma and colon in your json output. `space` is an alias for `{space, 1}`. the default is `{space, 0}` - raises a `badarg` error exception if input is not valid json +the option `{indent, N}` inserts a newline and `N` spaces for each level of indentation in your json output. note that this overrides spaces inserted after a comma. `indent` is an alias for `{indent, 1}`. 
the default is `{indent, 0}` + +raises a `badarg` error exception if input is not valid json -* `minify/1` +#### `minify/1` #### - ```erlang - minify(JSON) -> JSON +```erlang +minify(JSON) -> JSON - JSON = json_text() - ``` + JSON = json_text() +``` - `minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace +`minify` parses a json text (a `utf8` encoded binary) and produces a new json text stripped of whitespace - raises a `badarg` error exception if input is not valid json +raises a `badarg` error exception if input is not valid json -* `prettify/1` +#### `prettify/1` #### - ```erlang - prettify(JSON) -> JSON +```erlang +prettify(JSON) -> JSON - JSON = json_text() - ``` + JSON = json_text() +``` - `prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` +`prettify` parses a json text (a `utf8` encoded binary) and produces a new json text equivalent to `format(JSON, [{space, 1}, {indent, 2}])` - raises a `badarg` error exception if input is not valid json +raises a `badarg` error exception if input is not valid json -* `is_json/1,2` +#### `is_json/1,2` #### - ```erlang - is_json(MaybeJSON) -> true | false - is_json(MaybeJSON, Opts) -> true | false +```erlang +is_json(MaybeJSON) -> true | false +is_json(MaybeJSON, Opts) -> true | false - MaybeJSON = any() - Opts = options() - ``` + MaybeJSON = any() + Opts = options() +``` - returns true if input is a valid json text, false if not +returns true if input is a valid json text, false if not - what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) +what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) -* `is_term/1,2` +#### `is_term/1,2` #### - ```erlang - is_term(MaybeJSON) -> true | false - is_term(MaybeJSON, Opts) -> true | false +```erlang +is_term(MaybeJSON) -> true | false +is_term(MaybeJSON, Opts) -> true | false - MaybeJSON = any() - Opts = options() - ``` + MaybeJSON = any() + Opts = options() +``` - returns true if input is a valid erlang representation of json, false if not +returns true if input is a valid erlang representation of json, false if not - what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) +what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) ## callback exports ## the following functions should be exported from a jsx callback module -* `Module:init/1` +#### `Module:init/1` #### - ```erlang - Module:init(Args) -> InitialState +```erlang +Module:init(Args) -> InitialState - Args = any() - InitialState = any() - ``` + Args = any() + InitialState = any() +``` - whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` +whenever any of `encoder/3`, `decoder/3` or `parser/3` are called, this function is called with the `Args` argument provided in the calling function to obtain `InitialState` -* `Module:handle_event/2` +#### `Module:handle_event/2` #### - ```erlang - Module:handle_event(Event, State) -> NewState +```erlang +Module:handle_event(Event, State) -> NewState - Event = events() - State = any() - NewState = any() - ``` + Event = [event()] + State = any() + NewState = any() +``` - semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of 
events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: +semantic analysis is performed by repeatedly calling `handle_event/2` with a stream of events emitted by the tokenizer and the current state. the new state returned is used as the input to the next call to `handle_event/2`. the following events must be handled: - - `start_object` - - the start of a json object +- `start_object` - - `end_object` - - the end of a json object + the start of a json object - - `start_array` +- `end_object` - the start of a json array + the end of a json object - - `end_array` +- `start_array` - the end of a json array + the start of a json array - - `{key, binary()}` +- `end_array` - a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions + the end of a json array - - `{string, binary()}` +- `{key, binary()}` - a json string. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions + a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions - - `{integer, integer()}` +- `{string, binary()}` - an erlang integer (bignum) + a json string. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions - - `{float, float()}` +- `{integer, integer()}` - an erlang float + an erlang integer (bignum) - - `{literal, true}` +- `{float, float()}` - the atom `true` + an erlang float - - `{literal, false}` +- `{literal, true}` - the atom `false` + the atom `true` - - `{literal, null}` +- `{literal, false}` - the atom `null` + the atom `false` - - `end_json` +- `{literal, null}` - this event is emitted when syntactic analysis is completed. you should do any cleanup and return the result of your semantic analysis + the atom `null` + +- `end_json` + + this event is emitted when syntactic analysis is completed. you should do any cleanup and return the result of your semantic analysis ## acknowledgements ## From 09f5db9b12e60266e2c35c792a189dce53d01710 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 27 May 2012 12:41:53 -0700 Subject: [PATCH 48/52] list style formatting strikes again --- README.markdown | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/README.markdown b/README.markdown index 20472ae..593a29e 100644 --- a/README.markdown +++ b/README.markdown @@ -243,49 +243,49 @@ option() = replaced_bad_utf8 jsx functions all take a common set of options. not all flags have meaning in all contexts, but they are always valid options. functions may have additional options beyond these. see [individual function documentation](#exports) for details - - `replaced_bad_utf8` +- `replaced_bad_utf8` - json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints + json text input and json strings SHOULD be utf8 encoded binaries, appropriately escaped as per the json spec. 
attempts are made to replace invalid codepoints with `u+FFFD` as per the unicode spec when this option is present. this applies both to malformed unicode and disallowed codepoints - - `escaped_forward_slashes` +- `escaped_forward_slashes` - json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document + json strings are escaped according to the json spec. this means forward slashes (solidus) are optionally escaped. this option is only relevant for encoding; you may want to use this if you are embedding json directly into a html or xml document - - `single_quoted_strings` +- `single_quoted_strings` - some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes + some parsers allow double quotes (`u+0022`) to be replaced by single quotes (`u+0027`) to delimit keys and strings. this option allows json containing single quotes as structural characters to be parsed without errors. note that the parser expects strings to be terminated by the same quote type that opened it and that single quotes must, obviously, be escaped within strings delimited by single quotes - double quotes must ALWAYS be escaped, regardless of what kind of quotes delimit the string they are found in + double quotes must ALWAYS be escaped, regardless of what kind of quotes delimit the string they are found in - the parser will never emit json with keys or strings delimited by single quotes + the parser will never emit json with keys or strings delimited by single quotes - - `unescaped_jsonp` +- `unescaped_jsonp` - javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping + javascript interpreters treat the codepoints `u+2028` and `u+2029` as significant whitespace. json strings that contain either of these codepoints will be parsed incorrectly by some javascript interpreters. by default, these codepoints are escaped (to `\u2028` and `\u2029`, respectively) to retain compatibility. this option simply removes that escaping - - `comments` +- `comments` - json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) + json has no official comments but some parsers allow c style comments. anywhere whitespace is allowed this flag allows comments (both `// ...` and `/* ... */` style) - - `escaped_strings` +- `escaped_strings` - by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. 
this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes + by default, both the encoder and decoder return strings as utf8 binaries appropriate for use in erlang. escape sequences that were present in decoded terms are converted into the appropriate codepoint while encoded terms are unaltered. this flag escapes strings as if for output in json, removing control codes and problematic codepoints and replacing them with the appropriate escapes - - `dirty_strings` +- `dirty_strings` - json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping + json escaping is lossy; it mutates the json string and repeated application can result in unwanted behaviour. if your strings are already escaped (or you'd like to force invalid strings into "json") use this flag to bypass escaping - - `ignored_bad_escapes` +- `ignored_bad_escapes` - during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped + during decoding, ignore unrecognized escape sequences and leave them as is in the stream. note that combining this option with `escaped_strings` will result in the escape character itself being escaped - - `explicit_end` +- `explicit_end` - this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` + this option treats all exhausted inputs as incomplete. the parser will not attempt to return a final state until the function is called with the value `end_stream` - - `relax` +- `relax` - relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can + relax is a synonym for `[replaced_bad_utf8, single_quoted_strings, comments, ignored_bad_escapes]` for when you don't care how janky and awful your json input is, you just want the parser to do the best it can ## exports ## From 697ea4d6af13ce2e8e2b8a914f1864edceaea28f Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 27 May 2012 12:45:14 -0700 Subject: [PATCH 49/52] expanded TOC in readme --- README.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.markdown b/README.markdown index 593a29e..46d2fcf 100644 --- a/README.markdown +++ b/README.markdown @@ -18,6 +18,11 @@ jsx may be built using either [sinan][sinan] or [rebar][rebar] - [json <-> erlang mapping](#json---erlang-mapping) - [incomplete input](#incomplete-input) * [data types](#data-types) + - [`json_term()`](#json_term) + - [`json_text()`](#json_text) + - [`token()`](#token) + - [`event()`](#event) + - [`option()`](#option) * [exports](#exports) * [callback exports](#callback_exports) * [acknowledgements](#acknowledgements) From 424d615994578ca1eb81fd1fc489bde53cf8aaa9 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 27 May 2012 20:26:33 -0700 Subject: [PATCH 50/52] more TOC adjustmentery --- README.markdown | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/README.markdown b/README.markdown index 46d2fcf..822fbba 100644 --- a/README.markdown +++ b/README.markdown @@ -24,6 +24,14 
@@ jsx may be built using either [sinan][sinan] or [rebar][rebar] - [`event()`](#event) - [`option()`](#option) * [exports](#exports) + - [`encoder/3`, `decoder/3` & `parser/3`](#encoder3-decoder3--parser3) + - [`decode/1,2`](#decode12) + - [`encode/1,2`](#encode12) + - [`format/1,2`](#format12) + - [`minify/1`](#minify1) + - [`prettify/1`](#prettify1) + - [`is_json/1,2`](#is_json12) + - [`is_term/1,2`](#is_term12) * [callback exports](#callback_exports) * [acknowledgements](#acknowledgements) @@ -135,9 +143,9 @@ here is a table of how various json values map to erlang: the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors - json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec will not be. in the interest of pragmatism there is an option for looser parsing. see the options section in [data types](#data_types) + json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec will not be. in the interest of pragmatism there is an option for looser parsing. see [options](#option) - all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although, again, see the options section in [data types](#data_types)) + all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although, again, see [options](#option)) this implementation performs no normalization on strings beyond that detailed here. be careful when comparing strings as equivalent strings may have different `utf8` encodings @@ -158,7 +166,7 @@ here is a table of how various json values map to erlang: jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory -however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see the options, in [data types](#data_types) +however, it is important to recognize that jsx is greedy by default. 
jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see [options](#option) ## data types ## @@ -319,7 +327,7 @@ all three functions return an anonymous function that takes the appropriate type `Args` is any term that will be passed to `Module:init/1` prior to syntactic analysis to produce an initial state -`Opts` are detailed in [data types](#data_types) +`Opts` are detailed [here](#option) check out [callback module documentation](#callback_exports) for details of the callback module interface @@ -443,7 +451,7 @@ is_json(MaybeJSON, Opts) -> true | false returns true if input is a valid json text, false if not -what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) +what exactly constitutes valid json may be altered per the [options](#option) #### `is_term/1,2` #### @@ -458,8 +466,7 @@ is_term(MaybeJSON, Opts) -> true | false returns true if input is a valid erlang representation of json, false if not -what exactly constitutes valid json may be altered per the options, detailed in [data types](#data_types) - +what exactly constitutes valid json may be altered per the [options](#option) ## callback exports ## @@ -506,11 +513,11 @@ semantic analysis is performed by repeatedly calling `handle_event/2` with a str - `{key, binary()}` - a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions + a key in a json object. this is guaranteed to follow either `start_object` or a json value. it will usually be a `utf8` encoded binary. see the [options](#option) for possible exceptions - `{string, binary()}` - a json string. it will usually be a `utf8` encoded binary. see the options under [data types](#data_types) for possible exceptions + a json string. it will usually be a `utf8` encoded binary. see the [options](#option) for possible exceptions - `{integer, integer()}` From 6f15acaf5dbbca8774fa9ef9ce894f3b0e0613b7 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 27 May 2012 20:29:48 -0700 Subject: [PATCH 51/52] tighter language in README --- README.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.markdown b/README.markdown index 822fbba..6b25502 100644 --- a/README.markdown +++ b/README.markdown @@ -143,7 +143,7 @@ here is a table of how various json values map to erlang: the utf8 restriction means improperly paired surrogates are explicitly disallowed. `u+d800` to `u+dfff` are allowed, but only when they form valid surrogate pairs. surrogates encountered otherwise result in errors - json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec will not be. in the interest of pragmatism there is an option for looser parsing. see [options](#option) + json string escapes of the form `\uXXXX` will be converted to their equivalent codepoints during parsing. 
this means control characters and other codepoints disallowed by the json spec may be encountered in resulting strings, but codepoints disallowed by the unicode spec will not be. in the interest of pragmatism there is an [option](#option) for looser parsing all erlang strings are represented by *valid* `utf8` encoded binaries. the encoder will check strings for conformance. noncharacters (like `u+ffff`) are allowed in erlang utf8 encoded binaries, but not in strings passed to the encoder (although, again, see [options](#option)) @@ -166,7 +166,7 @@ here is a table of how various json values map to erlang: jsx handles incomplete json texts. if a partial json text is parsed, rather than returning a term from your callback handler, jsx returns `{incomplete, F}` where `F` is a function with an identical API to the anonymous fun returned from `decoder/3`, `encoder/3` or `parser/3`. it retains the internal state of the parser at the point where input was exhausted. this allows you to parse as you stream json over a socket or file descriptor, or to parse large json texts without needing to keep them entirely in memory -however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the option `explicit_end` can be used to modify this behaviour. see [options](#option) +however, it is important to recognize that jsx is greedy by default. jsx will consider the parsing complete if input is exhausted and the json text is not unambiguously incomplete. this is mostly relevant when parsing bare numbers like `<<"1234">>`. this could be a complete json integer or just the beginning of a json integer that is being parsed incrementally. jsx will treat it as a whole integer. the [option](#options) `explicit_end` can be used to modify this behaviour ## data types ## @@ -451,7 +451,7 @@ is_json(MaybeJSON, Opts) -> true | false returns true if input is a valid json text, false if not -what exactly constitutes valid json may be altered per the [options](#option) +what exactly constitutes valid json may be [altered](#option) #### `is_term/1,2` #### @@ -466,7 +466,7 @@ is_term(MaybeJSON, Opts) -> true | false returns true if input is a valid erlang representation of json, false if not -what exactly constitutes valid json may be altered per the [options](#option) +what exactly constitutes valid json may be [altered](#option) ## callback exports ## From 76f16c6996cf885247568da789512ae827dc86f3 Mon Sep 17 00:00:00 2001 From: alisdair sullivan Date: Sun, 27 May 2012 20:31:56 -0700 Subject: [PATCH 52/52] complete TOC --- README.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.markdown b/README.markdown index 6b25502..8e215cb 100644 --- a/README.markdown +++ b/README.markdown @@ -33,6 +33,8 @@ jsx may be built using either [sinan][sinan] or [rebar][rebar] - [`is_json/1,2`](#is_json12) - [`is_term/1,2`](#is_term12) * [callback exports](#callback_exports) + - [`Module:init/1`](#moduleinit1) + - [`Module:handle_event/2`](#modulehandle_event2) * [acknowledgements](#acknowledgements)
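
the callback interface documented in the hunks above (`Module:init/1` producing an initial state, `Module:handle_event/2` folding tokenizer events into a new state, with `end_json` as the point to return a result) is easiest to see with a small example. as a minimal sketch of that interface — the module name `event_counter` and its counting logic are invented purely for illustration; only the `init/1`/`handle_event/2` shape and the event names come from the README text in these patches — a callback module might look like

```erlang
%% event_counter: a hypothetical jsx callback module that simply
%% counts the events emitted during syntactic analysis
-module(event_counter).
-export([init/1, handle_event/2]).

%% called with the Args given to decoder/3, encoder/3 or parser/3;
%% the initial state here is just a running total of zero
init(_Args) -> 0.

%% end_json marks the end of analysis: return the final count
handle_event(end_json, Count) -> Count;
%% every other event (start_object, {key, K}, {integer, N}, ...) adds one
handle_event(_Event, Count) -> Count + 1.
```

used with `decoder/3` as described above, `(jsx:decoder(event_counter, [], []))(<<"[1,2,3]">>)` should see `start_array`, three `{integer, N}` events, `end_array` and then `end_json`; the first five each bump the count and `end_json` returns it, so the result should be `5`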