From d60725ad1bd39269e75d4e48d0935af5401ec47b Mon Sep 17 00:00:00 2001
From: Martin Sumner
Date: Fri, 8 Jun 2018 13:23:45 +0100
Subject: [PATCH] Add schema document

---
 priv/leveled.schema | 90 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)

diff --git a/priv/leveled.schema b/priv/leveled.schema
index 6700671..c061898 100644
--- a/priv/leveled.schema
+++ b/priv/leveled.schema
@@ -7,3 +7,93 @@
   {default, "$(platform_data_dir)/leveled"},
   {datatype, directory}
 ]}.
+
+%% @doc Strategy for flushing data to disk
+%% Can be set to riak_sync, sync (if OTP > 16) or none. Use none, and the OS
+%% will flush when most efficient. Use riak_sync or sync to flush after every
+%% PUT (not recommended without some hardware support, e.g. flash drives
+%% and/or flash-backed write caches).
+{mapping, "leveled.sync_strategy", "leveled.sync_strategy", [
+  {default, none},
+  {datatype, atom}
+]}.
+
+
+%% @doc The size (in keys) of the Bookie's in-memory cache
+{mapping, "leveled.cache_size", "leveled.cache_size", [
+  {default, 4000},
+  {datatype, integer},
+  hidden
+]}.
+
+%% @doc The size (in keys) of the Penciller's in-memory cache
+{mapping, "leveled.penciller_cache_size", "leveled.penciller_cache_size", [
+  {default, 28000},
+  {datatype, integer},
+  hidden
+]}.
+
+%% @doc Compression method
+%% Can be lz4 or native (which uses Erlang's built-in zlib compression via
+%% term_to_binary).
+{mapping, "leveled.compression_method", "leveled.compression_method", [
+  {default, lz4},
+  {datatype, atom}
+]}.
+
+%% @doc Compression point
+%% The point at which compression is applied to the Journal (the Ledger is
+%% always compressed). Use on_receipt or on_compact. on_compact is suitable
+%% when values are unlikely to yield much benefit from compression, as
+%% compression is then only attempted when compacting.
+{mapping, "leveled.compression_point", "leveled.compression_point", [
+  {default, on_receipt},
+  {datatype, atom}
+]}.
+
+
+%% @doc The approximate size (in bytes) at which a Journal file should be
+%% rolled. Normally sized to hold on the order of 100K objects. Default 500MB.
+{mapping, "leveled.journal_size", "leveled.journal_size", [
+  {default, 500000000},
+  {datatype, integer}
+]}.
+
+%% @doc The number of journal compaction runs per vnode per day
+%% The higher the value, the more compaction runs, and the sooner space is
+%% recovered, but each run has a cost.
+{mapping, "leveled.compaction_runs_perday", "leveled.compaction_runs_perday", [
+  {default, 16},
+  {datatype, integer}
+]}.
+
+%% @doc Compaction Low Hour
+%% The hour of the day from which journal compaction can start. Use a low
+%% hour of 0 and a top hour of 23 to have no compaction window (i.e. always
+%% compact, regardless of time of day).
+{mapping, "leveled.compaction_low_hour", "leveled.compaction_low_hour", [
+  {default, 0},
+  {datatype, integer}
+]}.
+
+%% @doc Compaction Top Hour
+%% The hour of the day after which journal compaction should stop.
+%% If low hour > top hour, compaction will run overnight between low hour and
+%% top hour (inclusive). Timings rely on the server's view of local time.
+{mapping, "leveled.compaction_top_hour", "leveled.compaction_top_hour", [
+  {default, 23},
+  {datatype, integer}
+]}.
+
+%% @doc Max Journal Files Per Compaction Run
+%% The maximum number of consecutive files which may be compacted in a single
+%% compaction run.
+{mapping, "leveled.max_run_length", "leveled.max_run_length", [
+  {default, 8},
+  {datatype, integer},
+  hidden
+]}.
+
+
+
+
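
As an illustration of how these mappings surface to operators (assuming
cuttlefish's usual behaviour of exposing each mapping name directly as a
riak.conf key), a configuration restricting compaction to an overnight
window might read:

    ## Hypothetical riak.conf fragment; key names are taken from the
    ## mappings in the schema above
    leveled.sync_strategy = none
    leveled.compression_method = lz4
    leveled.journal_size = 500000000
    leveled.compaction_runs_perday = 24
    leveled.compaction_low_hour = 22
    leveled.compaction_top_hour = 5

With a low hour of 22 and a top hour of 5, low hour > top hour, so per the
doc comment above compaction runs overnight, from hour 22 through hour 5
(inclusive) in the server's local time.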
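
A minimal sketch, assuming the standard cuttlefish translation in which the
second element of each mapping ("leveled.sync_strategy" and so on) addresses
that key in the leveled application's environment, of the schema defaults
expressed as Erlang terms (roughly what an equivalent advanced.config entry
would contain):

    %% Hypothetical advanced.config fragment; assumes "leveled.X" in the
    %% mappings above translates to key X of the leveled application
    [
      {leveled, [
          {sync_strategy, none},
          {cache_size, 4000},
          {penciller_cache_size, 28000},
          {compression_method, lz4},
          {compression_point, on_receipt},
          {journal_size, 500000000},
          {compaction_runs_perday, 16},
          {compaction_low_hour, 0},
          {compaction_top_hour, 23},
          {max_run_length, 8}
      ]}
    ].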