%% -*- erlang -*-
%%%% leveled
%% @doc A path under which leveled data files will be stored.
{mapping, "leveled.data_root", "leveled.data_root", [
{default, "$(platform_data_dir)/leveled"},
{datatype, directory}
]}.
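%% A cuttlefish mapping such as the one above is surfaced to operators as a
%% line in riak.conf, for example (hypothetical path):
%%   leveled.data_root = /var/lib/riak/leveled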
%% @doc Strategy for flushing data to disk
%% Can be set to riak_sync, sync (if OTP > 16) or none. Use none, and the OS
%% will flush when most efficient. Use riak_sync or sync to flush after every
%% PUT (not recommended without hardware support, e.g. flash drives and/or
%% flash-backed write caches)
{mapping, "leveled.sync_strategy", "leveled.sync_strategy", [
{default, none},
{datatype, atom}
]}.
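%% For example, keeping the OS-managed default explicit in riak.conf:
%%   leveled.sync_strategy = none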
%% @doc The size (in number of keys) of the Bookie's in-memory cache
{mapping, "leveled.cache_size", "leveled.cache_size", [
{default, 2000},
{datatype, integer},
hidden
]}.
%% @doc The maximum size of the Bookie's cache, as a multiple of the cache
%% size, before each new PUT results in a slow-offer pause. Prior to
%% Riak 3.0.10 this defaulted to 4.
{mapping, "leveled.cache_multiple", "leveled.cache_multiple", [
{default, 2},
{datatype, integer},
hidden
]}.
%% @doc The size (in number of keys) of the Penciller's in-memory cache
{mapping, "leveled.penciller_cache_size", "leveled.penciller_cache_size", [
{default, 20000},
{datatype, integer},
hidden
]}.
%% @doc Compression method
%% Can be lz4, zstd, native (which will use the Erlang native zlib compression
%% within term_to_binary) or none
{mapping, "leveled.compression_method", "leveled.compression_method", [
{datatype, {enum, [native, lz4, zstd, none]}},
{default, native}
]}.
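%% For example, to select zstd in riak.conf (assuming zstd support is
%% available in the build):
%%   leveled.compression_method = zstd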
%% @doc Ledger compression
%% If an alternative compression method is preferred specifically for the
%% ledger, it can be specified here. Default is as_store - use whatever method
%% has been defined in leveled.compression_method. Alternatives are native,
%% lz4, zstd and none
{mapping, "leveled.ledger_compression", "leveled.ledger_compression", [
{datatype, {enum, [as_store, native, lz4, zstd, none]}},
{default, as_store}
]}.
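%% For example, a hypothetical combination compressing the store with lz4
%% while keeping the ledger on the native method:
%%   leveled.compression_method = lz4
%%   leveled.ledger_compression = native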
%% @doc Compression point
%% The point at which compression is applied to the Journal (the Ledger is
%% always compressed). Use on_receipt or on_compact. on_compact is suitable
%% when values are unlikely to yield much benefit from compression
%% (compression is only attempted when compacting)
{mapping, "leveled.compression_point", "leveled.compression_point", [
{default, on_receipt},
{datatype, atom}
]}.
%% @doc Compression Level (Ledger LSM)
%% Specify the level in the LSM tree from which compression should occur.
%% Defaults to L1, so only L0 writes are uncompressed.
{mapping, "leveled.compression_level", "leveled.compression_level", [
{default, 1},
{datatype, integer},
{validators, ["range:0-7"]},
hidden
]}.
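%% The "range:0-7" validator referenced above is defined elsewhere in the
%% schema; in cuttlefish such a definition would look along these lines (a
%% sketch, not necessarily the exact fun used):
%%   {validator, "range:0-7", "must be between 0 and 7",
%%     fun(X) -> is_integer(X) andalso X >= 0 andalso X =< 7 end}.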
%% @doc Log level
%% Can be debug, info, warning, error or critical
%% Set the minimum log level to be used within leveled. Leveled will log many
%% lines to allow for stats to be extracted by those using log indexers such
%% as Splunk.
%% Logging will be via the Erlang logger, and so the destination will be
%% defined by the configured log handlers of the Erlang node which starts the
%% bookie.
%% All logs are from the leveled_log module, and so specific handling can be
%% managed using filters on the first element of the MFA metadata, or by
%% setting the log level for that specific module.
{mapping, "leveled.log_level", "leveled.log_level", [
{default, info},
{datatype, {enum, [debug, info, warning, error, critical]}}
]}.
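%% Independently of this setting, leveled's output can also be tuned per
%% module with the standard OTP logger API from the node's console - a
%% minimal sketch:
%%   logger:set_module_level(leveled_log, error).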
%% @doc The approximate size (in bytes) when a Journal file should be rolled.
%% Normally keep this at around the size of O(100K) objects. Default is 1GB.
%% Note that on startup an actual maximum size will be chosen which varies by
%% a random factor from this point - to avoid coordination of roll events
%% across vnodes.
{mapping, "leveled.journal_size", "leveled.journal_size", [
{default, 1000000000},
{datatype, integer}
]}.
%% @doc The approximate count of objects when a Journal file should be rolled.
%% Measured in object count; a file will be rolled if either the object count
%% or the journal size limit is reached. Default is 200K.
%% Note that on startup an actual maximum count will be chosen which varies by
%% a random factor from this point - to avoid coordination of roll events
%% across vnodes.
{mapping, "leveled.journal_objectcount", "leveled.journal_objectcount", [
{default, 200000},
{datatype, integer}
]}.
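%% With the defaults above, a journal file therefore rolls at 200K objects or
%% 1GB, whichever is reached first - so the size limit dominates once the
%% mean object size exceeds roughly 5KB (1GB / 200K).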
%% @doc The level of the ledger to be pre-loaded into the page cache
%% Depending on how much memory is available for the page cache, and how much
%% disk I/O activity can be tolerated at startup - then the level at which the
%% ledger is forced into the page cache can be controlled by configuration.
{mapping, "leveled.ledger_pagecachelevel", "leveled.ledger_pagecachelevel", [
{default, 4},
{datatype, integer}
]}.
%% @doc The number of journal compactions per vnode per day
%% The higher the value, the more compaction runs, and the sooner space is
%% recovered - but each run has a cost.
{mapping, "leveled.compaction_runs_perday", "leveled.compaction_runs_perday", [
{default, 24},
{datatype, integer}
]}.
%% @doc The number of times per day to score an individual file for compaction.
%% The default value will lead to each file, on average, being scored once
%% every 12 hours
{mapping, "leveled.compaction_scores_perday", "leveled.compaction_scores_perday", [
{default, 2},
{datatype, integer}
]}.
%% @doc Compaction Low Hour
%% The hour of the day in which journal compaction can start. Use a low hour
%% of 0 and a top hour of 23 to have no compaction window (i.e. always compact
%% regardless of time of day).
{mapping, "leveled.compaction_low_hour", "leveled.compaction_low_hour", [
{default, 0},
{datatype, integer}
]}.
%% @doc Compaction Top Hour
%% The hour of the day after which journal compaction should stop.
%% If low hour > top hour, then compaction will work overnight between low
%% hour and top hour (inclusive). Timings rely on the server's view of local
%% time.
{mapping, "leveled.compaction_top_hour", "leveled.compaction_top_hour", [
{default, 23},
{datatype, integer}
]}.
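%% For example, to restrict compaction to an overnight window (hypothetical
%% values, interpreted against the server's local time):
%%   leveled.compaction_low_hour = 22
%%   leveled.compaction_top_hour = 6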
%% @doc Max Journal Files Per Compaction Run
%% The maximum number of consecutive files which may be compacted in a single
%% compaction run. If increasing this value, the average number of files per
%% run may decrease, unless adjustments are also made to the
%% maxrunlength_compactionpercentage and singlefile_compactionpercentage
%% settings.
{mapping, "leveled.max_run_length", "leveled.max_run_length", [
{default, 4},
{datatype, integer}
]}.
%% @doc Target Percentage for Max Run
%% The target score for a maximum-length run of files to qualify for
%% compaction. If less than this percentage would be retained after
%% compaction, then the run is a candidate (e.g. in the default case, if at
%% least 25% of space would be recovered).
{mapping, "leveled.maxrunlength_compactionpercentage", "leveled.maxrunlength_compactionpercentage", [
{default, 75.0},
{datatype, float},
hidden
]}.
%% @doc Target Percentage for Single File
%% The target score for a run of a single file to qualify for compaction. If
%% less than this percentage would be retained after compaction, then the
%% file is a candidate (e.g. in the default case, if at least 75% of space
%% would be recovered).
{mapping, "leveled.singlefile_compactionpercentage", "leveled.singlefile_compactionpercentage", [
{default, 25.0},
{datatype, float},
hidden
]}.
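%% Taken together with max_run_length, the defaults mean a run of 4 files
%% qualifies for compaction when less than 75% of its space would be retained
%% (at least 25% recovered), whereas a single file must be reducible to below
%% 25% (at least 75% recovered) to qualify.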
%% @doc Snapshot timeout (short)
%% Maximum expected time for an index query. A query taking longer than this
%% may fail as its snapshot is released - potentially allowing some file
%% processes to delete. Timeout is in seconds.
{mapping, "leveled.snapshot_timeout_short", "leveled.snapshot_timeout_short", [
{default, 1800},
{datatype, integer},
hidden
]}.
%% @doc Snapshot timeout (long)
%% Maximum expected time for any other fold. A fold taking longer than this
%% may fail as its snapshot is released - potentially allowing some file
%% processes to delete. Timeout is in seconds.
{mapping, "leveled.snapshot_timeout_long", "leveled.snapshot_timeout_long", [
{default, 86400},
{datatype, integer},
hidden
]}.
%% @doc Statistic monitoring proportion
%% The proportion of requests to be covered by stats, an integer between
%% 0 and 100. There is no flow control, so setting this too high could
%% possibly overflow the leveled_monitor mailbox.
{mapping, "leveled.stats_percentage", "leveled.stats_percentage", [
{default, 10},
{datatype, integer},
{validators, ["range:0-100"]}
]}.
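%% For example, at the default of 10 roughly one in ten requests is sampled;
%% raising this towards 100 samples every request, with the mailbox risk
%% noted above.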
%% @doc Statistic log frequency (seconds)
%% The wait in seconds between logs from each leveled_monitor (there is one
%% monitor per vnode)
{mapping, "leveled.stats_logfrequency", "leveled.stats_logfrequency", [
{default, 30},
{datatype, integer}
]}.