Merge pull request #292 from martinsumner/mas-i287-config

Make page cache level configurable

Commit: e3913a6c07
6 changed files with 94 additions and 32 deletions
@@ -72,7 +72,7 @@
   {datatype, integer}
 ]}.

-%% @doc The approximate size (in bytes) when a Journal file should be rolled.
+%% @doc The approximate count of objects when a Journal file should be rolled.
 %% This time measured in object count, a file will be rolled if either the
 %% object count or the journal size limit is reached. Default 200K.
 %% Note that on startup an actual maximum size will be chosen which varies by

@@ -83,6 +83,14 @@
   {datatype, integer}
 ]}.

+%% @doc The level of the ledger to be pre-loaded into the page cache
+%% Depending on how much memory is available for the page cache, and how much
+%% disk I/O activity can be tolerated at startup - then the level at which the
+%% ledger is forced into the page cache can be controlled by configuration.
+{mapping, "leveled.ledger_pagecachelevel", "leveled.ledger_pagecachelevel", [
+  {default, 4},
+  {datatype, integer}
+]}.
+
 %% @doc The number of journal compactions per vnode per day
 %% The higher the value, the more compaction runs, and the sooner space is

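With this mapping in the schema, the pre-load level becomes an ordinary riak.conf setting. A hypothetical example (4 is the default; a lower value pre-loads fewer ledger levels and so generates less disk I/O at startup):

    leveled.ledger_pagecachelevel = 4
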
@@ -66,7 +66,7 @@
   {datatype, integer}
 ]}.

-%% @doc The approximate size (in bytes) when a Journal file should be rolled.
+%% @doc The approximate count of objects when a Journal file should be rolled.
 %% This time measured in object count, a file will be rolled if either the
 %% object count or the journal size limit is reached. Default 200K.
 %% Note that on startup an actual maximum size will be chosen which varies by

@@ -77,6 +77,16 @@
   {datatype, integer}
 ]}.

+%% @doc The level of the ledger to be pre-loaded into the page cache
+%% Depending on how much memory is available for the page cache, and how much
+%% disk I/O activity can be tolerated at startup - then the level at which the
+%% ledger is forced into the page cache can be controlled by configuration.
+{mapping, "multi_backend.$name.leveled.ledger_pagecachelevel", "riak_kv.multi_backend", [
+  {default, 4},
+  {datatype, integer}
+]}.
+
+
 %% @doc The number of journal compactions per vnode per day
 %% The higher the value, the more compaction runs, and the sooner space is
 %% recovered. But each run has a cost

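The multi-backend variant works the same way, keyed by backend name. A hypothetical riak.conf line, where mybackend stands in for the $name placeholder:

    multi_backend.mybackend.leveled.ledger_pagecachelevel = 4
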
@@ -1,7 +1,7 @@
 {application, leveled,
  [
   {description, "Key Value store based on LSM-Tree and designed for larger values"},
-  {vsn, "0.9.17"},
+  {vsn, "0.9.18"},
   {registered, []},
   {applications, [
     kernel,

@@ -144,6 +144,7 @@
          {maxrunlength_compactionpercentage, 70.0},
          {reload_strategy, []},
          {max_pencillercachesize, ?MAX_PCL_CACHE_SIZE},
+         {ledger_preloadpagecache_level, ?SST_PAGECACHELEVEL_LOOKUP},
          {compression_method, ?COMPRESSION_METHOD},
          {compression_point, ?COMPRESSION_POINT},
          {log_level, ?LOG_LEVEL},

@@ -320,6 +321,10 @@
             % The minimum size 400 - attempt to set this vlaue lower will be
             % ignored. As a rule the value should be at least 4 x the Bookie's
             % cache size
+        {ledger_preloadpagecache_level, pos_integer()} |
+            % To which level of the ledger should the ledger contents be
+            % pre-loaded into the pagecache (using fadvise on creation and
+            % startup)
         {compression_method, native|lz4} |
             % Compression method and point allow Leveled to be switched from
             % using bif based compression (zlib) to using nif based compression

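For callers starting leveled directly, rather than via the cuttlefish schema, the new option is passed in the book_start options list. A minimal sketch, assuming an illustrative root_path; book_close is included so the sketch cleans up after itself:

    %% Sketch: start a bookie that pre-loads only the top two ledger levels.
    {ok, Bookie} =
        leveled_bookie:book_start([{root_path, "/tmp/leveled_demo"},
                                   {ledger_preloadpagecache_level, 2}]),
    ok = leveled_bookie:book_close(Bookie).
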
@@ -1184,12 +1189,14 @@ init([Opts]) ->
             ok
     end,

+    PageCacheLevel = proplists:get_value(ledger_preloadpagecache_level, Opts),
+
     {HeadOnly, HeadLookup, SSTPageCacheLevel} =
         case proplists:get_value(head_only, Opts) of
             false ->
-                {false, true, ?SST_PAGECACHELEVEL_LOOKUP};
+                {false, true, PageCacheLevel};
             with_lookup ->
-                {true, true, ?SST_PAGECACHELEVEL_LOOKUP};
+                {true, true, PageCacheLevel};
             no_lookup ->
                 {true, false, ?SST_PAGECACHELEVEL_NOLOOKUP}
         end,

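Note that the configured level only applies when the store keeps a lookup-capable ledger: head_only stores in no_lookup mode still pin the ?SST_PAGECACHELEVEL_NOLOOKUP macro. A minimal sketch of that selection as a standalone module; the macro value here is an assumption for illustration, not taken from this diff:

    -module(pagecache_level_sketch).
    -export([level_for/2]).

    %% Assumed value, for illustration only.
    -define(SST_PAGECACHELEVEL_NOLOOKUP, 1).

    %% Mirrors the case expression in leveled_bookie:init/1 after this
    %% change: the configured level is used except in no_lookup mode.
    level_for(false, ConfiguredLevel)       -> ConfiguredLevel;
    level_for(with_lookup, ConfiguredLevel) -> ConfiguredLevel;
    level_for(no_lookup, _Configured)       -> ?SST_PAGECACHELEVEL_NOLOOKUP.
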
@@ -793,24 +793,24 @@ filter_output(KVCs, FilterFun, FilterServer, MaxSQN, ReloadStrategy) ->
 write_values([], _CDBopts, Journal0, ManSlice0, _PressMethod) ->
     {Journal0, ManSlice0};
 write_values(KVCList, CDBopts, Journal0, ManSlice0, PressMethod) ->
-    KVList = lists:map(fun({K, V, _C}) ->
+    KVList =
+        lists:map(fun({K, V, _C}) ->
                         % Compress the value as part of compaction
                         {K, leveled_codec:maybe_compress(V, PressMethod)}
                     end,
                     KVCList),
-    {ok, Journal1} = case Journal0 of
-                        null ->
-                            {TK, _TV} = lists:nth(1, KVList),
-                            {SQN, _LK} = leveled_codec:from_journalkey(TK),
-                            FP = CDBopts#cdb_options.file_path,
-                            FN = leveled_inker:filepath(FP,
-                                                        SQN,
-                                                        compact_journal),
-                            leveled_log:log("IC009", [FN]),
-                            leveled_cdb:cdb_open_writer(FN, CDBopts);
-                        _ ->
-                            {ok, Journal0}
-                        end,
+    {ok, Journal1} =
+        case Journal0 of
+            null ->
+                {TK, _TV} = lists:nth(1, KVList),
+                {SQN, _LK} = leveled_codec:from_journalkey(TK),
+                FP = CDBopts#cdb_options.file_path,
+                FN = leveled_inker:filepath(FP, SQN, compact_journal),
+                leveled_log:log("IC009", [FN]),
+                leveled_cdb:cdb_open_writer(FN, CDBopts);
+            _ ->
+                {ok, Journal0}
+        end,
     R = leveled_cdb:cdb_mput(Journal1, KVList),
     case R of
         ok ->

@@ -12,7 +12,8 @@
             aae_bustedjournal/1,
             journal_compaction_bustedjournal/1,
             close_duringcompaction/1,
-            allkeydelta_journal_multicompact/1
+            allkeydelta_journal_multicompact/1,
+            recompact_keydeltas/1
             ]).

 all() -> [
@@ -25,7 +26,8 @@ all() -> [
             aae_bustedjournal,
             journal_compaction_bustedjournal,
             close_duringcompaction,
-            allkeydelta_journal_multicompact
+            allkeydelta_journal_multicompact,
+            recompact_keydeltas
             ].

@@ -600,18 +602,19 @@ busted_journal_test(MaxJournalSize, PressMethod, PressPoint, Bust) ->


 allkeydelta_journal_multicompact(_Config) ->
-    % Simply confirms that none of this causes a crash
     RootPath = testutil:reset_filestructure(),
     B = <<"test_bucket">>,
     StartOptsFun =
         fun(JOC) ->
             [{root_path, RootPath},
                 {max_journalobjectcount, JOC},
-                {max_run_length, 6},
+                {max_run_length, 4},
+                {singlefile_compactionpercentage, 70.0},
+                {maxrunlength_compactionpercentage, 85.0},
                 {sync_strategy, testutil:sync_strategy()}]
         end,
-    {ok, Bookie1} = leveled_bookie:book_start(StartOptsFun(16000)),
-    {KSpcL1, _V1} = testutil:put_indexed_objects(Bookie1, B, 40000),
+    {ok, Bookie1} = leveled_bookie:book_start(StartOptsFun(14000)),
+    {KSpcL1, _V1} = testutil:put_indexed_objects(Bookie1, B, 24000),
     {KSpcL2, V2} = testutil:put_altered_indexed_objects(Bookie1,
                                                         B,
                                                         KSpcL1,

@@ -637,7 +640,7 @@ allkeydelta_journal_multicompact(_Config) ->
     ok = leveled_bookie:book_close(Bookie1),
     leveled_penciller:clean_testdir(RootPath ++ "/ledger"),
     io:format("Restart without ledger~n"),
-    {ok, Bookie2} = leveled_bookie:book_start(StartOptsFun(24000)),
+    {ok, Bookie2} = leveled_bookie:book_start(StartOptsFun(13000)),

     ok = testutil:check_indexed_objects(Bookie2,
                                         B,

@@ -657,7 +660,7 @@ allkeydelta_journal_multicompact(_Config) ->
     ok = leveled_bookie:book_close(Bookie2),

     io:format("Restart with smaller journal object count~n"),
-    {ok, Bookie3} = leveled_bookie:book_start(StartOptsFun(8000)),
+    {ok, Bookie3} = leveled_bookie:book_start(StartOptsFun(7000)),

     {KSpcL4, V4} = testutil:put_altered_indexed_objects(Bookie3,
                                                         B,

@@ -674,11 +677,45 @@ allkeydelta_journal_multicompact(_Config) ->
         file:list_dir(
             filename:join(RootPath, "journal/journal_files/post_compact")),
     io:format("Number of files after compaction ~w~n", [length(FileList4)]),
-    true = length(FileList4) >= length(FileList3) + 4,
+    true = length(FileList4) >= length(FileList3) + 3,

     ok = leveled_bookie:book_close(Bookie3),
     testutil:reset_filestructure(10000).
+
+recompact_keydeltas(_Config) ->
+    RootPath = testutil:reset_filestructure(),
+    B = <<"test_bucket">>,
+    StartOptsFun =
+        fun(JOC) ->
+            [{root_path, RootPath},
+                {max_journalobjectcount, JOC},
+                {max_run_length, 4},
+                {singlefile_compactionpercentage, 70.0},
+                {maxrunlength_compactionpercentage, 85.0},
+                {sync_strategy, testutil:sync_strategy()}]
+        end,
+    {ok, Bookie1} = leveled_bookie:book_start(StartOptsFun(45000)),
+    {KSpcL1, _V1} = testutil:put_indexed_objects(Bookie1, B, 24000),
+    {KSpcL2, _V2} = testutil:put_altered_indexed_objects(Bookie1,
+                                                         B,
+                                                         KSpcL1,
+                                                         false),
+    ok = leveled_bookie:book_close(Bookie1),
+    {ok, Bookie2} = leveled_bookie:book_start(StartOptsFun(45000)),
+    compact_and_wait(Bookie2, 0),
+    {KSpcL3, V3} = testutil:put_altered_indexed_objects(Bookie2,
+                                                        B,
+                                                        KSpcL2,
+                                                        false),
+    compact_and_wait(Bookie2, 0),
+    ok = testutil:check_indexed_objects(Bookie2,
+                                        B,
+                                        KSpcL1 ++ KSpcL2 ++ KSpcL3,
+                                        V3),
+    ok = leveled_bookie:book_close(Bookie2),
+    testutil:reset_filestructure(10000).
+


 rotating_object_check(BookOpts, B, NumberOfObjects) ->
     {ok, Book1} = leveled_bookie:book_start(BookOpts),