Mas d31 i413 (#415)
* Allow snapshots to be reused in queries Allow for a full bookie snapshot to be re-used for multiple queries, not just KV fetches. * Reduce log noise The internal dummy tag is expected so should not prompt a log on reload * Snapshot should have same status as active db wrt head_only and head_lookup * Allow logging to be specified on snapshots * Shut down snapshot bookie if primary goes down Inker and Penciller already will shut down based on `erlang:monitor/2` * Review feedback Formatting and code readability fixes
This commit is contained in:
parent
9e804924a8
commit
d544db5461
9 changed files with 289 additions and 183 deletions
|
@ -404,6 +404,8 @@ fetchput_snapshot(_Config) ->
|
|||
testutil:check_forlist(Bookie1, ChkList1),
|
||||
testutil:check_forlist(SnapBookie1, ChkList1),
|
||||
|
||||
compare_foldwithsnap(Bookie1, SnapBookie1, ChkList1),
|
||||
|
||||
% Close the snapshot, check the original store still has the objects
|
||||
|
||||
ok = leveled_bookie:book_close(SnapBookie1),
|
||||
|
@ -480,6 +482,8 @@ fetchput_snapshot(_Config) ->
|
|||
testutil:check_forlist(SnapBookie3, ChkList2),
|
||||
testutil:check_forlist(SnapBookie2, ChkList1),
|
||||
io:format("Started new snapshot and check for new objects~n"),
|
||||
|
||||
compare_foldwithsnap(Bookie2, SnapBookie3, ChkList3),
|
||||
|
||||
% Load yet more objects, these are replacement objects for the last load
|
||||
|
||||
|
@ -563,6 +567,28 @@ fetchput_snapshot(_Config) ->
|
|||
testutil:reset_filestructure().
|
||||
|
||||
|
||||
%% Check that a snapshot bookie answers fold queries identically to the
%% primary store it was taken from: a head fold over the primary must equal
%% the same head fold over the snapshot, and the snapshot's head fold must
%% yield the same {Bucket, Key} pairs as its key fold.
%% NOTE(review): this span is part of a rendered commit-diff page; the bare
%% `|` / `||||` lines below are page gutter artifacts, not Erlang source.
compare_foldwithsnap(Bookie, SnapBookie, ChkList) ->
|
||||
%% Both fold funs accumulate {Bucket, Key} pairs (in reverse fold order);
%% the head fold ignores the object head value.
HeadFoldFun = fun(B, K, _Hd, Acc) -> [{B, K}|Acc] end,
|
||||
KeyFoldFun = fun(B, K, Acc) -> [{B, K}|Acc] end,
|
||||
%% book_headfold returns {async, Runner}; the fold only executes when the
%% returned fun is called below.
{async, HeadFoldDB} =
|
||||
leveled_bookie:book_headfold(
|
||||
Bookie, ?RIAK_TAG, {HeadFoldFun, []}, true, false, false
|
||||
),
|
||||
{async, HeadFoldSnap} =
|
||||
leveled_bookie:book_headfold(
|
||||
SnapBookie, ?RIAK_TAG, {HeadFoldFun, []}, true, false, false
|
||||
),
|
||||
%% Assertive match: crash (badmatch) if primary and snapshot disagree.
true = HeadFoldDB() == HeadFoldSnap(),
|
||||
|
||||
%% Snapshot should still serve plain KV fetches for the check list.
testutil:check_forlist(SnapBookie, ChkList),
|
||||
|
||||
{async, KeyFoldSnap} =
|
||||
leveled_bookie:book_keylist(
|
||||
SnapBookie, ?RIAK_TAG, {KeyFoldFun, []}
|
||||
),
|
||||
%% Head fold and key fold over the same snapshot must agree exactly.
%% NOTE(review): re-running HeadFoldSnap() assumes the async runner is
%% re-usable for multiple executions — confirm against leveled_bookie.
true = HeadFoldSnap() == KeyFoldSnap().
|
||||
|
||||
|
||||
load_and_count(_Config) ->
|
||||
% Use artificially small files, and the load keys, counting they're all
|
||||
% present
|
||||
|
|
|
@ -74,7 +74,21 @@ replace_everything(_Config) ->
|
|||
compact_and_wait(Book1, 1000),
|
||||
{ok, FileList2} = file:list_dir(CompPath),
|
||||
io:format("Number of files after compaction ~w~n", [length(FileList2)]),
|
||||
true = FileList1 == FileList2,
|
||||
true = FileList1 =< FileList2,
|
||||
%% There will normally be 5 journal files after 50K write then alter
|
||||
%% That may be two files with entirely altered objects - which will be
|
||||
%% compacted, and will be compacted to nothing.
|
||||
%% The "middle" file - which will be 50% compactable may be scored to
|
||||
%% be part of the first run, or may end up in the second run. If in
|
||||
%% the first run, the second run will not compact and FL1 == FL2.
|
||||
%% Otherwise FL1 could be 0 and FL2 1. Hard to control this as there
|
||||
%% is randomisation in both the scoring and the journal size (due to
|
||||
%% jittering of parameters).
|
||||
compact_and_wait(Book1, 1000),
|
||||
{ok, FileList3} = file:list_dir(CompPath),
|
||||
io:format("Number of files after compaction ~w~n", [length(FileList3)]),
|
||||
%% By the third compaction there should be no further changes
|
||||
true = FileList2 == FileList3,
|
||||
{async, BackupFun} = leveled_bookie:book_hotbackup(Book1),
|
||||
ok = BackupFun(BackupPath),
|
||||
|
||||
|
@ -131,9 +145,9 @@ replace_everything(_Config) ->
|
|||
{OSpcL6, RSpcL6} = lists:split(200, lists:ukeysort(1, KSpcL6)),
|
||||
{KSpcL7, V7} =
|
||||
testutil:put_altered_indexed_objects(Book6, BKT3, RSpcL6),
|
||||
{ok, FileList3} = file:list_dir(CompPath),
|
||||
compact_and_wait(Book6),
|
||||
{ok, FileList4} = file:list_dir(CompPath),
|
||||
compact_and_wait(Book6),
|
||||
{ok, FileList5} = file:list_dir(CompPath),
|
||||
{OSpcL6A, V7} =
|
||||
testutil:put_altered_indexed_objects(Book6, BKT3, OSpcL6, true, V7),
|
||||
{async, BackupFun6} = leveled_bookie:book_hotbackup(Book6),
|
||||
|
@ -141,7 +155,7 @@ replace_everything(_Config) ->
|
|||
ok = leveled_bookie:book_close(Book6),
|
||||
|
||||
io:format("Checking object count in newly compacted journal files~n"),
|
||||
NewlyCompactedFiles = lists:subtract(FileList4, FileList3),
|
||||
NewlyCompactedFiles = lists:subtract(FileList5, FileList4),
|
||||
true = length(NewlyCompactedFiles) >= 1,
|
||||
CDBFilterFun = fun(_K, _V, _P, Acc, _EF) -> {loop, Acc + 1} end,
|
||||
CheckLengthFun =
|
||||
|
|
|
@ -738,11 +738,45 @@ basic_headonly_test(ObjectCount, RemoveCount, HeadOnly) ->
|
|||
Bucket0,
|
||||
Key0),
|
||||
CheckHeadFun =
|
||||
fun({add, SegID, B, K, H}) ->
|
||||
{ok, H} =
|
||||
leveled_bookie:book_headonly(Bookie1, SegID, B, K)
|
||||
fun(DB) ->
|
||||
fun({add, SegID, B, K, H}) ->
|
||||
{ok, H} =
|
||||
leveled_bookie:book_headonly(DB, SegID, B, K)
|
||||
end
|
||||
end,
|
||||
lists:foreach(CheckHeadFun, ObjectSpecL);
|
||||
lists:foreach(CheckHeadFun(Bookie1), ObjectSpecL),
|
||||
{ok, Snapshot} =
|
||||
leveled_bookie:book_start([{snapshot_bookie, Bookie1}]),
|
||||
ok = leveled_bookie:book_loglevel(Snapshot, warn),
|
||||
ok =
|
||||
leveled_bookie:book_addlogs(
|
||||
Snapshot, [b0001, b0002, b0003, i0027, p0007]
|
||||
),
|
||||
ok =
|
||||
leveled_bookie:book_removelogs(
|
||||
Snapshot, [b0019]
|
||||
),
|
||||
io:format(
|
||||
"Checking for ~w objects against Snapshot ~w~n",
|
||||
[length(ObjectSpecL), Snapshot]),
|
||||
lists:foreach(CheckHeadFun(Snapshot), ObjectSpecL),
|
||||
io:format("Closing snapshot ~w~n", [Snapshot]),
|
||||
ok = leveled_bookie:book_close(Snapshot),
|
||||
{ok, AltSnapshot} =
|
||||
leveled_bookie:book_start([{snapshot_bookie, Bookie1}]),
|
||||
ok =
|
||||
leveled_bookie:book_addlogs(
|
||||
AltSnapshot, [b0001, b0002, b0003, b0004, i0027, p0007]
|
||||
),
|
||||
true = is_process_alive(AltSnapshot),
|
||||
io:format(
|
||||
"Closing actual store ~w with snapshot ~w open~n",
|
||||
[Bookie1, AltSnapshot]
|
||||
),
|
||||
ok = leveled_bookie:book_close(Bookie1),
|
||||
% Sleep a beat so as not to race with the 'DOWN' message
|
||||
timer:sleep(10),
|
||||
false = is_process_alive(AltSnapshot);
|
||||
no_lookup ->
|
||||
{unsupported_message, head} =
|
||||
leveled_bookie:book_head(Bookie1,
|
||||
|
@ -753,11 +787,11 @@ basic_headonly_test(ObjectCount, RemoveCount, HeadOnly) ->
|
|||
leveled_bookie:book_headonly(Bookie1,
|
||||
SegmentID0,
|
||||
Bucket0,
|
||||
Key0)
|
||||
Key0),
|
||||
io:format("Closing actual store ~w~n", [Bookie1]),
|
||||
ok = leveled_bookie:book_close(Bookie1)
|
||||
end,
|
||||
|
||||
|
||||
ok = leveled_bookie:book_close(Bookie1),
|
||||
|
||||
{ok, FinalJournals} = file:list_dir(JFP),
|
||||
io:format("Trim has reduced journal count from " ++
|
||||
"~w to ~w and ~w after restart~n",
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue