Merge pull request #104 from martinsumner/mas-i103-coverage

Mas i103 coverage
This commit is contained in:
Martin Sumner 2017-11-13 10:31:28 +00:00 committed by GitHub
commit d866d7b5dc
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 326 additions and 144 deletions

View file

@ -916,39 +916,6 @@ get_mem(Key, Handle, HashTree, BinaryMode, QuickCheck) ->
extract_kvpair(Handle, ListToCheck, Key, BinaryMode)
end.
-spec get_nextkey(list()|file:io_device()) ->
nomorekeys|
{any(), nomorekeys}|
{any(), file:io_device(), {integer(), integer()}}.
%% @doc
%% Get the next key at a position in the file (or the first key if no position
%% is passed). Will return both a key and the next position, or nomorekeys if
%% the end has been reached (either in place of the result if there are no
%% more keys, or in place of the position if the returned key is the last key)
get_nextkey(Filename) when is_list(Filename) ->
get_nextkey(Handle);
get_nextkey(Handle) ->
% The first integer at the head of the file is the position of the first
% hash table, which bounds the end of the key/value data section
{ok, _} = file:position(Handle, bof),
{FirstHashPosition, _} = read_next_2_integers(Handle),
% Key/value data starts immediately after the pointer table of 256
% dword-sized entries at the head of the file
get_nextkey(Handle, {256 * ?DWORD_SIZE, FirstHashPosition}).
% Iterating clause: Position is the offset of the next key/value pair to
% read, FirstHashPosition marks the end of the data section
get_nextkey(Handle, {Position, FirstHashPosition}) ->
{ok, Position} = file:position(Handle, Position),
case read_next_2_integers(Handle) of
{KeyLength, ValueLength} ->
NextKey = read_next_key(Handle, KeyLength),
% Each record is prefixed by two length integers (key and value),
% hence the extra ?DWORD_SIZE when advancing the position
NextPosition = Position + KeyLength + ValueLength + ?DWORD_SIZE,
case NextPosition of
FirstHashPosition ->
% Next record would be the hash table itself - so the
% key just read was the last key
{NextKey, nomorekeys};
_ ->
{NextKey, Handle, {NextPosition, FirstHashPosition}}
end;
eof ->
nomorekeys
end.
-spec hashtable_calc(ets:tid(), integer()) -> {list(), binary()}.
%% @doc
%% Create a binary representation of the hash table to be written to the end
@ -1010,7 +977,7 @@ find_lastkey(Handle, IndexCache) ->
_ ->
{ok, _} = file:position(Handle, LastPosition),
{KeyLength, _ValueLength} = read_next_2_integers(Handle),
read_next_key(Handle, KeyLength)
safe_read_next_key(Handle, KeyLength)
end.
@ -1193,30 +1160,28 @@ saferead_keyvalue(Handle) ->
false;
{KeyL, ValueL} ->
case safe_read_next_key(Handle, KeyL) of
{error, _} ->
false;
eof ->
false;
false ->
false;
Key ->
case file:read(Handle, ValueL) of
eof ->
false;
{ok, Value} ->
{ok, Value} ->
case crccheck_value(Value) of
true ->
{Key, Value, KeyL, ValueL};
false ->
false
end
end
end;
eof ->
false
end
end
end.
safe_read_next_key(Handle, Length) ->
try read_next_key(Handle, Length) of
eof ->
false;
Term ->
Term
catch
@ -1250,10 +1215,10 @@ calc_crc(Value) ->
read_next_key(Handle, Length) ->
case file:read(Handle, Length) of
{ok, Bin} ->
{ok, Bin} ->
binary_to_term(Bin);
ReadError ->
ReadError
eof ->
eof
end.
@ -1907,32 +1872,9 @@ search_hash_table_findinslot_test() ->
?assertMatch(missing, get("../test/hashtable1_test.cdb", Key1, false)),
ok = file:delete("../test/hashtable1_test.cdb").
% Iterate with get_nextkey over a file that includes a key with an empty
% value ("K3"), confirming keys come back in write order and that the final
% key is returned with nomorekeys in place of a next position
getnextkey_inclemptyvalue_test() ->
L = [{"K9", "V9"}, {"K2", "V2"}, {"K3", ""},
{"K4", "V4"}, {"K5", "V5"}, {"K6", "V6"}, {"K7", "V7"},
{"K8", "V8"}, {"K1", "V1"}],
ok = create("../test/hashtable2_test.cdb", L),
{FirstKey, Handle, P1} = get_nextkey("../test/hashtable2_test.cdb"),
io:format("Next position details of ~w~n", [P1]),
?assertMatch("K9", FirstKey),
{SecondKey, Handle, P2} = get_nextkey(Handle, P1),
?assertMatch("K2", SecondKey),
{ThirdKeyNoValue, Handle, P3} = get_nextkey(Handle, P2),
?assertMatch("K3", ThirdKeyNoValue),
{_, Handle, P4} = get_nextkey(Handle, P3),
{_, Handle, P5} = get_nextkey(Handle, P4),
{_, Handle, P6} = get_nextkey(Handle, P5),
{_, Handle, P7} = get_nextkey(Handle, P6),
{_, Handle, P8} = get_nextkey(Handle, P7),
% Ninth (last) key - nomorekeys replaces the position in the result
{LastKey, nomorekeys} = get_nextkey(Handle, P8),
?assertMatch("K1", LastKey),
ok = file:delete("../test/hashtable2_test.cdb").
% A freshly-opened active file holds no keys, so get_nextkey should
% immediately return nomorekeys
newactivefile_test() ->
{LastPosition, _, _} = open_active_file("../test/activefile_test.cdb"),
% The write position of an empty file sits just after the pointer table
?assertMatch(256 * ?DWORD_SIZE, LastPosition),
Response = get_nextkey("../test/activefile_test.cdb"),
?assertMatch(nomorekeys, Response),
ok = file:delete("../test/activefile_test.cdb").
emptyvalue_fromdict_test() ->
@ -2015,10 +1957,10 @@ generate_sequentialkeys(Count, KVList) ->
generate_sequentialkeys(Count - 1, KVList ++ [KV]).
get_keys_byposition_manykeys_test_() ->
{timeout, 60, fun get_keys_byposition_manykeys_test_to/0}.
{timeout, 120, fun get_keys_byposition_manykeys_test_to/0}.
get_keys_byposition_manykeys_test_to() ->
KeyCount = 1024,
KeyCount = 2048,
{ok, P1} = cdb_open_writer("../test/poskeymany.pnd",
#cdb_options{binary_mode=false}),
KVList = generate_sequentialkeys(KeyCount, []),
@ -2210,6 +2152,96 @@ crc_corrupt_writer_test() ->
?assertMatch({"Key100", "Value100"}, cdb_get(P2, "Key100")),
ok = cdb_close(P2).
% Exhaustively flip each bit of a single on-disk key/value record and check
% that saferead_keyvalue either fails safely (false) or succeeds only in
% ways we can explain; then check specific corruption cases and the happy
% path
safe_read_test() ->
% should return the right thing or false, or the wrong thing if and
% only if we understand why
Key = term_to_binary(<<"Key">>),
Value = <<"Value">>,
CRC = calc_crc(Value),
ValToWrite = <<CRC:32/integer, Value/binary>>,
KeyL = byte_size(Key),
FlippedKeyL = endian_flip(KeyL),
ValueL= byte_size(ValToWrite),
FlippedValL = endian_flip(ValueL),
TestFN = "../test/saferead.pnd",
BinToWrite =
<<FlippedKeyL:32/integer,
FlippedValL:32/integer,
Key/binary,
ValToWrite/binary>>,
% Write the record with a single bit (at BitNumber) inverted, then
% attempt a safe read of the corrupted record
TestCorruptedWriteFun =
fun(BitNumber) ->
<<PreBin:BitNumber/bitstring,
Bit:1/integer,
PostBin/bitstring>> = BinToWrite,
BadBit = Bit bxor 1,
AltBin = <<PreBin:BitNumber/bitstring,
BadBit:1/integer,
PostBin/bitstring>>,
file:delete(TestFN),
{ok, Handle} = file:open(TestFN, ?WRITE_OPS),
ok = file:pwrite(Handle, 0, AltBin),
{ok, _} = file:position(Handle, bof),
case saferead_keyvalue(Handle) of
false ->
% Result OK to be false - should get that on error
ok;
{<<"Key">>, ValToWrite, KeyL, BadValueL} ->
% Sometimes corruption may yield a correct answer
% for example if Value Length is too big
%
% This can only happen with a corrupted value length at
% the end of the file - which is just a peculiarity of
% the test
?assertMatch(true, BadValueL > ValueL);
{_BadKey, ValToWrite, KeyL, ValueL} ->
% Key is not CRC checked - so may be bit flipped to
% something which still passes through binary_to_term
% Assumption is that the application should always
% ultimately know the key - and so will be able to check
% against the Key it is trying for.
ok
end,
ok = file:close(Handle)
end,
% Exercise every bit position in the record (8 header bytes plus the
% key and value)
lists:foreach(TestCorruptedWriteFun,
lists:seq(1, -1 + 8 * (KeyL + ValueL + 8))),
% Reading a key from a position beyond the end of the record should
% return false
{ok, HandleK} = file:open(TestFN, ?WRITE_OPS),
ok = file:pwrite(HandleK, 0, BinToWrite),
{ok, _} = file:position(HandleK, 8 + KeyL + ValueL),
?assertMatch(false, safe_read_next_key(HandleK, KeyL)),
ok = file:close(HandleK),
% An oversized key length (claiming the whole record) should be rejected
WrongKeyL = endian_flip(KeyL + ValueL),
{ok, HandleV0} = file:open(TestFN, ?WRITE_OPS),
ok = file:pwrite(HandleV0, 0, BinToWrite),
ok = file:pwrite(HandleV0, 0, <<WrongKeyL:32/integer>>),
{ok, _} = file:position(HandleV0, bof),
?assertMatch(false, saferead_keyvalue(HandleV0)),
ok = file:close(HandleV0),
% A zero value length should also be rejected
WrongValL = 0,
{ok, HandleV1} = file:open(TestFN, ?WRITE_OPS),
ok = file:pwrite(HandleV1, 0, BinToWrite),
ok = file:pwrite(HandleV1, 4, <<WrongValL:32/integer>>),
{ok, _} = file:position(HandleV1, bof),
?assertMatch(false, saferead_keyvalue(HandleV1)),
ok = file:close(HandleV1),
% Finally the uncorrupted record should read back cleanly
io:format("Happy check ~n"),
{ok, HandleHappy} = file:open(TestFN, ?WRITE_OPS),
ok = file:pwrite(HandleHappy, 0, BinToWrite),
{ok, _} = file:position(HandleHappy, bof),
?assertMatch({<<"Key">>, ValToWrite, KeyL, ValueL},
saferead_keyvalue(HandleHappy)),
file:delete(TestFN).
nonsense_coverage_test() ->
{ok, Pid} = gen_fsm:start(?MODULE, [#cdb_options{}], []),
ok = gen_fsm:send_all_state_event(Pid, nonsense),

View file

@ -453,7 +453,7 @@ gen_indexspec(Bucket, Key, IdxOp, IdxField, IdxTerm, SQN, TTL) ->
%% Generate an additional index term representing the change, if the last
%% modified date for the change is within the definition of recency.
%%
%% The objetc may have multiple last modified dates (siblings), and in this
%% The object may have multiple last modified dates (siblings), and in this
%% case index entries for all dates within the range are added.
%%
%% The index should entry auto-expire in the future (when it is no longer
@ -908,4 +908,19 @@ genaaeidx_test() ->
AAESpecsB2 = aae_indexspecs(AAE1, <<"Bucket0">>, Key, SQN, H, LastMods1),
?assertMatch(0, length(AAESpecsB2)).
% No AAE index specs should be generated when the last modified date falls
% outside the recency window (Megaseconds is decremented, pushing the date
% roughly 31 years into the past)
delayedupdate_aaeidx_test() ->
AAE = #recent_aae{filter=blacklist,
buckets=[],
limit_minutes=60,
unit_minutes=5},
Bucket = <<"Bucket1">>,
Key = <<"Key1">>,
SQN = 1,
H = erlang:phash2(null),
{Mega, Sec, MSec} = os:timestamp(),
LastMods = [{Mega -1, Sec, MSec}],
AAESpecs = aae_indexspecs(AAE, Bucket, Key, SQN, H, LastMods),
?assertMatch(0, length(AAESpecs)).
-endif.

View file

@ -91,7 +91,7 @@
-define(JOURNAL_FILEX, "cdb").
-define(PENDING_FILEX, "pnd").
-define(SAMPLE_SIZE, 200).
-define(SAMPLE_SIZE, 100).
-define(BATCH_SIZE, 32).
-define(BATCHES_TO_CHECK, 8).
%% How many consecutive files to compact in one run
@ -116,14 +116,24 @@
journal :: pid() | undefined,
compaction_perc :: float() | undefined}).
-type iclerk_options() :: #iclerk_options{}.
%%%============================================================================
%%% API
%%%============================================================================
-spec clerk_new(iclerk_options()) -> {ok, pid()}.
%% @doc
%% Generate a new clerk - an unlinked gen_server started with the passed
%% inker clerk options
clerk_new(InkerClerkOpts) ->
gen_server:start(?MODULE, [InkerClerkOpts], []).
-spec clerk_compact(pid(), pid(),
fun(), fun(), fun(),
pid(), integer()) -> ok.
%% @doc
%% Trigger a compaction for this clerk if the threshold of data recovery has
%% been met
clerk_compact(Pid, Checker, InitiateFun, CloseFun, FilterFun, Inker, TimeO) ->
gen_server:cast(Pid,
{compact,
@ -134,10 +144,18 @@ clerk_compact(Pid, Checker, InitiateFun, CloseFun, FilterFun, Inker, TimeO) ->
Inker,
TimeO}).
-spec clerk_hashtablecalc(ets:tid(), integer(), pid()) -> ok.
%% @doc
%% Spawn a dedicated clerk for the process of calculating the binary view
%% of the hashtable in the CDB file - so that the file is not blocked during
%% this calculation.  The result is cast back to the CDB process (CDBpid).
clerk_hashtablecalc(HashTree, StartPos, CDBpid) ->
{ok, Clerk} = gen_server:start(?MODULE, [#iclerk_options{}], []),
gen_server:cast(Clerk, {hashtable_calc, HashTree, StartPos, CDBpid}).
-spec clerk_stop(pid()) -> ok.
%% @doc
%% Stop the clerk (asynchronously, via a cast)
clerk_stop(Pid) ->
gen_server:cast(Pid, stop).
@ -240,6 +258,10 @@ code_change(_OldVsn, State, _Extra) ->
%%% External functions
%%%============================================================================
-spec schedule_compaction(list(integer()),
integer(),
{integer(), integer(), integer()}) -> integer().
%% @doc
%% Schedule the next compaction event for this store. Chooses a random
%% interval, and then a random start time within the first third
%% of the interval.
@ -256,7 +278,6 @@ code_change(_OldVsn, State, _Extra) ->
%%
%% Current TS should be the outcome of os:timestamp()
%%
schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
% We schedule the next interval by acting as if we were scheduling all
% n intervals at random, but then only choose the next one. After each
@ -314,11 +335,27 @@ schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
%%%============================================================================
%% @doc
%% Get a score for a single CDB file in the journal. This will pull out a bunch
%% of keys and sizes at random in an efficient way (by scanning the hashtable
%% then just picking the key and size information off disk).
%%
%% The score should represent a percentage which is the size of the file by
%% comparison to the original file if compaction was to be run. So if a file
%% can be reduced in size by 30% the score will be 70%.
%%
%% The score is based on a random sample - so will not be consistent between
%% calls.
check_single_file(CDB, FilterFun, FilterServer, MaxSQN, SampleSize, BatchSize) ->
FN = leveled_cdb:cdb_filename(CDB),
PositionList = leveled_cdb:cdb_getpositions(CDB, SampleSize),
% Fetch the sampled key/size pairs in batches, to limit the impact of
% any single request on the CDB file process
KeySizeList = fetch_inbatches(PositionList, BatchSize, CDB, []),
Score =
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN),
leveled_log:log("IC004", [FN, Score]),
Score.
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN) ->
FoldFunForSizeCompare =
fun(KS, {ActSize, RplSize}) ->
case KS of
@ -333,20 +370,22 @@ check_single_file(CDB, FilterFun, FilterServer, MaxSQN, SampleSize, BatchSize) -
{ActSize, RplSize + Size - ?CRC_SIZE}
end;
_ ->
% There is a key which is not in expected format
% Note that the key-size list has been filtered for
% errors by leveled_cdb - but this doesn't know the
% expected format of the key
{ActSize, RplSize}
end
end,
R0 = lists:foldl(FoldFunForSizeCompare, {0, 0}, KeySizeList),
{ActiveSize, ReplacedSize} = R0,
Score = case ActiveSize + ReplacedSize of
0 ->
100.0;
_ ->
100 * ActiveSize / (ActiveSize + ReplacedSize)
end,
leveled_log:log("IC004", [FN, Score]),
Score.
case ActiveSize + ReplacedSize of
0 ->
100.0;
_ ->
100 * ActiveSize / (ActiveSize + ReplacedSize)
end.
scan_all_files(Manifest, FilterFun, FilterServer, MaxSQN) ->
scan_all_files(Manifest, FilterFun, FilterServer, MaxSQN, []).
@ -980,6 +1019,22 @@ compact_singlefile_totwosmallfiles_testto() ->
ok = leveled_cdb:cdb_deletepending(CDBr),
ok = leveled_cdb:cdb_destroy(CDBr).
% Score a fixed sample of key/size pairs: the list includes keys absent
% from the current list (replaceable), a key above MaxSQN, and a key in an
% irregular 4-element format - the resulting score should be just under 70%
size_score_test() ->
KeySizeList =
[{{1, "INK", "Key1"}, 104},
{{2, "INK", "Key2"}, 124},
{{3, "INK", "Key3"}, 144},
{{4, "INK", "Key4"}, 154},
{{5, "INK", "Key5", "Subk1"}, 164},
{{6, "INK", "Key6"}, 174},
{{7, "INK", "Key7"}, 184}],
MaxSQN = 6,
CurrentList = ["Key1", "Key4", "Key5", "Key6"],
FilterFun = fun(L, K, _SQN) -> lists:member(K, L) end,
Score = size_comparison_score(KeySizeList, FilterFun, CurrentList, MaxSQN),
?assertMatch(true, Score > 69.0),
?assertMatch(true, Score < 70.0).
coverage_cheat_test() ->
{noreply, _State0} = handle_info(timeout, #state{}),
{ok, _State1} = code_change(null, #state{}, null),

View file

@ -327,11 +327,7 @@
{info, "After ~w PUTs total write time is ~w total sync time is ~w "
++ "and max write time is ~w and max sync time is ~w"}},
{"CDB18",
{info, "Handled return and write of hashtable"}},
{"CDB19",
{info, "Transferring filename ~s to waste ~s"}},
{"CDB20",
{info, "Deleting filename ~s as no waste retention period defined"}}
{info, "Handled return and write of hashtable"}}
]).

View file

@ -352,16 +352,12 @@ treelookup_range_end(EndRange, {NK0, SL0}, Iter0, Output, EndRangeFun) ->
case leveled_codec:endkey_passed(EndRange, NK0) of
true ->
{LHS, RHS} = lists:splitwith(PredFun, SL0),
case RHS of
[] ->
Output ++ LHS;
[{FirstRHSKey, FirstRHSValue}|_Rest] ->
case EndRangeFun(EndRange, FirstRHSKey, FirstRHSValue) of
true ->
Output ++ LHS ++ [{FirstRHSKey, FirstRHSValue}];
false ->
Output ++ LHS
end
[{FirstRHSKey, FirstRHSValue}|_Rest] = RHS,
case EndRangeFun(EndRange, FirstRHSKey, FirstRHSValue) of
true ->
Output ++ LHS ++ [{FirstRHSKey, FirstRHSValue}];
false ->
Output ++ LHS
end;
false ->
UpdOutput = Output ++ SL0,
@ -378,6 +374,8 @@ treelookup_range_end(EndRange, {NK0, SL0}, Iter0, Output, EndRangeFun) ->
end.
idxtlookup_range_start(StartRange, EndRange, {TLI, IDX}, EndRangeFun) ->
% TLI tuple of lists, IDS is a gb_tree of End Keys mapping to tuple
% indexes
Iter0 = tree_iterator_from(StartRange, IDX),
case tree_next(Iter0) of
none ->
@ -388,6 +386,8 @@ idxtlookup_range_start(StartRange, EndRange, {TLI, IDX}, EndRangeFun) ->
K < StartRange
end,
{_LHS, RHS} = lists:splitwith(PredFun, element(ListID, TLI)),
% The RHS is the list of {EK, SK} elements where the EK >= the
% StartRange, otherwise the LHS falls before the range
idxtlookup_range_end(EndRange, {TLI, NK, RHS}, Iter1, [], EndRangeFun)
end.
@ -395,24 +395,27 @@ idxtlookup_range_end(EndRange, {TLI, NK0, SL0}, Iter0, Output, EndRangeFun) ->
PredFun =
fun({K, _V}) ->
not leveled_codec:endkey_passed(EndRange, K)
% true if EndRange is after K
end,
case leveled_codec:endkey_passed(EndRange, NK0) of
true ->
% The end key of this list is after the end of the range, so no
% longer interested in any of the rest of the tree - just this
% sublist
{LHS, RHS} = lists:splitwith(PredFun, SL0),
case RHS of
[] ->
Output ++ LHS;
[{FirstRHSKey, FirstRHSValue}|_Rest] ->
case EndRangeFun(EndRange, FirstRHSKey, FirstRHSValue) of
true ->
% The start key is not after the end of the range
% and so this should be included in the range
Output ++ LHS ++ [{FirstRHSKey, FirstRHSValue}];
false ->
% the start key of the next key is after the end
% of the range and so should not be included
Output ++ LHS
end
% Split the {EK, SK} pairs based on the EndRange. Note that the
% last key is passed the end range - so the RHS cannot be empty, it
% must at least include the last key (as NK0 is at the end of SL0).
[{FirstRHSKey, FirstRHSValue}|_Rest] = RHS,
case EndRangeFun(EndRange, FirstRHSKey, FirstRHSValue) of
true ->
% The start key is not after the end of the range
% and so this should be included in the range
Output ++ LHS ++ [{FirstRHSKey, FirstRHSValue}];
false ->
% the start key of the next key is after the end
% of the range and so should not be included
Output ++ LHS
end;
false ->
UpdOutput = Output ++ SL0,
@ -587,13 +590,16 @@ generate_randomkeys(Seqn, Count, Acc, BucketLow, BRange) ->
tree_search_test() ->
search_test_by_type(tree).
search_test_by_type(tree),
extra_searchrange_test_by_type(tree).
idxt_search_test() ->
search_test_by_type(idxt).
search_test_by_type(idxt),
extra_searchrange_test_by_type(idxt).
skpl_search_test() ->
search_test_by_type(skpl).
search_test_by_type(skpl),
extra_searchrange_test_by_type(skpl).
search_test_by_type(Type) ->
MapFun =
@ -745,13 +751,16 @@ tree_test_by_(Width, Type, N) ->
tree_matchrange_test() ->
matchrange_test_by_type(tree).
matchrange_test_by_type(tree),
extra_matchrange_test_by_type(tree).
idxt_matchrange_test() ->
matchrange_test_by_type(idxt).
matchrange_test_by_type(idxt),
extra_matchrange_test_by_type(idxt).
skpl_matchrange_test() ->
matchrange_test_by_type(skpl).
matchrange_test_by_type(skpl),
extra_matchrange_test_by_type(skpl).
matchrange_test_by_type(Type) ->
@ -783,6 +792,62 @@ matchrange_test_by_type(Type) ->
?assertMatch(KL_Length, LengthR(AfterFirstKey, PenultimateKey, Tree0) + 2),
?assertMatch(1, LengthR(AfterPenultimateKey, FinalKey, Tree0)).
% Build a tree from up to 4000 unique keys and check match_range over 50
% sliding 50-key sub-ranges, using range bounds that fall between real keys
extra_matchrange_test_by_type(Type) ->
N = 4000,
KL = lists:ukeysort(1, generate_randomkeys(1, N, 1, N div 5)),
Tree0 = from_orderedlist(KL, Type),
SubL = lists:sublist(KL, 2000, 3100),
RangeLists =
lists:map(fun(P) -> lists:sublist(SubL, P, P + 50) end,
lists:seq(1, 50)),
TestRangeLFun =
fun(RangeL) ->
SKeyV = lists:nth(1, RangeL),
EKeyV = lists:nth(50, RangeL),
{{o, SB, SK, null}, _SV} = SKeyV,
{{o, EB, EK, null}, _EV} = EKeyV,
% Appending "0" places each range bound just after a real key,
% so neither bound is an exact match and 49 of the 50 keys
% fall inside the range
SRangeK = {o, SB, SK ++ "0", null},
ERangeK = {o, EB, EK ++ "0", null},
?assertMatch(49, length(match_range(SRangeK, ERangeK, Tree0)))
end,
lists:foreach(TestRangeLFun, RangeLists).
% As extra_matchrange_test_by_type, but for search_range over a tree of
% {EndKey, StartKey} entries built by convertkeylist - each sub-range of
% 50 source keys should cover 25 of the converted entries
extra_searchrange_test_by_type(Type) ->
N = 4000,
KL = lists:ukeysort(1, generate_randomkeys(1, N, 1, N div 5)),
SearchKL = convertkeylist(KL, []),
% Each {K, V} in the converted list is now an {EK, SK} pair for a range
Tree0 = from_orderedlist(SearchKL, Type),
SubL = lists:sublist(KL, 2000, 3100),
SKFun = fun(V) -> V end,
TestRangeLFun =
fun(P) ->
RangeL = lists:sublist(SubL, P, P + 50),
% If P is odd, the range keys will be between a start key and an
% end key.
% If P is even, the range keys will be between an end key and a
% start key
SKeyV = lists:nth(1, RangeL),
EKeyV = lists:nth(50, RangeL),
{{o, SB, SK, null}, _SV} = SKeyV,
{{o, EB, EK, null}, _EV} = EKeyV,
FRangeK = {o, SB, SK ++ "0", null},
BRangeK = {o, EB, EK ++ "0", null},
?assertMatch(25, length(search_range(FRangeK, BRangeK, Tree0, SKFun)))
end,
lists:foreach(TestRangeLFun, lists:seq(1, 50)).
% Fold an ordered key list into {EndKey, StartKey} pairs by pairing
% consecutive keys; any trailing unpaired key (list shorter than 2) is
% dropped
convertkeylist(KeyList, Acc) when length(KeyList) < 2 ->
lists:reverse(Acc);
convertkeylist(KeyList, Acc) ->
[{SK, _SV}|OddTail] = KeyList,
[{EK, _EV}|EvenTail] = OddTail,
convertkeylist(EvenTail, [{EK, SK}|Acc]).
match_fun(Tree) ->
fun({K, V}) ->
?assertMatch({value, V}, match(K, Tree))

View file

@ -172,32 +172,53 @@ journal_compaction_tester(Restart, WRP) ->
Bookie0
end,
ok = leveled_bookie:book_compactjournal(Bookie1, 30000),
testutil:wait_for_compaction(Bookie1),
% Start snapshot - should not stop deletions
WasteFP = RootPath ++ "/journal/journal_files/waste",
% Start snapshot - should stop deletions
{ok, PclClone, InkClone} =
leveled_bookie:book_snapshot(Bookie1, store, undefined, false),
ok = leveled_bookie:book_compactjournal(Bookie1, 30000),
testutil:wait_for_compaction(Bookie1),
% Wait to cause delete_pending to be blocked by snapshot
% timeout on switch to delete_pending is 10s
timer:sleep(10100),
case WRP of
undefined ->
ok;
_ ->
% Check nothing got deleted
{ok, CJs} = file:list_dir(WasteFP),
true = length(CJs) == 0
end,
ok = leveled_penciller:pcl_close(PclClone),
ok = leveled_inker:ink_close(InkClone),
% Snapshot released so deletes should occur at next timeout
case WRP of
undefined ->
timer:sleep(10100); % wait for delete_pending timeout
% Wait 2 seconds for files to be deleted
WasteFP = RootPath ++ "/journal/journal_files/waste",
lists:foldl(fun(X, Found) ->
case Found of
true ->
Found;
false ->
{ok, Files} = file:list_dir(WasteFP),
if
length(Files) > 0 ->
io:format("Deleted files found~n"),
true;
length(Files) == 0 ->
timer:sleep(X),
false
end
end
end,
false,
[2000,2000,2000,2000,2000,2000]),
_ ->
FindDeletedFilesFun =
fun(X, Found) ->
case Found of
true ->
Found;
false ->
{ok, Files} = file:list_dir(WasteFP),
if
length(Files) > 0 ->
io:format("Deleted files found~n"),
true;
length(Files) == 0 ->
timer:sleep(X),
false
end
end
end,
lists:foldl(FindDeletedFilesFun,
false,
[2000,2000,2000,2000,2000,2000])
end,
{ok, ClearedJournals} = file:list_dir(WasteFP),
io:format("~w ClearedJournals found~n", [length(ClearedJournals)]),
case is_integer(WRP) of
@ -210,10 +231,8 @@ journal_compaction_tester(Restart, WRP) ->
ChkList3 = lists:sublist(lists:sort(ObjList2), 500),
testutil:check_forlist(Bookie1, ChkList3),
ok = leveled_penciller:pcl_close(PclClone),
ok = leveled_inker:ink_close(InkClone),
ok = leveled_bookie:book_close(Bookie1),
% Restart
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
testutil:check_forobject(Bookie2, TestObject),