Test fix-up
There was a test that failed to close down a bookie, and that caused some issues. The issues are double-resolved: the close-down was tidied, and the forgotten close was added back in. There is also some general tidying in anticipation of TTL support.
This commit is contained in:
parent
0a2053b557
commit
e9c568a8b3
7 changed files with 167 additions and 101 deletions
|
@ -19,6 +19,11 @@
|
|||
unreferenced_files :: list(),
|
||||
target_is_basement = false ::boolean()}).
|
||||
|
||||
-record(level,
|
||||
{level :: integer(),
|
||||
is_basement = false :: boolean(),
|
||||
timestamp :: integer()}).
|
||||
|
||||
-record(manifest_entry,
|
||||
{start_key :: tuple(),
|
||||
end_key :: tuple(),
|
||||
|
|
|
@ -88,15 +88,15 @@ key_dominates(LeftKey, RightKey) ->
|
|||
end.
|
||||
|
||||
|
||||
maybe_reap_expiredkey(KV, IsBasement) ->
|
||||
maybe_reap_expiredkey(KV, LevelD) ->
|
||||
Status = strip_to_statusonly(KV),
|
||||
maybe_reap(Status, IsBasement).
|
||||
maybe_reap(Status, LevelD).
|
||||
|
||||
maybe_reap({_, infinity}, _) ->
|
||||
false; % key is not set to expire
|
||||
maybe_reap({_, TS}, {basement, CurrTS}) when CurrTS > TS ->
|
||||
maybe_reap({_, TS}, {true, CurrTS}) when CurrTS > TS ->
|
||||
true; % basement and ready to expire
|
||||
maybe_reap(tomb, {basement, _CurrTS}) ->
|
||||
maybe_reap(tomb, {true, _CurrTS}) ->
|
||||
true; % always expire in basement
|
||||
maybe_reap(_, _) ->
|
||||
false.
|
||||
|
|
|
@ -80,6 +80,7 @@
|
|||
clerk_new(Owner) ->
|
||||
{ok, Pid} = gen_server:start(?MODULE, [], []),
|
||||
ok = gen_server:call(Pid, {register, Owner}, infinity),
|
||||
io:format("Penciller's clerk ~w started with owner ~w~n", [Pid, Owner]),
|
||||
{ok, Pid}.
|
||||
|
||||
clerk_manifestchange(Pid, Action, Closing) ->
|
||||
|
@ -104,7 +105,7 @@ handle_call({manifest_change, return, true}, _From, State) ->
|
|||
WI = State#state.work_item,
|
||||
{reply, {ok, WI}, State};
|
||||
false ->
|
||||
{reply, no_change, State}
|
||||
{stop, normal, no_change, State}
|
||||
end;
|
||||
handle_call({manifest_change, confirm, Closing}, From, State) ->
|
||||
case Closing of
|
||||
|
@ -139,8 +140,9 @@ handle_info(timeout, State=#state{change_pending=Pnd}) when Pnd == false ->
|
|||
State#state{change_pending=true, work_item=WI}}
|
||||
end.
|
||||
|
||||
terminate(_Reason, _State) ->
|
||||
ok.
|
||||
terminate(Reason, _State) ->
|
||||
io:format("Penciller's Clerk ~w shutdown now complete for reason ~w~n",
|
||||
[self(), Reason]).
|
||||
|
||||
code_change(_OldVsn, State, _Extra) ->
|
||||
{ok, State}.
|
||||
|
@ -153,7 +155,7 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
requestandhandle_work(State) ->
|
||||
case leveled_penciller:pcl_workforclerk(State#state.owner) of
|
||||
none ->
|
||||
io:format("Work prompted but none needed~n"),
|
||||
io:format("Work prompted but none needed ~w~n", [self()]),
|
||||
{false, ?MAX_TIMEOUT};
|
||||
WI ->
|
||||
{NewManifest, FilesToDelete} = merge(WI),
|
||||
|
@ -219,7 +221,7 @@ merge(WI) ->
|
|||
mark_for_delete([], _Penciller) ->
|
||||
ok;
|
||||
mark_for_delete([Head|Tail], Penciller) ->
|
||||
leveled_sft:sft_setfordelete(Head#manifest_entry.owner, Penciller),
|
||||
ok = leveled_sft:sft_setfordelete(Head#manifest_entry.owner, Penciller),
|
||||
mark_for_delete(Tail, Penciller).
|
||||
|
||||
|
||||
|
@ -305,7 +307,18 @@ do_merge(KL1, KL2, {SrcLevel, IsB}, {Filepath, MSN}, FileCounter, OutList) ->
|
|||
io:format("File to be created as part of MSN=~w Filename=~s~n",
|
||||
[MSN, FileName]),
|
||||
TS1 = os:timestamp(),
|
||||
{ok, Pid, Reply} = leveled_sft:sft_new(FileName, KL1, KL2, SrcLevel + 1),
|
||||
LevelR = case IsB of
|
||||
true ->
|
||||
#level{level = SrcLevel + 1,
|
||||
is_basement = true,
|
||||
timestamp = os:timestamp()};
|
||||
false ->
|
||||
SrcLevel + 1
|
||||
end,
|
||||
{ok, Pid, Reply} = leveled_sft:sft_new(FileName,
|
||||
KL1,
|
||||
KL2,
|
||||
LevelR),
|
||||
{{KL1Rem, KL2Rem}, SmallestKey, HighestKey} = Reply,
|
||||
ExtMan = lists:append(OutList,
|
||||
[#manifest_entry{start_key=SmallestKey,
|
||||
|
|
|
@ -529,6 +529,9 @@ handle_cast({confirm_delete, FileName}, State=#state{is_snapshot=Snap})
|
|||
case Reply of
|
||||
{true, Pid} ->
|
||||
UF1 = lists:keydelete(FileName, 1, State#state.unreferenced_files),
|
||||
io:format("Filename ~s removed from unreferenced files as delete "
|
||||
++ "is confirmed - file should now close~n",
|
||||
[FileName]),
|
||||
ok = leveled_sft:sft_deleteconfirmed(Pid),
|
||||
{noreply, State#state{unreferenced_files=UF1}};
|
||||
_ ->
|
||||
|
@ -610,8 +613,9 @@ terminate(Reason, State) ->
|
|||
% Tidy shutdown of individual files
|
||||
ok = close_files(0, UpdState#state.manifest),
|
||||
lists:foreach(fun({_FN, Pid, _SN}) ->
|
||||
leveled_sft:sft_close(Pid) end,
|
||||
ok = leveled_sft:sft_close(Pid) end,
|
||||
UpdState#state.unreferenced_files),
|
||||
io:format("Shutdown complete for Penciller~n"),
|
||||
ok.
|
||||
|
||||
|
||||
|
@ -1015,7 +1019,8 @@ close_files(?MAX_LEVELS - 1, _Manifest) ->
|
|||
ok;
|
||||
close_files(Level, Manifest) ->
|
||||
LevelList = get_item(Level, Manifest, []),
|
||||
lists:foreach(fun(F) -> leveled_sft:sft_close(F#manifest_entry.owner) end,
|
||||
lists:foreach(fun(F) ->
|
||||
ok = leveled_sft:sft_close(F#manifest_entry.owner) end,
|
||||
LevelList),
|
||||
close_files(Level + 1, Manifest).
|
||||
|
||||
|
|
|
@ -211,20 +211,29 @@
|
|||
%%% API
|
||||
%%%============================================================================
|
||||
|
||||
sft_new(Filename, KL1, KL2, Level) ->
|
||||
sft_new(Filename, KL1, KL2, Level, #sft_options{}).
|
||||
sft_new(Filename, KL1, KL2, LevelInfo) ->
|
||||
sft_new(Filename, KL1, KL2, LevelInfo, #sft_options{}).
|
||||
|
||||
sft_new(Filename, KL1, KL2, Level, Options) ->
|
||||
sft_new(Filename, KL1, KL2, LevelInfo, Options) ->
|
||||
LevelR = case is_integer(LevelInfo) of
|
||||
true ->
|
||||
#level{level=LevelInfo};
|
||||
_ ->
|
||||
if
|
||||
is_record(LevelInfo, level) ->
|
||||
LevelInfo
|
||||
end
|
||||
end,
|
||||
{ok, Pid} = gen_server:start(?MODULE, [], []),
|
||||
case Options#sft_options.wait of
|
||||
true ->
|
||||
Reply = gen_server:call(Pid,
|
||||
{sft_new, Filename, KL1, KL2, Level},
|
||||
{sft_new, Filename, KL1, KL2, LevelR},
|
||||
infinity),
|
||||
{ok, Pid, Reply};
|
||||
false ->
|
||||
gen_server:cast(Pid,
|
||||
{sft_new, Filename, KL1, KL2, Level}),
|
||||
{sft_new, Filename, KL1, KL2, LevelR}),
|
||||
{ok, Pid}
|
||||
end.
|
||||
|
||||
|
@ -270,22 +279,22 @@ sft_getmaxsequencenumber(Pid) ->
|
|||
init([]) ->
|
||||
{ok, #state{}}.
|
||||
|
||||
handle_call({sft_new, Filename, KL1, [], 0}, _From, _State) ->
|
||||
handle_call({sft_new, Filename, KL1, [], _LevelR=#level{level=L}},
|
||||
_From,
|
||||
_State) when L == 0 ->
|
||||
{ok, State} = create_levelzero(KL1, Filename),
|
||||
{reply,
|
||||
{{[], []},
|
||||
State#state.smallest_key,
|
||||
State#state.highest_key},
|
||||
State};
|
||||
handle_call({sft_new, Filename, KL1, KL2, Level}, _From, State) ->
|
||||
handle_call({sft_new, Filename, KL1, KL2, LevelR}, _From, _State) ->
|
||||
case create_file(Filename) of
|
||||
{error, Reason} ->
|
||||
{reply, {error, Reason}, State};
|
||||
{Handle, FileMD} ->
|
||||
{ReadHandle, UpdFileMD, KeyRemainders} = complete_file(Handle,
|
||||
FileMD,
|
||||
KL1, KL2,
|
||||
Level),
|
||||
LevelR),
|
||||
{reply, {KeyRemainders,
|
||||
UpdFileMD#state.smallest_key,
|
||||
UpdFileMD#state.highest_key},
|
||||
|
@ -335,7 +344,8 @@ handle_call({set_for_delete, Penciller}, _From, State) ->
|
|||
handle_call(get_maxsqn, _From, State) ->
|
||||
statecheck_onreply(State#state.highest_sqn, State).
|
||||
|
||||
handle_cast({sft_new, Filename, Inp1, [], 0}, _State) ->
|
||||
handle_cast({sft_new, Filename, Inp1, [], _LevelR=#level{level=L}}, _State)
|
||||
when L == 0->
|
||||
SW = os:timestamp(),
|
||||
{ok, State} = create_levelzero(Inp1, Filename),
|
||||
io:format("File creation of L0 file ~s took ~w microseconds~n",
|
||||
|
@ -364,7 +374,12 @@ terminate(Reason, State) ->
|
|||
io:format("Exit called and now clearing ~s~n",
|
||||
[State#state.filename]),
|
||||
ok = file:close(State#state.handle),
|
||||
ok = file:delete(State#state.filename);
|
||||
ok = case filelib:is_file(State#state.filename) of
|
||||
true ->
|
||||
file:delete(State#state.filename);
|
||||
false ->
|
||||
ok
|
||||
end;
|
||||
_ ->
|
||||
case State#state.handle of
|
||||
undefined ->
|
||||
|
@ -407,12 +422,11 @@ create_levelzero(Inp1, Filename) ->
|
|||
InputSize = length(ListForFile),
|
||||
io:format("Creating file with input of size ~w~n", [InputSize]),
|
||||
Rename = {true, TmpFilename, PrmFilename},
|
||||
{ReadHandle, UpdFileMD, {[], []}} = complete_file(Handle,
|
||||
FileMD,
|
||||
ListForFile,
|
||||
[],
|
||||
0,
|
||||
Rename),
|
||||
{ReadHandle,
|
||||
UpdFileMD,
|
||||
{[], []}} = complete_file(Handle, FileMD,
|
||||
ListForFile, [],
|
||||
#level{level=0}, Rename),
|
||||
{ok,
|
||||
UpdFileMD#state{handle=ReadHandle,
|
||||
filename=PrmFilename,
|
||||
|
@ -504,15 +518,15 @@ open_file(FileMD) ->
|
|||
|
||||
%% Take a file handle with a previously created header and complete it based on
|
||||
%% the two key lists KL1 and KL2
|
||||
complete_file(Handle, FileMD, KL1, KL2, Level) ->
|
||||
complete_file(Handle, FileMD, KL1, KL2, Level, false).
|
||||
complete_file(Handle, FileMD, KL1, KL2, LevelR) ->
|
||||
complete_file(Handle, FileMD, KL1, KL2, LevelR, false).
|
||||
|
||||
complete_file(Handle, FileMD, KL1, KL2, Level, Rename) ->
|
||||
complete_file(Handle, FileMD, KL1, KL2, LevelR, Rename) ->
|
||||
{ok, KeyRemainders} = write_keys(Handle,
|
||||
maybe_expand_pointer(KL1),
|
||||
maybe_expand_pointer(KL2),
|
||||
[], <<>>,
|
||||
Level,
|
||||
LevelR,
|
||||
fun sftwrite_function/2),
|
||||
{ReadHandle, UpdFileMD} = case Rename of
|
||||
false ->
|
||||
|
@ -773,18 +787,27 @@ get_nextkeyaftermatch([_KTuple|T], KeyToFind, PrevV) ->
|
|||
%% Slots are created then written in bulk to impove I/O efficiency. Slots will
|
||||
%% be written in groups of 32
|
||||
|
||||
write_keys(Handle, KL1, KL2, SlotIndex, SerialisedSlots, Level, WriteFun) ->
|
||||
write_keys(Handle, KL1, KL2, {0, 0},
|
||||
write_keys(Handle,
|
||||
KL1, KL2,
|
||||
SlotIndex, SerialisedSlots,
|
||||
{infinity, 0}, null, {last, null}, Level, WriteFun).
|
||||
LevelR, WriteFun) ->
|
||||
write_keys(Handle,
|
||||
KL1, KL2,
|
||||
{0, 0},
|
||||
SlotIndex, SerialisedSlots,
|
||||
{infinity, 0}, null, {last, null},
|
||||
LevelR, WriteFun).
|
||||
|
||||
|
||||
write_keys(Handle, KL1, KL2, {SlotCount, SlotTotal},
|
||||
write_keys(Handle,
|
||||
KL1, KL2,
|
||||
{SlotCount, SlotTotal},
|
||||
SlotIndex, SerialisedSlots,
|
||||
{LSN, HSN}, LowKey, LastKey, Level, WriteFun)
|
||||
{LSN, HSN}, LowKey, LastKey,
|
||||
LevelR, WriteFun)
|
||||
when SlotCount =:= ?SLOT_GROUPWRITE_COUNT ->
|
||||
UpdHandle = WriteFun(slots , {Handle, SerialisedSlots}),
|
||||
case maxslots_bylevel(SlotTotal, Level) of
|
||||
case maxslots_bylevel(SlotTotal, LevelR#level.level) of
|
||||
reached ->
|
||||
{complete_keywrite(UpdHandle,
|
||||
SlotIndex,
|
||||
|
@ -792,14 +815,20 @@ write_keys(Handle, KL1, KL2, {SlotCount, SlotTotal},
|
|||
WriteFun),
|
||||
{KL1, KL2}};
|
||||
continue ->
|
||||
write_keys(UpdHandle, KL1, KL2, {0, SlotTotal},
|
||||
write_keys(UpdHandle,
|
||||
KL1, KL2,
|
||||
{0, SlotTotal},
|
||||
SlotIndex, <<>>,
|
||||
{LSN, HSN}, LowKey, LastKey, Level, WriteFun)
|
||||
{LSN, HSN}, LowKey, LastKey,
|
||||
LevelR, WriteFun)
|
||||
end;
|
||||
write_keys(Handle, KL1, KL2, {SlotCount, SlotTotal},
|
||||
write_keys(Handle,
|
||||
KL1, KL2,
|
||||
{SlotCount, SlotTotal},
|
||||
SlotIndex, SerialisedSlots,
|
||||
{LSN, HSN}, LowKey, LastKey, Level, WriteFun) ->
|
||||
SlotOutput = create_slot(KL1, KL2, Level),
|
||||
{LSN, HSN}, LowKey, LastKey,
|
||||
LevelR, WriteFun) ->
|
||||
SlotOutput = create_slot(KL1, KL2, LevelR),
|
||||
{{LowKey_Slot, SegFilter, SerialisedSlot, LengthList},
|
||||
{{LSN_Slot, HSN_Slot}, LastKey_Slot, Status},
|
||||
KL1rem, KL2rem} = SlotOutput,
|
||||
|
@ -818,9 +847,12 @@ write_keys(Handle, KL1, KL2, {SlotCount, SlotTotal},
|
|||
WriteFun),
|
||||
{KL1rem, KL2rem}};
|
||||
full ->
|
||||
write_keys(Handle, KL1rem, KL2rem, {SlotCount + 1, SlotTotal + 1},
|
||||
write_keys(Handle,
|
||||
KL1rem, KL2rem,
|
||||
{SlotCount + 1, SlotTotal + 1},
|
||||
UpdSlotIndex, UpdSlots,
|
||||
SNExtremes, FirstKey, FinalKey, Level, WriteFun);
|
||||
SNExtremes, FirstKey, FinalKey,
|
||||
LevelR, WriteFun);
|
||||
complete ->
|
||||
UpdHandle = WriteFun(slots , {Handle, UpdSlots}),
|
||||
{complete_keywrite(UpdHandle,
|
||||
|
@ -929,11 +961,11 @@ maxslots_bylevel(SlotTotal, _Level) ->
|
|||
%% Also this should return a partial block if the KeyLists have been exhausted
|
||||
%% but the block is full
|
||||
|
||||
create_block(KeyList1, KeyList2, Level) ->
|
||||
create_block(KeyList1, KeyList2, [], {infinity, 0}, [], Level).
|
||||
create_block(KeyList1, KeyList2, LevelR) ->
|
||||
create_block(KeyList1, KeyList2, [], {infinity, 0}, [], LevelR).
|
||||
|
||||
create_block(KeyList1, KeyList2,
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, _)
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, _LevelR)
|
||||
when length(BlockKeyList)==?BLOCK_SIZE ->
|
||||
case {KeyList1, KeyList2} of
|
||||
{[], []} ->
|
||||
|
@ -942,11 +974,13 @@ create_block(KeyList1, KeyList2,
|
|||
{BlockKeyList, full, {LSN, HSN}, SegmentList, KeyList1, KeyList2}
|
||||
end;
|
||||
create_block([], [],
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, _) ->
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, _LevelR) ->
|
||||
{BlockKeyList, partial, {LSN, HSN}, SegmentList, [], []};
|
||||
create_block(KeyList1, KeyList2,
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, Level) ->
|
||||
case key_dominates(KeyList1, KeyList2, Level) of
|
||||
BlockKeyList, {LSN, HSN}, SegmentList, LevelR) ->
|
||||
case key_dominates(KeyList1,
|
||||
KeyList2,
|
||||
{LevelR#level.is_basement, LevelR#level.timestamp}) of
|
||||
{{next_key, TopKey}, Rem1, Rem2} ->
|
||||
{UpdLSN, UpdHSN} = update_sequencenumbers(TopKey, LSN, HSN),
|
||||
NewBlockKeyList = lists:append(BlockKeyList,
|
||||
|
@ -955,11 +989,11 @@ create_block(KeyList1, KeyList2,
|
|||
[hash_for_segmentid(TopKey)]),
|
||||
create_block(Rem1, Rem2,
|
||||
NewBlockKeyList, {UpdLSN, UpdHSN},
|
||||
NewSegmentList, Level);
|
||||
NewSegmentList, LevelR);
|
||||
{skipped_key, Rem1, Rem2} ->
|
||||
create_block(Rem1, Rem2,
|
||||
BlockKeyList, {LSN, HSN},
|
||||
SegmentList, Level)
|
||||
SegmentList, LevelR)
|
||||
end.
|
||||
|
||||
|
||||
|
@ -998,11 +1032,11 @@ create_slot(KL1, KL2, _, _, SegLists, SerialisedSlot, LengthList,
|
|||
{{LowKey, generate_segment_filter(SegLists), SerialisedSlot, LengthList},
|
||||
{{LSN, HSN}, LastKey, partial},
|
||||
KL1, KL2};
|
||||
create_slot(KL1, KL2, Level, BlockCount, SegLists, SerialisedSlot, LengthList,
|
||||
create_slot(KL1, KL2, LevelR, BlockCount, SegLists, SerialisedSlot, LengthList,
|
||||
{LowKey, LSN, HSN, LastKey, _Status}) ->
|
||||
{BlockKeyList, Status,
|
||||
{LSNb, HSNb},
|
||||
SegmentList, KL1b, KL2b} = create_block(KL1, KL2, Level),
|
||||
SegmentList, KL1b, KL2b} = create_block(KL1, KL2, LevelR),
|
||||
TrackingMetadata = case LowKey of
|
||||
null ->
|
||||
[NewLowKeyV|_] = BlockKeyList,
|
||||
|
@ -1021,7 +1055,7 @@ create_slot(KL1, KL2, Level, BlockCount, SegLists, SerialisedSlot, LengthList,
|
|||
SerialisedBlock = serialise_block(BlockKeyList),
|
||||
BlockLength = byte_size(SerialisedBlock),
|
||||
SerialisedSlot2 = <<SerialisedSlot/binary, SerialisedBlock/binary>>,
|
||||
create_slot(KL1b, KL2b, Level, BlockCount - 1, SegLists ++ [SegmentList],
|
||||
create_slot(KL1b, KL2b, LevelR, BlockCount - 1, SegLists ++ [SegmentList],
|
||||
SerialisedSlot2, LengthList ++ [BlockLength],
|
||||
TrackingMetadata).
|
||||
|
||||
|
@ -1416,7 +1450,7 @@ simple_create_block_test() ->
|
|||
KeyList2 = [{{o, "Bucket1", "Key2", null}, {3, {active, infinity}, null}}],
|
||||
{MergedKeyList, ListStatus, SN, _, _, _} = create_block(KeyList1,
|
||||
KeyList2,
|
||||
1),
|
||||
#level{level=1}),
|
||||
?assertMatch(partial, ListStatus),
|
||||
[H1|T1] = MergedKeyList,
|
||||
?assertMatch(H1, {{o, "Bucket1", "Key1", null}, {1, {active, infinity}, null}}),
|
||||
|
@ -1431,7 +1465,7 @@ dominate_create_block_test() ->
|
|||
KeyList2 = [{{o, "Bucket1", "Key2", null}, {3, {tomb, infinity}, null}}],
|
||||
{MergedKeyList, ListStatus, SN, _, _, _} = create_block(KeyList1,
|
||||
KeyList2,
|
||||
1),
|
||||
#level{level=1}),
|
||||
?assertMatch(partial, ListStatus),
|
||||
[K1, K2] = MergedKeyList,
|
||||
?assertMatch(K1, {{o, "Bucket1", "Key1", null}, {1, {active, infinity}, null}}),
|
||||
|
@ -1477,7 +1511,7 @@ alternating_create_block_test() ->
|
|||
{KeyList1, KeyList2} = sample_keylist(),
|
||||
{MergedKeyList, ListStatus, _, _, _, _} = create_block(KeyList1,
|
||||
KeyList2,
|
||||
1),
|
||||
#level{level=1}),
|
||||
BlockSize = length(MergedKeyList),
|
||||
?assertMatch(BlockSize, 32),
|
||||
?assertMatch(ListStatus, complete),
|
||||
|
@ -1488,7 +1522,9 @@ alternating_create_block_test() ->
|
|||
K32 = lists:nth(32, MergedKeyList),
|
||||
?assertMatch(K32, {{o, "Bucket4", "Key1", null}, {1, {active, infinity}, null}}),
|
||||
HKey = {{o, "Bucket1", "Key0", null}, {1, {active, infinity}, null}},
|
||||
{_, ListStatus2, _, _, _, _} = create_block([HKey|KeyList1], KeyList2, 1),
|
||||
{_, ListStatus2, _, _, _, _} = create_block([HKey|KeyList1],
|
||||
KeyList2,
|
||||
#level{level=1}),
|
||||
?assertMatch(ListStatus2, full).
|
||||
|
||||
|
||||
|
@ -1583,7 +1619,7 @@ merge_seglists_test() ->
|
|||
|
||||
createslot_stage1_test() ->
|
||||
{KeyList1, KeyList2} = sample_keylist(),
|
||||
Out = create_slot(KeyList1, KeyList2, 1),
|
||||
Out = create_slot(KeyList1, KeyList2, #level{level=1}),
|
||||
{{LowKey, SegFilter, _SerialisedSlot, _LengthList},
|
||||
{{LSN, HSN}, LastKey, Status},
|
||||
KL1, KL2} = Out,
|
||||
|
@ -1606,7 +1642,7 @@ createslot_stage1_test() ->
|
|||
createslot_stage2_test() ->
|
||||
Out = create_slot(lists:sort(generate_randomkeys(100)),
|
||||
lists:sort(generate_randomkeys(100)),
|
||||
1),
|
||||
#level{level=1}),
|
||||
{{_LowKey, _SegFilter, SerialisedSlot, LengthList},
|
||||
{{_LSN, _HSN}, _LastKey, Status},
|
||||
_KL1, _KL2} = Out,
|
||||
|
@ -1619,7 +1655,7 @@ createslot_stage2_test() ->
|
|||
createslot_stage3_test() ->
|
||||
Out = create_slot(lists:sort(generate_sequentialkeys(100, 1)),
|
||||
lists:sort(generate_sequentialkeys(100, 101)),
|
||||
1),
|
||||
#level{level=1}),
|
||||
{{LowKey, SegFilter, SerialisedSlot, LengthList},
|
||||
{{_LSN, _HSN}, LastKey, Status},
|
||||
KL1, KL2} = Out,
|
||||
|
@ -1662,7 +1698,10 @@ testwrite_function(finalise, {Handle, C_SlotIndex, SNExtremes, KeyExtremes}) ->
|
|||
|
||||
writekeys_stage1_test() ->
|
||||
{KL1, KL2} = sample_keylist(),
|
||||
{FunOut, {_KL1Rem, _KL2Rem}} = write_keys([], KL1, KL2, [], <<>>, 1,
|
||||
{FunOut, {_KL1Rem, _KL2Rem}} = write_keys([],
|
||||
KL1, KL2,
|
||||
[], <<>>,
|
||||
#level{level=1},
|
||||
fun testwrite_function/2),
|
||||
{Handle, {_, PointerIndex}, SNExtremes, KeyExtremes} = FunOut,
|
||||
?assertMatch(SNExtremes, {1,3}),
|
||||
|
@ -1685,7 +1724,9 @@ initial_create_file_test() ->
|
|||
Filename = "../test/test1.sft",
|
||||
{KL1, KL2} = sample_keylist(),
|
||||
{Handle, FileMD} = create_file(Filename),
|
||||
{UpdHandle, UpdFileMD, {[], []}} = complete_file(Handle, FileMD, KL1, KL2, 1),
|
||||
{UpdHandle, UpdFileMD, {[], []}} = complete_file(Handle, FileMD,
|
||||
KL1, KL2,
|
||||
#level{level=1}),
|
||||
Result1 = fetch_keyvalue(UpdHandle, UpdFileMD, {o, "Bucket1", "Key8", null}),
|
||||
io:format("Result is ~w~n", [Result1]),
|
||||
?assertMatch(Result1, {{o, "Bucket1", "Key8", null},
|
||||
|
@ -1703,7 +1744,8 @@ big_create_file_test() ->
|
|||
{InitHandle, InitFileMD} = create_file(Filename),
|
||||
{Handle, FileMD, {_KL1Rem, _KL2Rem}} = complete_file(InitHandle,
|
||||
InitFileMD,
|
||||
KL1, KL2, 1),
|
||||
KL1, KL2,
|
||||
#level{level=1}),
|
||||
[{K1, {Sq1, St1, V1}}|_] = KL1,
|
||||
[{K2, {Sq2, St2, V2}}|_] = KL2,
|
||||
Result1 = fetch_keyvalue(Handle, FileMD, K1),
|
||||
|
@ -1736,11 +1778,9 @@ initial_iterator_test() ->
|
|||
Filename = "../test/test2.sft",
|
||||
{KL1, KL2} = sample_keylist(),
|
||||
{Handle, FileMD} = create_file(Filename),
|
||||
{UpdHandle, UpdFileMD, {[], []}} = complete_file(Handle,
|
||||
FileMD,
|
||||
KL1,
|
||||
KL2,
|
||||
1),
|
||||
{UpdHandle, UpdFileMD, {[], []}} = complete_file(Handle, FileMD,
|
||||
KL1, KL2,
|
||||
#level{level=1}),
|
||||
Result1 = fetch_range_keysonly(UpdHandle, UpdFileMD,
|
||||
{o, "Bucket1", "Key8", null},
|
||||
{o, "Bucket1", "Key9d", null}),
|
||||
|
@ -1790,54 +1830,54 @@ key_dominates_test() ->
|
|||
KL1 = [KV1, KV2],
|
||||
KL2 = [KV3, KV4],
|
||||
?assertMatch({{next_key, KV1}, [KV2], KL2},
|
||||
key_dominates(KL1, KL2, 1)),
|
||||
key_dominates(KL1, KL2, {undefined, 1})),
|
||||
?assertMatch({{next_key, KV1}, KL2, [KV2]},
|
||||
key_dominates(KL2, KL1, 1)),
|
||||
key_dominates(KL2, KL1, {undefined, 1})),
|
||||
?assertMatch({skipped_key, KL2, KL1},
|
||||
key_dominates([KV5|KL2], KL1, 1)),
|
||||
key_dominates([KV5|KL2], KL1, {undefined, 1})),
|
||||
?assertMatch({{next_key, KV1}, [KV2], []},
|
||||
key_dominates(KL1, [], 1)),
|
||||
key_dominates(KL1, [], {undefined, 1})),
|
||||
?assertMatch({skipped_key, [KV6|KL2], [KV2]},
|
||||
key_dominates([KV6|KL2], KL1, 1)),
|
||||
key_dominates([KV6|KL2], KL1, {undefined, 1})),
|
||||
?assertMatch({{next_key, KV6}, KL2, [KV2]},
|
||||
key_dominates([KV6|KL2], [KV2], 1)),
|
||||
key_dominates([KV6|KL2], [KV2], {undefined, 1})),
|
||||
?assertMatch({skipped_key, [KV6|KL2], [KV2]},
|
||||
key_dominates([KV6|KL2], KL1, {basement, 1})),
|
||||
key_dominates([KV6|KL2], KL1, {true, 1})),
|
||||
?assertMatch({skipped_key, [KV6|KL2], [KV2]},
|
||||
key_dominates([KV6|KL2], KL1, {basement, 1000})),
|
||||
key_dominates([KV6|KL2], KL1, {true, 1000})),
|
||||
?assertMatch({{next_key, KV6}, KL2, [KV2]},
|
||||
key_dominates([KV6|KL2], [KV2], {basement, 1})),
|
||||
key_dominates([KV6|KL2], [KV2], {true, 1})),
|
||||
?assertMatch({skipped_key, KL2, [KV2]},
|
||||
key_dominates([KV6|KL2], [KV2], {basement, 1000})),
|
||||
key_dominates([KV6|KL2], [KV2], {true, 1000})),
|
||||
?assertMatch({skipped_key, [], []},
|
||||
key_dominates([KV6], [], {basement, 1000})),
|
||||
key_dominates([KV6], [], {true, 1000})),
|
||||
?assertMatch({skipped_key, [], []},
|
||||
key_dominates([], [KV6], {basement, 1000})),
|
||||
key_dominates([], [KV6], {true, 1000})),
|
||||
?assertMatch({{next_key, KV6}, [], []},
|
||||
key_dominates([KV6], [], {basement, 1})),
|
||||
key_dominates([KV6], [], {true, 1})),
|
||||
?assertMatch({{next_key, KV6}, [], []},
|
||||
key_dominates([], [KV6], {basement, 1})),
|
||||
key_dominates([], [KV6], {true, 1})),
|
||||
?assertMatch({skipped_key, [], []},
|
||||
key_dominates([KV7], [], {basement, 1})),
|
||||
key_dominates([KV7], [], {true, 1})),
|
||||
?assertMatch({skipped_key, [], []},
|
||||
key_dominates([], [KV7], {basement, 1})),
|
||||
key_dominates([], [KV7], {true, 1})),
|
||||
?assertMatch({skipped_key, [KV7|KL2], [KV2]},
|
||||
key_dominates([KV7|KL2], KL1, 1)),
|
||||
key_dominates([KV7|KL2], KL1, {undefined, 1})),
|
||||
?assertMatch({{next_key, KV7}, KL2, [KV2]},
|
||||
key_dominates([KV7|KL2], [KV2], 1)),
|
||||
key_dominates([KV7|KL2], [KV2], {undefined, 1})),
|
||||
?assertMatch({skipped_key, [KV7|KL2], [KV2]},
|
||||
key_dominates([KV7|KL2], KL1, {basement, 1})),
|
||||
key_dominates([KV7|KL2], KL1, {true, 1})),
|
||||
?assertMatch({skipped_key, KL2, [KV2]},
|
||||
key_dominates([KV7|KL2], [KV2], {basement, 1})).
|
||||
key_dominates([KV7|KL2], [KV2], {true, 1})).
|
||||
|
||||
|
||||
big_iterator_test() ->
|
||||
Filename = "../test/bigtest1.sft",
|
||||
{KL1, KL2} = {lists:sort(generate_randomkeys(10000)), []},
|
||||
{InitHandle, InitFileMD} = create_file(Filename),
|
||||
{Handle, FileMD, {KL1Rem, KL2Rem}} = complete_file(InitHandle,
|
||||
InitFileMD,
|
||||
KL1, KL2, 1),
|
||||
{Handle, FileMD, {KL1Rem, KL2Rem}} = complete_file(InitHandle, InitFileMD,
|
||||
KL1, KL2,
|
||||
#level{level=1}),
|
||||
io:format("Remainder lengths are ~w and ~w ~n", [length(KL1Rem), length(KL2Rem)]),
|
||||
{complete, Result1} = fetch_range_keysonly(Handle,
|
||||
FileMD,
|
||||
|
|
|
@ -228,6 +228,7 @@ simple_querycount(_Config) ->
|
|||
end,
|
||||
R9),
|
||||
ok = leveled_bookie:book_riakput(Book3, Obj9, Spc9),
|
||||
ok = leveled_bookie:book_close(Book3),
|
||||
{ok, Book4} = leveled_bookie:book_start(RootPath, 2000, 50000000),
|
||||
lists:foreach(fun({IdxF, IdxT, X}) ->
|
||||
R = leveled_bookie:book_returnfolder(Book4,
|
||||
|
|
|
@ -25,9 +25,11 @@
|
|||
load_objects/5]).
|
||||
|
||||
|
||||
|
||||
reset_filestructure() ->
|
||||
io:format("Waiting 2s to give a chance for all file closes to complete~n"),
|
||||
timer:sleep(2000),
|
||||
% io:format("Waiting ~w ms to give a chance for all file closes " ++
|
||||
"to complete~n", [Wait]),
|
||||
% timer:sleep(Wait),
|
||||
RootPath = "test",
|
||||
filelib:ensure_dir(RootPath ++ "/journal/"),
|
||||
filelib:ensure_dir(RootPath ++ "/ledger/"),
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue