Registering and releasing of Journal snapshots
Added a test of journal compaction with a registered snapshot, which showed that file deletion did not correctly check the list of registered snapshots. Corrected.
This commit is contained in:
parent
9abc1d643a
commit
479dc3ac80
4 changed files with 26 additions and 8 deletions
|
@ -273,7 +273,7 @@ handle_call({release_snapshot, Snapshot}, _From , State) ->
|
|||
{reply, ok, State#state{registered_snapshots=Rs}};
|
||||
handle_call({confirm_delete, ManSQN}, _From, State) ->
|
||||
Reply = lists:foldl(fun({_R, SnapSQN}, Bool) ->
|
||||
case SnapSQN < ManSQN of
|
||||
case SnapSQN >= ManSQN of
|
||||
true ->
|
||||
Bool;
|
||||
false ->
|
||||
|
|
|
@ -42,8 +42,8 @@
|
|||
{"P0004",
|
||||
{info, "Remaining ledger snapshots are ~w"}},
|
||||
{"P0005",
|
||||
{info, "Delete confirmed as file ~s is removed from " ++ "
|
||||
unreferenced files"}},
|
||||
{info, "Delete confirmed as file ~s is removed from " ++
|
||||
"unreferenced files"}},
|
||||
{"P0006",
|
||||
{info, "Orphaned reply after timeout on L0 file write ~s"}},
|
||||
{"P0007",
|
||||
|
@ -233,7 +233,7 @@ log_timer(LogReference, Subs, StartTime) ->
|
|||
true ->
|
||||
MicroS = timer:now_diff(os:timestamp(), StartTime),
|
||||
{Unit, Time} = case MicroS of
|
||||
MicroS when MicroS < 10000 ->
|
||||
MicroS when MicroS < 1000 ->
|
||||
{"microsec", MicroS};
|
||||
MicroS ->
|
||||
{"ms", MicroS div 1000}
|
||||
|
|
|
@ -364,7 +364,7 @@ handle_cast(close, State) ->
|
|||
handle_info(timeout, State) ->
|
||||
case State#state.ready_for_delete of
|
||||
true ->
|
||||
leveled_log:log("SFT05", [State#state.filename]),
|
||||
leveled_log:log("SFT05", [timeout, State#state.filename]),
|
||||
ok = leveled_penciller:pcl_confirmdelete(State#state.penciller,
|
||||
State#state.filename),
|
||||
{noreply, State, ?DELETE_TIMEOUT};
|
||||
|
|
|
@ -450,6 +450,15 @@ space_clear_ondelete(_Config) ->
|
|||
{ok, FNsA_J} = file:list_dir(RootPath ++ "/journal/journal_files"),
|
||||
io:format("Bookie created ~w journal files and ~w ledger files~n",
|
||||
[length(FNsA_J), length(FNsA_L)]),
|
||||
|
||||
% Get an iterator to lock the inker during compaction
|
||||
FoldObjectsFun = fun(B, K, V, Acc) -> [{B, K, testutil:riak_hash(V)}|Acc]
|
||||
end,
|
||||
{async, HTreeF1} = leveled_bookie:book_returnfolder(Book1,
|
||||
{foldobjects_allkeys,
|
||||
?RIAK_TAG,
|
||||
FoldObjectsFun}),
|
||||
% Delete the keys
|
||||
SW2 = os:timestamp(),
|
||||
lists:foreach(fun({Bucket, Key}) ->
|
||||
ok = leveled_bookie:book_riakdelete(Book1,
|
||||
|
@ -460,6 +469,9 @@ space_clear_ondelete(_Config) ->
|
|||
KL1),
|
||||
io:format("Deletion took ~w microseconds for 80K keys~n",
|
||||
[timer:now_diff(os:timestamp(), SW2)]),
|
||||
|
||||
|
||||
|
||||
ok = leveled_bookie:book_compactjournal(Book1, 30000),
|
||||
F = fun leveled_bookie:book_islastcompactionpending/1,
|
||||
lists:foldl(fun(X, Pending) ->
|
||||
|
@ -474,11 +486,17 @@ space_clear_ondelete(_Config) ->
|
|||
end end,
|
||||
true,
|
||||
lists:seq(1, 15)),
|
||||
io:format("Waiting for journal deletes~n"),
|
||||
io:format("Waiting for journal deletes - blocked~n"),
|
||||
timer:sleep(20000),
|
||||
KeyHashList1 = HTreeF1(),
|
||||
io:format("Key Hash List returned of length ~w~n", [length(KeyHashList1)]),
|
||||
true = length(KeyHashList1) == 80000,
|
||||
io:format("Waiting for journal deletes - unblocked~n"),
|
||||
timer:sleep(20000),
|
||||
{ok, FNsB_L} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
||||
{ok, FNsB_J} = file:list_dir(RootPath ++ "/journal/journal_files"),
|
||||
{ok, FNsB_PC} = file:list_dir(RootPath ++ "/journal/journal_files/post_compact"),
|
||||
{ok, FNsB_PC} = file:list_dir(RootPath
|
||||
++ "/journal/journal_files/post_compact"),
|
||||
PointB_Journals = length(FNsB_J) + length(FNsB_PC),
|
||||
io:format("Bookie has ~w journal files and ~w ledger files " ++
|
||||
"after deletes~n",
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue