Merge remote-tracking branch 'refs/remotes/origin/master' into mas-docs2
commit 0e9cc22d3d
3 changed files with 39 additions and 9 deletions
@@ -168,7 +168,13 @@
        {info, "Saved manifest file"}},
    {"PC019",
        {debug, "After ~s level ~w is ~w"}},
    {"PC020",
        {warn, "Empty prompt deletions at ManifestSQN=~w"}},
    {"PC021",
        {info, "Prompting deletions at ManifestSQN=~w"}},
    {"PC022",
        {info, "Storing reference to deletions at ManifestSQN=~w"}},

    {"I0001",
        {info, "Unexpected failure to fetch value for Key=~w SQN=~w "
            ++ "with reason ~w"}},
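For reference, the ~s and ~w placeholders in these entries are standard io:format control sequences. A minimal sketch, not taken from the leveled_log module itself, of how an entry such as PC021 could be rendered (the module and render_log/2 helper are made up for illustration):

    -module(log_render_sketch).
    -export([render_log/2]).

    %% Hypothetical helper: look up one of the format strings added above
    %% and render it with io_lib:format/2.
    render_log(Code, Args) ->
        Logs =
            [{"PC020", {warn, "Empty prompt deletions at ManifestSQN=~w"}},
             {"PC021", {info, "Prompting deletions at ManifestSQN=~w"}},
             {"PC022", {info, "Storing reference to deletions at ManifestSQN=~w"}}],
        {Code, {Level, Format}} = lists:keyfind(Code, 1, Logs),
        {Level, lists:flatten(io_lib:format(Format, Args))}.

    %% Example: render_log("PC021", [55]) returns
    %%   {info, "Prompting deletions at ManifestSQN=55"}.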
@@ -91,12 +91,13 @@ handle_cast(prompt, State) ->
handle_cast({push_work, Work}, State) ->
    {ManifestSQN, Deletions} = handle_work(Work, State),
    PDs = dict:store(ManifestSQN, Deletions, State#state.pending_deletions),
    leveled_log:log("PC022", [ManifestSQN]),
    {noreply, State#state{pending_deletions = PDs}, ?MAX_TIMEOUT};
handle_cast({prompt_deletions, ManifestSQN}, State) ->
    Deletions = dict:fetch(ManifestSQN, State#state.pending_deletions),
    {Deletions, UpdD} = return_deletions(ManifestSQN,
                                            State#state.pending_deletions),
    ok = notify_deletions(Deletions, State#state.owner),
    UpdDeletions = dict:erase(ManifestSQN, State#state.pending_deletions),
    {noreply, State#state{pending_deletions = UpdDeletions}, ?MIN_TIMEOUT}.
    {noreply, State#state{pending_deletions = UpdD}, ?MIN_TIMEOUT}.

handle_info(timeout, State) ->
    request_work(State),
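The change here keys pending deletions on ManifestSQN in push_work and routes the fetch-and-erase through return_deletions/2, shown in the next hunk. A minimal standalone sketch of that dict pattern, using an illustrative take_deletions/2 rather than the real clerk state:

    -module(pending_deletions_sketch).
    -export([demo/0]).

    %% Store deletions under a manifest SQN, then later fetch and erase them
    %% in one step, mirroring the store in push_work and the
    %% return_deletions/2 call in prompt_deletions.
    demo() ->
        SQN = 7,
        Pending0 = dict:store(SQN, [file_a, file_b], dict:new()),
        {Deletions, Pending1} = take_deletions(SQN, Pending0),
        [file_a, file_b] = Deletions,
        error = dict:find(SQN, Pending1),  % the entry is gone after the take
        ok.

    take_deletions(SQN, PendingD) ->
        {dict:fetch(SQN, PendingD), dict:erase(SQN, PendingD)}.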
@@ -223,6 +224,18 @@ do_merge(KL1, KL2, SinkLevel, SinkB, RP, NewSQN, MaxSQN, Additions) ->
    end.


return_deletions(ManifestSQN, PendingDeletionD) ->
    % The returning of deletions had been separated out, as a failure to fetch
    % here had caused crashes of the clerk. The root cause of the failure to
    % fetch was the same clerk being asked to do the same work twice - and this
    % should be blocked now by the work_ongoing boolean in the Penciller
    % LoopData
    %
    % So this is now allowed to crash again
    PendingDeletions = dict:fetch(ManifestSQN, PendingDeletionD),
    leveled_log:log("PC021", [ManifestSQN]),
    {PendingDeletions, dict:erase(ManifestSQN, PendingDeletionD)}.

%%%============================================================================
%%% Test
%%%============================================================================
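As the comment notes, return_deletions/2 is deliberately allowed to crash when the ManifestSQN is absent, because dict:fetch/2 raises an exception for a missing key. A small illustrative check of that behaviour (module and function names are made up for the sketch):

    -module(fetch_crash_sketch).
    -export([demo/0]).

    %% dict:fetch/2 errors when the key is not present, so a second prompt
    %% for the same ManifestSQN (already erased) fails fast rather than
    %% silently returning an empty result.
    demo() ->
        Pending = dict:erase(9, dict:store(9, [old_file], dict:new())),
        try dict:fetch(9, Pending) of
            _ -> unexpected_success
        catch
            error:_ -> crashed_as_expected
        end.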
@@ -511,10 +511,19 @@ handle_cast({levelzero_complete, FN, StartKey, EndKey}, State) ->
                            manifest=UpdMan,
                            persisted_sqn=State#state.ledger_sqn}};
handle_cast(work_for_clerk, State) ->
    case State#state.levelzero_pending of
        true ->
            {noreply, State};
        false ->
    case {State#state.levelzero_pending, State#state.work_ongoing} of
        {false, false} ->
            % TODO - as part of supervision tree and retry work:
            % Need to check for work_ongoing as well as levelzero_pending as
            % there may be a race that could lead to the clerk doing the same
            % thing twice.
            %
            % This has implications though if we auto-restart the pclerk in the
            % future, without altering this state - it may never be able to
            % request work due to ongoing work that crashed the previous clerk
            %
            % Perhaps the pclerk should not be restarted because of this, and
            % the failure should ripple up
            {WL, WC} = leveled_pmanifest:check_for_work(State#state.manifest,
                                                        ?LEVEL_SCALEFACTOR),
            case WC of
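The reworked clause only asks the manifest for compaction work when both flags are false; any other combination falls through to the catch-all shown in the next hunk. A stripped-down sketch of that guard pattern outside the real gen_server, where the record definition and dispatch_work/1 are stand-ins for the penciller state and the check_for_work/push_work path:

    -module(work_guard_sketch).
    -export([maybe_request_work/1]).

    -record(state, {levelzero_pending = false :: boolean(),
                    work_ongoing = false :: boolean()}).

    %% Only dispatch new work when no L0 write is pending and no merge work
    %% is already in flight; otherwise leave the state untouched.
    maybe_request_work(State = #state{}) ->
        case {State#state.levelzero_pending, State#state.work_ongoing} of
            {false, false} ->
                ok = dispatch_work(State),
                State#state{work_ongoing = true};
            _ ->
                State
        end.

    dispatch_work(_State) ->
        ok.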
@@ -534,7 +543,9 @@ handle_cast(work_for_clerk, State) ->
                                            {TL, State#state.manifest}),
                    {noreply,
                        State#state{work_backlog=false, work_ongoing=true}}
            end
    end;
        _ ->
            {noreply, State}
    end.