Record time to calculate compaction run

As this is a factor in assessing the suitability of the Journal size.
This commit is contained in:
Martin Sumner 2018-09-26 10:19:24 +01:00
parent 8d8a0a3f5b
commit 3df481941f
3 changed files with 9 additions and 3 deletions

View file

@ -21,7 +21,7 @@ If there are smaller objects then lookups within a Journal may get faster if eac
- excessive CPU use and related performance impacts during rolling of CDB files, see log `CDB07`; - excessive CPU use and related performance impacts during rolling of CDB files, see log `CDB07`;
- excessive load caused during journal compaction despite tuning down `max_run_length`. - excessive load caused during journal compaction despite tuning down `max_run_length`.
If the store is used to hold bigger objects, the `max_journalsize` may be scaled up accordingly. Having fewer Journal files (by using larger objects), will reduce the lookup time to find the right Journal during GET requests, but in most circumstances the impact of this improvement is marginal. If the store is used to hold bigger objects, the `max_journalsize` may be scaled up accordingly. Having fewer Journal files (by using larger objects), will reduce the lookup time to find the right Journal during GET requests, but in most circumstances the impact of this improvement is marginal. The primary impact of fewer Journal files is the decision-making time of Journal compaction (the time to calculate if a compaction should be undertaken, then what should be compacted) will increase. The timing for making compaction calculations can be monitored through log `IC003`.
## Ledger Cache Size ## Ledger Cache Size

View file

@ -217,6 +217,10 @@ handle_cast({compact, Checker, InitiateFun, CloseFun, FilterFun, Inker, _TO},
State) -> State) ->
% Empty the waste folder % Empty the waste folder
clear_waste(State), clear_waste(State),
SW = os:timestamp(),
% Clock to record the time it takes to calculate the potential for
% compaction
% Need to fetch manifest at start rather than have it be passed in % Need to fetch manifest at start rather than have it be passed in
% Don't want to process a queued call waiting on an old manifest % Don't want to process a queued call waiting on an old manifest
[_Active|Manifest] = leveled_inker:ink_getmanifest(Inker), [_Active|Manifest] = leveled_inker:ink_getmanifest(Inker),
@ -233,6 +237,7 @@ handle_cast({compact, Checker, InitiateFun, CloseFun, FilterFun, Inker, _TO},
case score_run(BestRun0, ScoreParams) of case score_run(BestRun0, ScoreParams) of
Score when Score > 0.0 -> Score when Score > 0.0 ->
BestRun1 = sort_run(BestRun0), BestRun1 = sort_run(BestRun0),
leveled_log:log_timer("IC003", [Score, true], SW),
print_compaction_run(BestRun1, ScoreParams), print_compaction_run(BestRun1, ScoreParams),
ManifestSlice = compact_files(BestRun1, ManifestSlice = compact_files(BestRun1,
CDBopts, CDBopts,
@ -258,7 +263,7 @@ handle_cast({compact, Checker, InitiateFun, CloseFun, FilterFun, Inker, _TO},
{noreply, State} {noreply, State}
end; end;
Score -> Score ->
leveled_log:log("IC003", [Score]), leveled_log:log_timer("IC003", [Score, false], SW),
ok = leveled_inker:ink_compactioncomplete(Inker), ok = leveled_inker:ink_compactioncomplete(Inker),
ok = CloseFun(FilterServer), ok = CloseFun(FilterServer),
{noreply, State} {noreply, State}

View file

@ -296,7 +296,8 @@
{"IC002", {"IC002",
{info, "Clerk updating Inker as compaction complete of ~w files"}}, {info, "Clerk updating Inker as compaction complete of ~w files"}},
{"IC003", {"IC003",
{info, "No compaction run as highest score=~w"}}, {info, "Scoring of compaction runs complete with highest score=~w "
++ "and run to be made=~w"}},
{"IC004", {"IC004",
{info, "Score for filename ~s is ~w"}}, {info, "Score for filename ~s is ~w"}},
{"IC005", {"IC005",