Rationalise logging in commit
Also: Sort the output from an 'all' fetch one loop at a time. Make sure the test of scoring an empty file is scoring an empty file. If it is an empty file we want to compact the fragment away - in which case it should score 0.0 not 100.0
This commit is contained in:
parent
60e29f2ff0
commit
6b3328f4a3
3 changed files with 45 additions and 47 deletions
|
@ -267,13 +267,12 @@ cdb_getpositions(Pid, SampleSize) ->
|
||||||
% requests waiting for this to complete, loop over each of the 256 indexes
|
% requests waiting for this to complete, loop over each of the 256 indexes
|
||||||
% outside of the FSM processing loop - to allow for other messages to be
|
% outside of the FSM processing loop - to allow for other messages to be
|
||||||
% interleaved
|
% interleaved
|
||||||
SW = os:timestamp(),
|
|
||||||
PosList =
|
|
||||||
case SampleSize of
|
case SampleSize of
|
||||||
all ->
|
all ->
|
||||||
FoldFun =
|
FoldFun =
|
||||||
fun(Index, Acc) ->
|
fun(Index, Acc) ->
|
||||||
cdb_getpositions_fromidx(Pid, all, Index, Acc)
|
PosList = cdb_getpositions_fromidx(Pid, all, Index, []),
|
||||||
|
lists:merge(Acc, lists:sort(PosList))
|
||||||
end,
|
end,
|
||||||
IdxList = lists:seq(0, 255),
|
IdxList = lists:seq(0, 255),
|
||||||
lists:foldl(FoldFun, [], IdxList);
|
lists:foldl(FoldFun, [], IdxList);
|
||||||
|
@ -294,9 +293,7 @@ cdb_getpositions(Pid, SampleSize) ->
|
||||||
PosList0 = lists:foldl(FoldFun, [], SortedL),
|
PosList0 = lists:foldl(FoldFun, [], SortedL),
|
||||||
P1 = leveled_rand:uniform(max(1, length(PosList0) - S0)),
|
P1 = leveled_rand:uniform(max(1, length(PosList0) - S0)),
|
||||||
lists:sublist(lists:sort(PosList0), P1, S0)
|
lists:sublist(lists:sort(PosList0), P1, S0)
|
||||||
end,
|
end.
|
||||||
leveled_log:log_timer("CDB22", [length(PosList)], SW),
|
|
||||||
PosList.
|
|
||||||
|
|
||||||
cdb_getpositions_fromidx(Pid, SampleSize, Index, Acc) ->
|
cdb_getpositions_fromidx(Pid, SampleSize, Index, Acc) ->
|
||||||
gen_fsm:sync_send_event(Pid,
|
gen_fsm:sync_send_event(Pid,
|
||||||
|
@ -1234,10 +1231,9 @@ scan_index_returnpositions(Handle, Position, Count, PosList0) ->
|
||||||
[HPosition|PosList]
|
[HPosition|PosList]
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
PosList = lists:foldl(AddPosFun,
|
lists:foldl(AddPosFun,
|
||||||
PosList0,
|
PosList0,
|
||||||
read_next_n_integerpairs(Handle, Count)),
|
read_next_n_integerpairs(Handle, Count)).
|
||||||
lists:reverse(PosList).
|
|
||||||
|
|
||||||
|
|
||||||
%% Take an active file and write the hash details necessary to close that
|
%% Take an active file and write the hash details necessary to close that
|
||||||
|
|
|
@ -507,17 +507,22 @@ schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
|
||||||
%% calls.
|
%% calls.
|
||||||
check_single_file(CDB, FilterFun, FilterServer, MaxSQN, SampleSize, BatchSize) ->
|
check_single_file(CDB, FilterFun, FilterServer, MaxSQN, SampleSize, BatchSize) ->
|
||||||
FN = leveled_cdb:cdb_filename(CDB),
|
FN = leveled_cdb:cdb_filename(CDB),
|
||||||
|
SW = os:timestamp(),
|
||||||
PositionList = leveled_cdb:cdb_getpositions(CDB, SampleSize),
|
PositionList = leveled_cdb:cdb_getpositions(CDB, SampleSize),
|
||||||
AvgJump =
|
|
||||||
(lists:last(PositionList) - lists:nth(1, PositionList))
|
|
||||||
div length(PositionList),
|
|
||||||
leveled_log:log("IC014", [AvgJump]),
|
|
||||||
KeySizeList = fetch_inbatches(PositionList, BatchSize, CDB, []),
|
KeySizeList = fetch_inbatches(PositionList, BatchSize, CDB, []),
|
||||||
Score =
|
Score =
|
||||||
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN),
|
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN),
|
||||||
leveled_log:log("IC004", [FN, Score]),
|
safely_log_filescore(PositionList, FN, Score, SW),
|
||||||
Score.
|
Score.
|
||||||
|
|
||||||
|
safely_log_filescore([], FN, Score, SW) ->
|
||||||
|
leveled_log:log_timer("IC004", [Score, empty, FN], SW);
|
||||||
|
safely_log_filescore(PositionList, FN, Score, SW) ->
|
||||||
|
AvgJump =
|
||||||
|
(lists:last(PositionList) - lists:nth(1, PositionList))
|
||||||
|
div length(PositionList),
|
||||||
|
leveled_log:log_timer("IC004", [Score, AvgJump, FN], SW).
|
||||||
|
|
||||||
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN) ->
|
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN) ->
|
||||||
FoldFunForSizeCompare =
|
FoldFunForSizeCompare =
|
||||||
fun(KS, {ActSize, RplSize}) ->
|
fun(KS, {ActSize, RplSize}) ->
|
||||||
|
@ -552,7 +557,7 @@ size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN) ->
|
||||||
{ActiveSize, ReplacedSize} = R0,
|
{ActiveSize, ReplacedSize} = R0,
|
||||||
case ActiveSize + ReplacedSize of
|
case ActiveSize + ReplacedSize of
|
||||||
0 ->
|
0 ->
|
||||||
100.0;
|
0.0;
|
||||||
_ ->
|
_ ->
|
||||||
100 * ActiveSize / (ActiveSize + ReplacedSize)
|
100 * ActiveSize / (ActiveSize + ReplacedSize)
|
||||||
end.
|
end.
|
||||||
|
@ -1117,7 +1122,6 @@ compact_empty_file_test() ->
|
||||||
FN1 = leveled_inker:filepath(RP, 1, new_journal),
|
FN1 = leveled_inker:filepath(RP, 1, new_journal),
|
||||||
CDBopts = #cdb_options{binary_mode=true},
|
CDBopts = #cdb_options{binary_mode=true},
|
||||||
{ok, CDB1} = leveled_cdb:cdb_open_writer(FN1, CDBopts),
|
{ok, CDB1} = leveled_cdb:cdb_open_writer(FN1, CDBopts),
|
||||||
ok = leveled_cdb:cdb_put(CDB1, {1, stnd, test_ledgerkey("Key1")}, <<>>),
|
|
||||||
{ok, FN2} = leveled_cdb:cdb_complete(CDB1),
|
{ok, FN2} = leveled_cdb:cdb_complete(CDB1),
|
||||||
{ok, CDB2} = leveled_cdb:cdb_open_reader(FN2),
|
{ok, CDB2} = leveled_cdb:cdb_open_reader(FN2),
|
||||||
LedgerSrv1 = [{8, {o, "Bucket", "Key1", null}},
|
LedgerSrv1 = [{8, {o, "Bucket", "Key1", null}},
|
||||||
|
@ -1125,7 +1129,7 @@ compact_empty_file_test() ->
|
||||||
{3, {o, "Bucket", "Key3", null}}],
|
{3, {o, "Bucket", "Key3", null}}],
|
||||||
LedgerFun1 = fun(_Srv, _Key, _ObjSQN) -> false end,
|
LedgerFun1 = fun(_Srv, _Key, _ObjSQN) -> false end,
|
||||||
Score1 = check_single_file(CDB2, LedgerFun1, LedgerSrv1, 9, 8, 4),
|
Score1 = check_single_file(CDB2, LedgerFun1, LedgerSrv1, 9, 8, 4),
|
||||||
?assertMatch(100.0, Score1),
|
?assertMatch(0.0, Score1),
|
||||||
ok = leveled_cdb:cdb_deletepending(CDB2),
|
ok = leveled_cdb:cdb_deletepending(CDB2),
|
||||||
ok = leveled_cdb:cdb_destroy(CDB2).
|
ok = leveled_cdb:cdb_destroy(CDB2).
|
||||||
|
|
||||||
|
|
|
@ -336,7 +336,7 @@
|
||||||
{info, "Scoring of compaction runs complete with highest score=~w "
|
{info, "Scoring of compaction runs complete with highest score=~w "
|
||||||
++ "with run of run_length=~w"}},
|
++ "with run of run_length=~w"}},
|
||||||
{"IC004",
|
{"IC004",
|
||||||
{info, "Score for filename ~s is ~w"}},
|
{info, "Score=~w with mean_byte_jump=~w for filename ~s"}},
|
||||||
{"IC005",
|
{"IC005",
|
||||||
{info, "Compaction to be performed on ~w files with score of ~w"}},
|
{info, "Compaction to be performed on ~w files with score of ~w"}},
|
||||||
{"IC006",
|
{"IC006",
|
||||||
|
@ -406,9 +406,7 @@
|
||||||
{"CDB20",
|
{"CDB20",
|
||||||
{warn, "Error ~w caught when safe reading a file to length ~w"}},
|
{warn, "Error ~w caught when safe reading a file to length ~w"}},
|
||||||
{"CDB21",
|
{"CDB21",
|
||||||
{warn, "File ~s to be deleted but already gone"}},
|
{warn, "File ~s to be deleted but already gone"}}
|
||||||
{"CDB22",
|
|
||||||
{info, "Positions ~w fetch"}}
|
|
||||||
|
|
||||||
]).
|
]).
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue