Merge pull request #137 from martinsumner/mas-i11-backenddelete
Mas i11 backenddelete
Commit: d8dd952cba
3 changed files with 91 additions and 9 deletions

@@ -478,7 +478,7 @@ foldobjects(SnapFun, Tag, KeyRanges, FoldObjFun, DeferredFetch, SegmentList) ->
                 % initial accumulator
                 FoldObjFun;
             false ->
-                % no initial accumulatr passed, and so should be just a list
+                % no initial accumulator passed, and so should be just a list
                 {FoldObjFun, []}
         end,

@@ -1259,7 +1259,12 @@ generate_binary_slot(Lookup, KVL, PressMethod, BuildTimings0) ->
 
 check_blocks([], _Handle, _StartPos, _BlockLengths, _PosBinLength,
                 _LedgerKeyToCheck, _PressMethod, Acc) ->
-    Acc;
+    case is_list(Acc) of
+        true ->
+            lists:reverse(Acc);
+        false ->
+            Acc
+    end;
 check_blocks([Pos|Rest], Handle, StartPos, BlockLengths, PosBinLength,
                 LedgerKeyToCheck, PressMethod, Acc) ->
     {BlockNumber, BlockPos} = revert_position(Pos),

@@ -1277,7 +1282,10 @@ check_blocks([Pos|Rest], Handle, StartPos, BlockLengths, PosBinLength,
         _ ->
             case LedgerKeyToCheck of
                 false ->
-                    Acc ++ [{K, V}];
+                    check_blocks(Rest, Handle, StartPos,
+                                    BlockLengths, PosBinLength,
+                                    LedgerKeyToCheck, PressMethod,
+                                    [{K, V}|Acc]);
                 _ ->
                     check_blocks(Rest, Handle, StartPos,
                                     BlockLengths, PosBinLength,

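Taken together, the two check_blocks/8 hunks above change how matched keys accumulate. The removed clause returned Acc ++ [{K, V}] without recursing, so the fold stopped at the first matching key in a slot (and paid an O(n) append per match); the replacement prepends the match in O(1) and keeps folding, with the new base case reversing a list accumulator to restore key order. A minimal, self-contained sketch of that pattern (module and names are hypothetical, not leveled's actual code):

    -module(fold_sketch).
    -export([collect_matches/2]).

    %% Collect every {K, V} whose key satisfies Pred, in original order.
    %% Prepend-then-reverse mirrors the fixed check_blocks/8: O(1) per
    %% match, one reverse at the end, and crucially the fold does not
    %% stop at the first match.
    collect_matches(KVs, Pred) ->
        collect_matches(KVs, Pred, []).

    collect_matches([], _Pred, Acc) ->
        lists:reverse(Acc);
    collect_matches([{K, V} | Rest], Pred, Acc) ->
        case Pred(K) of
            true -> collect_matches(Rest, Pred, [{K, V} | Acc]);
            false -> collect_matches(Rest, Pred, Acc)
        end.

For example, fold_sketch:collect_matches([{a, 1}, {b, 2}, {a, 3}], fun(K) -> K == a end) returns [{a, 1}, {a, 3}] - both matches, not just the first.
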
@@ -1629,7 +1637,7 @@ tune_hash(SegHash) ->
 tune_seglist(SegList) ->
     case is_list(SegList) of
         true ->
-            lists:map(fun tune_hash/1, SegList);
+            lists:usort(lists:map(fun tune_hash/1, SegList));
         false ->
             SegList
     end.

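The tune_seglist/1 change above wraps the mapped list in lists:usort/1, which sorts and removes duplicates in one pass. That matters here because tune_hash/1 narrows each segment hash, so distinct input segments can collapse to the same tuned value. An illustrative shell call, with rem 4 standing in for tune_hash/1 purely for the example:

    1> lists:usort(lists:map(fun(X) -> X rem 4 end, [3, 7, 11, 2])).
    [2,3]

The mapped list [3, 3, 3, 2] comes back deduplicated and ordered, which is the shape the tuned segment list is expected to have.
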
@@ -12,8 +12,8 @@
 all() -> [
             crossbucket_aae,
             handoff,
-            dollar_bucket_index,
-            dollar_key_index
+            dollar_bucket_index,
+            dollar_key_index
             ].
 
 -define(MAGIC, 53). % riak_kv -> riak_object

@@ -61,8 +61,81 @@ crossbucket_aae(_Config) ->
     %% Check all the objects are found - used to trigger HEAD performance log
     ok = testutil:checkhead_forlist(Bookie2, lists:nth(1, CLs)),
 
+    % This part of the test tests an issue with accelerating folds by segment
+    % list, when there is more than one key with a matching segment in the
+    % slot.  Previously this was not handled correctly - and this part of
+    % the test detects this, by finding slices of keys which are probably
+    % in the same slot
+    SW0 = os:timestamp(),
+    SliceSize = 20,
+
+    CL1 = lists:sublist(lists:nth(1, CLs), 100, SliceSize),
+    CL2 = lists:sublist(lists:nth(2, CLs), 100, SliceSize),
+    CL3 = lists:sublist(lists:nth(3, CLs), 100, SliceSize),
+    CL4 = lists:sublist(lists:nth(4, CLs), 100, SliceSize),
+
+    SegMapFun =
+        fun({_RN, RiakObject, _Spc}) ->
+            B = RiakObject#r_object.bucket,
+            K = RiakObject#r_object.key,
+            leveled_tictac:keyto_segment32(<<B/binary, K/binary>>)
+        end,
+    BKMapFun =
+        fun({_RN, RiakObject, _Spc}) ->
+            B = RiakObject#r_object.bucket,
+            K = RiakObject#r_object.key,
+            {B, K}
+        end,
+
+    SL1 = lists:map(SegMapFun, CL1),
+    SL2 = lists:map(SegMapFun, CL2),
+    SL3 = lists:map(SegMapFun, CL3),
+    SL4 = lists:map(SegMapFun, CL4),
+
+    BK1 = lists:map(BKMapFun, CL1),
+    BK2 = lists:map(BKMapFun, CL2),
+    BK3 = lists:map(BKMapFun, CL3),
+    BK4 = lists:map(BKMapFun, CL4),
+
+    HeadSegmentFolderGen =
+        fun(SegL, BKL) ->
+            {foldheads_allkeys,
+                ?RIAK_TAG,
+                {fun(B, K, _PO, Acc) ->
+                        case lists:member({B, K}, BKL) of
+                            true ->
+                                Acc + 1;
+                            false ->
+                                Acc
+                        end
+                    end, 0},
+                false, true, SegL}
+        end,
+
+    {async, SL1Folder} =
+        leveled_bookie:book_returnfolder(Bookie2,
+                                         HeadSegmentFolderGen(SL1, BK1)),
+    {async, SL2Folder} =
+        leveled_bookie:book_returnfolder(Bookie2,
+                                         HeadSegmentFolderGen(SL2, BK2)),
+    {async, SL3Folder} =
+        leveled_bookie:book_returnfolder(Bookie2,
+                                         HeadSegmentFolderGen(SL3, BK3)),
+    {async, SL4Folder} =
+        leveled_bookie:book_returnfolder(Bookie2,
+                                         HeadSegmentFolderGen(SL4, BK4)),
+
+    Results = [SL1Folder(), SL2Folder(), SL3Folder(), SL4Folder()],
+    io:format("SegList folders returned results of ~w " ++
+                  "for SliceSize ~w in ~w ms~n",
+              [Results, SliceSize,
+               timer:now_diff(os:timestamp(), SW0)/1000]),
+    lists:foreach(fun(R) -> true = R == SliceSize end, Results),
+
     % Start a new store, and load the same objects (except for the original
     % test object) into this store
     %
     % This is now the comparison part of the test
 
     StartOpts3 = [{root_path, RootPathB},
                   {max_journalsize, 200000000},
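HeadSegmentFolderGen in the hunk above builds the query tuple handed to leveled_bookie:book_returnfolder/2. A hedged standalone sketch of the same usage follows; reading the two boolean fields as a journal-check switch and a snapshot-before-fold switch is inferred from this diff, not confirmed against the full source:

    %% Count how many of the expected {Bucket, Key} pairs a
    %% segment-accelerated head fold visits (sketch mirroring the test).
    count_seg_matches(Bookie, SegL, BKL) ->
        FoldFun =
            fun(B, K, _ProxyObject, Acc) ->
                case lists:member({B, K}, BKL) of
                    true -> Acc + 1;
                    false -> Acc
                end
            end,
        Query = {foldheads_allkeys,
                    ?RIAK_TAG,      % riak object tag
                    {FoldFun, 0},   % fold fun with explicit initial accumulator
                    false,          % assumed: skip journal presence check
                    true,           % assumed: snapshot before the fold runs
                    SegL},          % segment list that accelerates the fold
        {async, Folder} = leveled_bookie:book_returnfolder(Bookie, Query),
        Folder().

When every expected key is found, the returned count equals length(BKL) - exactly the true = R == SliceSize assertion in the test.
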
@@ -74,12 +147,12 @@ crossbucket_aae(_Config) ->
     test_singledelta_stores(Bookie2, Bookie3, medium, {B1, K1}),
     test_singledelta_stores(Bookie2, Bookie3, xsmall, {B1, K1}),
     test_singledelta_stores(Bookie2, Bookie3, xxsmall, {B1, K1}),
 
-    % Test with a newly opend book (i.e with no blovk indexes cached)
+    % Test with a newly opened book (i.e. with no block indexes cached)
     ok = leveled_bookie:book_close(Bookie2),
     {ok, Bookie2A} = leveled_bookie:book_start(StartOpts2),
     test_singledelta_stores(Bookie2A, Bookie3, small, {B1, K1}),
 
     ok = leveled_bookie:book_close(Bookie2A),
     ok = leveled_bookie:book_close(Bookie3).

@@ -507,3 +580,4 @@ dollar_bucket_index(_Config) ->
 
     ok = leveled_bookie:book_close(Bookie1),
     testutil:reset_filestructure().
+