Switch to binary index in pmem

Remove the ets index in pmem and use a binary index instead.  This may
be slower, but avoids the bulk upload to ets, and means that matches
know the position (so only skiplists with a match need be tried).

Also removes the discrepancy between snapshots and non-snapshots - as
previously the snapshots were always slowed by not having access to the
ETS table.
This commit is contained in:
martinsumner 2017-01-05 21:58:33 +00:00
parent 1d3fb18df7
commit 5a88565c08
5 changed files with 183 additions and 100 deletions

View file

@ -154,6 +154,7 @@
-record(ledger_cache, {mem :: ets:tab(), -record(ledger_cache, {mem :: ets:tab(),
loader = leveled_skiplist:empty(false) :: tuple(), loader = leveled_skiplist:empty(false) :: tuple(),
index = leveled_pmem:new_index(), % array
min_sqn = infinity :: integer()|infinity, min_sqn = infinity :: integer()|infinity,
max_sqn = 0 :: integer()}). max_sqn = 0 :: integer()}).
@ -458,6 +459,7 @@ code_change(_OldVsn, State, _Extra) ->
load_snapshot(LedgerSnapshot, LedgerCache) -> load_snapshot(LedgerSnapshot, LedgerCache) ->
CacheToLoad = {LedgerCache#ledger_cache.loader, CacheToLoad = {LedgerCache#ledger_cache.loader,
LedgerCache#ledger_cache.index,
LedgerCache#ledger_cache.min_sqn, LedgerCache#ledger_cache.min_sqn,
LedgerCache#ledger_cache.max_sqn}, LedgerCache#ledger_cache.max_sqn},
ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot, CacheToLoad). ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot, CacheToLoad).
@ -467,8 +469,9 @@ empty_ledgercache() ->
push_ledgercache(Penciller, Cache) -> push_ledgercache(Penciller, Cache) ->
CacheToLoad = {Cache#ledger_cache.loader, CacheToLoad = {Cache#ledger_cache.loader,
Cache#ledger_cache.min_sqn, Cache#ledger_cache.index,
Cache#ledger_cache.max_sqn}, Cache#ledger_cache.min_sqn,
Cache#ledger_cache.max_sqn},
leveled_penciller:pcl_pushmem(Penciller, CacheToLoad). leveled_penciller:pcl_pushmem(Penciller, CacheToLoad).
%%%============================================================================ %%%============================================================================
@ -929,41 +932,44 @@ accumulate_index(TermRe, AddFun, FoldKeysFun) ->
preparefor_ledgercache(?INKT_KEYD, preparefor_ledgercache(?INKT_KEYD,
LedgerKey, SQN, _Obj, _Size, {IndexSpecs, TTL}) -> LedgerKey, SQN, _Obj, _Size, {IndexSpecs, TTL}) ->
{Bucket, Key} = leveled_codec:from_ledgerkey(LedgerKey), {Bucket, Key} = leveled_codec:from_ledgerkey(LedgerKey),
leveled_codec:convert_indexspecs(IndexSpecs, Bucket, Key, SQN, TTL); KeyChanges = leveled_codec:convert_indexspecs(IndexSpecs,
Bucket,
Key,
SQN,
TTL),
{no_lookup, SQN, KeyChanges};
preparefor_ledgercache(_Type, LedgerKey, SQN, Obj, Size, {IndexSpecs, TTL}) -> preparefor_ledgercache(_Type, LedgerKey, SQN, Obj, Size, {IndexSpecs, TTL}) ->
{Bucket, Key, PrimaryChange} = leveled_codec:generate_ledgerkv(LedgerKey, {Bucket, Key, ObjKeyChange, H} = leveled_codec:generate_ledgerkv(LedgerKey,
SQN, SQN,
Obj, Obj,
Size, Size,
TTL), TTL),
[PrimaryChange] ++ leveled_codec:convert_indexspecs(IndexSpecs, KeyChanges = [ObjKeyChange] ++ leveled_codec:convert_indexspecs(IndexSpecs,
Bucket, Bucket,
Key, Key,
SQN, SQN,
TTL). TTL),
{H, SQN, KeyChanges}.
addto_ledgercache(Changes, Cache) -> addto_ledgercache({H, SQN, KeyChanges}, Cache) ->
ets:insert(Cache#ledger_cache.mem, KeyChanges),
UpdIndex = leveled_pmem:prepare_for_index(Cache#ledger_cache.index, H),
Cache#ledger_cache{index = UpdIndex,
min_sqn=min(SQN, Cache#ledger_cache.min_sqn),
max_sqn=max(SQN, Cache#ledger_cache.max_sqn)}.
addto_ledgercache({H, SQN, KeyChanges}, Cache, loader) ->
FoldChangesFun = FoldChangesFun =
fun({K, V}, Cache0) -> fun({K, V}, SL0) ->
{SQN, _Hash} = leveled_codec:strip_to_seqnhashonly({K, V}), leveled_skiplist:enter_nolookup(K, V, SL0)
true = ets:insert(Cache0#ledger_cache.mem, {K, V}), end,
Cache0#ledger_cache{min_sqn=min(SQN, Cache0#ledger_cache.min_sqn), UpdSL = lists:foldl(FoldChangesFun, Cache#ledger_cache.loader, KeyChanges),
max_sqn=max(SQN, Cache0#ledger_cache.max_sqn)} UpdIndex = leveled_pmem:prepare_for_index(Cache#ledger_cache.index, H),
end, Cache#ledger_cache{index = UpdIndex,
lists:foldl(FoldChangesFun, Cache, Changes). loader = UpdSL,
min_sqn=min(SQN, Cache#ledger_cache.min_sqn),
addto_ledgercache(Changes, Cache, loader) -> max_sqn=max(SQN, Cache#ledger_cache.max_sqn)}.
FoldChangesFun =
fun({K, V}, Cache0) ->
{SQN, _Hash} = leveled_codec:strip_to_seqnhashonly({K, V}),
SL0 = Cache0#ledger_cache.loader,
SL1 = leveled_skiplist:enter_nolookup(K, V, SL0),
Cache0#ledger_cache{loader = SL1,
min_sqn=min(SQN, Cache0#ledger_cache.min_sqn),
max_sqn=max(SQN, Cache0#ledger_cache.max_sqn)}
end,
lists:foldl(FoldChangesFun, Cache, Changes).
maybepush_ledgercache(MaxCacheSize, Cache, Penciller) -> maybepush_ledgercache(MaxCacheSize, Cache, Penciller) ->
@ -973,12 +979,13 @@ maybepush_ledgercache(MaxCacheSize, Cache, Penciller) ->
if if
TimeToPush -> TimeToPush ->
CacheToLoad = {leveled_skiplist:from_orderedset(Tab), CacheToLoad = {leveled_skiplist:from_orderedset(Tab),
Cache#ledger_cache.index,
Cache#ledger_cache.min_sqn, Cache#ledger_cache.min_sqn,
Cache#ledger_cache.max_sqn}, Cache#ledger_cache.max_sqn},
case leveled_penciller:pcl_pushmem(Penciller, CacheToLoad) of case leveled_penciller:pcl_pushmem(Penciller, CacheToLoad) of
ok -> ok ->
true = ets:delete_all_objects(Tab),
Cache0 = #ledger_cache{}, Cache0 = #ledger_cache{},
true = ets:delete_all_objects(Tab),
{ok, Cache0#ledger_cache{mem=Tab}}; {ok, Cache0#ledger_cache{mem=Tab}};
returned -> returned ->
{returned, Cache} {returned, Cache}

View file

@ -314,11 +314,12 @@ generate_ledgerkv(PrimaryKey, SQN, Obj, Size, TS) ->
_ -> _ ->
{active, TS} {active, TS}
end, end,
Hash = magic_hash(PrimaryKey),
Value = {SQN, Value = {SQN,
Status, Status,
magic_hash(PrimaryKey), Hash,
extract_metadata(Obj, Size, Tag)}, extract_metadata(Obj, Size, Tag)},
{Bucket, Key, {PrimaryKey, Value}}. {Bucket, Key, {PrimaryKey, Value}, Hash}.
integer_now() -> integer_now() ->

View file

@ -220,7 +220,7 @@
levelzero_size = 0 :: integer(), levelzero_size = 0 :: integer(),
levelzero_maxcachesize :: integer(), levelzero_maxcachesize :: integer(),
levelzero_cointoss = false :: boolean(), levelzero_cointoss = false :: boolean(),
levelzero_index, % may be none or an ETS table reference levelzero_index, % An array
is_snapshot = false :: boolean(), is_snapshot = false :: boolean(),
snapshot_fully_loaded = false :: boolean(), snapshot_fully_loaded = false :: boolean(),
@ -330,7 +330,7 @@ init([PCLopts]) ->
end. end.
handle_call({push_mem, {PushedTree, MinSQN, MaxSQN}}, handle_call({push_mem, {PushedTree, PushedIdx, MinSQN, MaxSQN}},
From, From,
State=#state{is_snapshot=Snap}) when Snap == false -> State=#state{is_snapshot=Snap}) when Snap == false ->
% The push_mem process is as follows: % The push_mem process is as follows:
@ -360,11 +360,12 @@ handle_call({push_mem, {PushedTree, MinSQN, MaxSQN}},
false -> false ->
leveled_log:log("P0018", [ok, false, false]), leveled_log:log("P0018", [ok, false, false]),
gen_server:reply(From, ok), gen_server:reply(From, ok),
{noreply, update_levelzero(State#state.levelzero_size, {noreply,
{PushedTree, MinSQN, MaxSQN}, update_levelzero(State#state.levelzero_size,
State#state.ledger_sqn, {PushedTree, PushedIdx, MinSQN, MaxSQN},
State#state.levelzero_cache, State#state.ledger_sqn,
State)} State#state.levelzero_cache,
State)}
end; end;
handle_call({fetch, Key, Hash}, _From, State) -> handle_call({fetch, Key, Hash}, _From, State) ->
{R, HeadTimer} = timed_fetch_mem(Key, {R, HeadTimer} = timed_fetch_mem(Key,
@ -411,17 +412,22 @@ handle_call(work_for_clerk, From, State) ->
handle_call(get_startup_sqn, _From, State) -> handle_call(get_startup_sqn, _From, State) ->
{reply, State#state.persisted_sqn, State}; {reply, State#state.persisted_sqn, State};
handle_call({register_snapshot, Snapshot}, _From, State) -> handle_call({register_snapshot, Snapshot}, _From, State) ->
Rs = [{Snapshot, State#state.manifest_sqn}|State#state.registered_snapshots], Rs = [{Snapshot,
State#state.manifest_sqn}|State#state.registered_snapshots],
{reply, {ok, State}, State#state{registered_snapshots = Rs}}; {reply, {ok, State}, State#state{registered_snapshots = Rs}};
handle_call({load_snapshot, {BookieIncrTree, MinSQN, MaxSQN}}, _From, State) -> handle_call({load_snapshot, {BookieIncrTree, BookieIdx, MinSQN, MaxSQN}},
_From, State) ->
L0D = leveled_pmem:add_to_cache(State#state.levelzero_size, L0D = leveled_pmem:add_to_cache(State#state.levelzero_size,
{BookieIncrTree, MinSQN, MaxSQN}, {BookieIncrTree, MinSQN, MaxSQN},
State#state.ledger_sqn, State#state.ledger_sqn,
State#state.levelzero_cache), State#state.levelzero_cache),
{LedgerSQN, L0Size, L0Cache} = L0D, {LedgerSQN, L0Size, L0Cache} = L0D,
L0Index = leveled_pmem:add_to_index(BookieIdx,
State#state.levelzero_index,
length(L0Cache)),
{reply, ok, State#state{levelzero_cache=L0Cache, {reply, ok, State#state{levelzero_cache=L0Cache,
levelzero_size=L0Size, levelzero_size=L0Size,
levelzero_index=none, levelzero_index=L0Index,
ledger_sqn=LedgerSQN, ledger_sqn=LedgerSQN,
snapshot_fully_loaded=true}}; snapshot_fully_loaded=true}};
handle_call({fetch_levelzero, Slot}, _From, State) -> handle_call({fetch_levelzero, Slot}, _From, State) ->
@ -467,9 +473,10 @@ handle_cast({levelzero_complete, FN, StartKey, EndKey}, State) ->
filename=FN}, filename=FN},
UpdMan = lists:keystore(0, 1, State#state.manifest, {0, [ManEntry]}), UpdMan = lists:keystore(0, 1, State#state.manifest, {0, [ManEntry]}),
% Prompt clerk to ask about work - do this for every L0 roll % Prompt clerk to ask about work - do this for every L0 roll
leveled_pmem:clear_index(State#state.levelzero_index), UpdIndex = leveled_pmem:clear_index(State#state.levelzero_index),
ok = leveled_pclerk:clerk_prompt(State#state.clerk), ok = leveled_pclerk:clerk_prompt(State#state.clerk),
{noreply, State#state{levelzero_cache=[], {noreply, State#state{levelzero_cache=[],
levelzero_index=UpdIndex,
levelzero_pending=false, levelzero_pending=false,
levelzero_constructor=undefined, levelzero_constructor=undefined,
levelzero_size=0, levelzero_size=0,
@ -643,20 +650,23 @@ start_from_file(PCLopts) ->
update_levelzero(L0Size, {PushedTree, MinSQN, MaxSQN}, update_levelzero(L0Size, {PushedTree, PushedIdx, MinSQN, MaxSQN},
LedgerSQN, L0Cache, State) -> LedgerSQN, L0Cache, State) ->
SW = os:timestamp(), SW = os:timestamp(),
Update = leveled_pmem:add_to_cache(L0Size, Update = leveled_pmem:add_to_cache(L0Size,
{PushedTree, MinSQN, MaxSQN}, {PushedTree, MinSQN, MaxSQN},
LedgerSQN, LedgerSQN,
L0Cache), L0Cache),
leveled_pmem:add_to_index(PushedTree, State#state.levelzero_index), UpdL0Index = leveled_pmem:add_to_index(PushedIdx,
State#state.levelzero_index,
length(L0Cache) + 1),
{UpdMaxSQN, NewL0Size, UpdL0Cache} = Update, {UpdMaxSQN, NewL0Size, UpdL0Cache} = Update,
if if
UpdMaxSQN >= LedgerSQN -> UpdMaxSQN >= LedgerSQN ->
UpdState = State#state{levelzero_cache=UpdL0Cache, UpdState = State#state{levelzero_cache=UpdL0Cache,
levelzero_size=NewL0Size, levelzero_size=NewL0Size,
levelzero_index=UpdL0Index,
ledger_sqn=UpdMaxSQN}, ledger_sqn=UpdMaxSQN},
CacheTooBig = NewL0Size > State#state.levelzero_maxcachesize, CacheTooBig = NewL0Size > State#state.levelzero_maxcachesize,
CacheMuchTooBig = NewL0Size > ?SUPER_MAX_TABLE_SIZE, CacheMuchTooBig = NewL0Size > ?SUPER_MAX_TABLE_SIZE,
@ -741,20 +751,14 @@ plain_fetch_mem(Key, Hash, Manifest, L0Cache, L0Index) ->
R = fetch_mem(Key, Hash, Manifest, L0Cache, L0Index), R = fetch_mem(Key, Hash, Manifest, L0Cache, L0Index),
element(1, R). element(1, R).
fetch_mem(Key, Hash, Manifest, L0Cache, none) -> fetch_mem(Key, Hash, Manifest, L0Cache, L0Index) ->
L0Check = leveled_pmem:check_levelzero(Key, Hash, L0Cache), PosList = leveled_pmem:check_index(Hash, L0Index),
L0Check = leveled_pmem:check_levelzero(Key, Hash, PosList, L0Cache),
case L0Check of case L0Check of
{false, not_found} -> {false, not_found} ->
fetch(Key, Hash, Manifest, 0, fun timed_sst_get/3); fetch(Key, Hash, Manifest, 0, fun timed_sst_get/3);
{true, KV} -> {true, KV} ->
{KV, 0} {KV, 0}
end;
fetch_mem(Key, Hash, Manifest, L0Cache, L0Index) ->
case leveled_pmem:check_index(Hash, L0Index) of
true ->
fetch_mem(Key, Hash, Manifest, L0Cache, none);
false ->
fetch(Key, Hash, Manifest, 0, fun timed_sst_get/3)
end. end.
fetch(_Key, _Hash, _Manifest, ?MAX_LEVELS + 1, _FetchFun) -> fetch(_Key, _Hash, _Manifest, ?MAX_LEVELS + 1, _FetchFun) ->
@ -1374,12 +1378,15 @@ confirm_delete_test() ->
maybe_pause_push(PCL, KL) -> maybe_pause_push(PCL, KL) ->
T0 = leveled_skiplist:empty(true), T0 = leveled_skiplist:empty(true),
T1 = lists:foldl(fun({K, V}, {AccSL, MinSQN, MaxSQN}) -> I0 = leveled_pmem:new_index(),
SL = leveled_skiplist:enter(K, V, AccSL), T1 = lists:foldl(fun({K, V}, {AccSL, AccIdx, MinSQN, MaxSQN}) ->
UpdSL = leveled_skiplist:enter(K, V, AccSL),
SQN = leveled_codec:strip_to_seqonly({K, V}), SQN = leveled_codec:strip_to_seqonly({K, V}),
{SL, min(SQN, MinSQN), max(SQN, MaxSQN)} H = leveled_codec:magic_hash(K),
UpdIdx = leveled_pmem:prepare_for_index(AccIdx, H),
{UpdSL, UpdIdx, min(SQN, MinSQN), max(SQN, MaxSQN)}
end, end,
{T0, infinity, 0}, {T0, I0, infinity, 0},
KL), KL),
case pcl_pushmem(PCL, T1) of case pcl_pushmem(PCL, T1) of
returned -> returned ->

View file

@ -42,11 +42,13 @@
-include("include/leveled.hrl"). -include("include/leveled.hrl").
-export([ -export([
prepare_for_index/2,
add_to_cache/4, add_to_cache/4,
to_list/2, to_list/2,
check_levelzero/3, check_levelzero/3,
check_levelzero/4,
merge_trees/4, merge_trees/4,
add_to_index/2, add_to_index/3,
new_index/0, new_index/0,
clear_index/1, clear_index/1,
check_index/2 check_index/2
@ -59,6 +61,12 @@
%%% API %%% API
%%%============================================================================ %%%============================================================================
prepare_for_index(IndexArray, Hash) ->
{Slot, H0} = split_hash(Hash),
Bin = array:get(Slot, IndexArray),
array:set(Slot, <<Bin/binary, 1:1/integer, H0:23/integer>>, IndexArray).
add_to_cache(L0Size, {LevelMinus1, MinSQN, MaxSQN}, LedgerSQN, TreeList) -> add_to_cache(L0Size, {LevelMinus1, MinSQN, MaxSQN}, LedgerSQN, TreeList) ->
LM1Size = leveled_skiplist:size(LevelMinus1), LM1Size = leveled_skiplist:size(LevelMinus1),
case LM1Size of case LM1Size of
@ -73,32 +81,29 @@ add_to_cache(L0Size, {LevelMinus1, MinSQN, MaxSQN}, LedgerSQN, TreeList) ->
end end
end. end.
add_to_index(LevelMinus1, L0Index) -> add_to_index(LM1Array, L0Index, CacheSlot) when CacheSlot < 128 ->
IndexAddFun = IndexAddFun =
fun({_K, V}) -> fun(Slot, Acc) ->
{_, _, Hash, _} = leveled_codec:striphead_to_details(V), Bin0 = array:get(Slot, Acc),
case Hash of BinLM1 = array:get(Slot, LM1Array),
no_lookup -> array:set(Slot,
ok; <<Bin0/binary,
_ -> 0:1/integer, CacheSlot:7/integer,
ets:insert(L0Index, {Hash}) BinLM1/binary>>,
end Acc)
end, end,
lists:foreach(IndexAddFun, leveled_skiplist:to_list(LevelMinus1)). lists:foldl(IndexAddFun, L0Index, lists:seq(0, 255)).
new_index() -> new_index() ->
ets:new(l0index, [private, set]). array:new([{size, 256}, {default, <<>>}]).
clear_index(L0Index) -> clear_index(_L0Index) ->
ets:delete_all_objects(L0Index). new_index().
check_index(Hash, L0Index) -> check_index(Hash, L0Index) ->
case ets:lookup(L0Index, Hash) of {Slot, H0} = split_hash(Hash),
[{Hash}] -> Bin = array:get(Slot, L0Index),
true; find_pos(Bin, H0, [], 0).
[] ->
false
end.
to_list(Slots, FetchFun) -> to_list(Slots, FetchFun) ->
SW = os:timestamp(), SW = os:timestamp(),
@ -114,13 +119,15 @@ to_list(Slots, FetchFun) ->
FullList. FullList.
check_levelzero(Key, TreeList) -> check_levelzero(Key, PosList, TreeList) ->
check_levelzero(Key, leveled_codec:magic_hash(Key), TreeList). check_levelzero(Key, leveled_codec:magic_hash(Key), PosList, TreeList).
check_levelzero(_Key, _Hash, []) -> check_levelzero(_Key, _Hash, _PosList, []) ->
{false, not_found}; {false, not_found};
check_levelzero(Key, Hash, TreeList) -> check_levelzero(_Key, _Hash, [], _TreeList) ->
check_slotlist(Key, Hash, lists:seq(1, length(TreeList)), TreeList). {false, not_found};
check_levelzero(Key, Hash, PosList, TreeList) ->
check_slotlist(Key, Hash, PosList, TreeList).
merge_trees(StartKey, EndKey, SkipListList, LevelMinus1) -> merge_trees(StartKey, EndKey, SkipListList, LevelMinus1) ->
@ -136,6 +143,22 @@ merge_trees(StartKey, EndKey, SkipListList, LevelMinus1) ->
%%% Internal Functions %%% Internal Functions
%%%============================================================================ %%%============================================================================
find_pos(<<>>, _Hash, PosList, _SlotID) ->
PosList;
find_pos(<<1:1/integer, Hash:23/integer, T/binary>>, Hash, PosList, SlotID) ->
find_pos(T, Hash, PosList ++ [SlotID], SlotID);
find_pos(<<1:1/integer, _Miss:23/integer, T/binary>>, Hash, PosList, SlotID) ->
find_pos(T, Hash, PosList, SlotID);
find_pos(<<0:1/integer, NxtSlot:7/integer, T/binary>>, Hash, PosList, _SlotID) ->
find_pos(T, Hash, PosList, NxtSlot).
split_hash(Hash) ->
Slot = Hash band 255,
H0 = (Hash bsr 8) band 8388607,
{Slot, H0}.
check_slotlist(Key, Hash, CheckList, TreeList) -> check_slotlist(Key, Hash, CheckList, TreeList) ->
SlotCheckFun = SlotCheckFun =
fun(SlotToCheck, {Found, KV}) -> fun(SlotToCheck, {Found, KV}) ->
@ -162,12 +185,21 @@ check_slotlist(Key, Hash, CheckList, TreeList) ->
-ifdef(TEST). -ifdef(TEST).
generate_randomkeys_aslist(Seqn, Count, BucketRangeLow, BucketRangeHigh) ->
lists:ukeysort(1,
generate_randomkeys(Seqn,
Count,
[],
BucketRangeLow,
BucketRangeHigh)).
generate_randomkeys(Seqn, Count, BucketRangeLow, BucketRangeHigh) -> generate_randomkeys(Seqn, Count, BucketRangeLow, BucketRangeHigh) ->
generate_randomkeys(Seqn, KVL = generate_randomkeys(Seqn,
Count, Count,
leveled_skiplist:empty(true), [],
BucketRangeLow, BucketRangeLow,
BucketRangeHigh). BucketRangeHigh),
leveled_skiplist:from_list(KVL).
generate_randomkeys(_Seqn, 0, Acc, _BucketLow, _BucketHigh) -> generate_randomkeys(_Seqn, 0, Acc, _BucketLow, _BucketHigh) ->
Acc; Acc;
@ -179,7 +211,7 @@ generate_randomkeys(Seqn, Count, Acc, BucketLow, BRange) ->
{Seqn, {active, infinity}, null}}, {Seqn, {active, infinity}, null}},
generate_randomkeys(Seqn + 1, generate_randomkeys(Seqn + 1,
Count - 1, Count - 1,
leveled_skiplist:enter(K, V, Acc), [{K, V}|Acc],
BucketLow, BucketLow,
BRange). BRange).
@ -230,8 +262,9 @@ compare_method_test() ->
[], [],
TestList), TestList),
PosList = lists:seq(1, length(TreeList)),
S1 = lists:foldl(fun({Key, _V}, Acc) -> S1 = lists:foldl(fun({Key, _V}, Acc) ->
R0 = check_levelzero(Key, TreeList), R0 = check_levelzero(Key, PosList, TreeList),
[R0|Acc] [R0|Acc]
end, end,
[], [],
@ -267,6 +300,41 @@ compare_method_test() ->
[timer:now_diff(os:timestamp(), SWb), Sz1]), [timer:now_diff(os:timestamp(), SWb), Sz1]),
?assertMatch(Sz0, Sz1). ?assertMatch(Sz0, Sz1).
with_index_test() ->
IndexPrepareFun =
fun({K, _V}, Acc) ->
H = leveled_codec:magic_hash(K),
prepare_for_index(Acc, H)
end,
LoadFun =
fun(_X, {{LedgerSQN, L0Size, L0TreeList}, L0Idx, SrcList}) ->
LM1 = generate_randomkeys_aslist(LedgerSQN + 1, 2000, 1, 500),
LM1Array = lists:foldl(IndexPrepareFun, new_index(), LM1),
LM1SL = leveled_skiplist:from_list(LM1),
UpdL0Index = add_to_index(LM1Array, L0Idx, length(L0TreeList) + 1),
R = add_to_cache(L0Size,
{LM1SL, LedgerSQN + 1, LedgerSQN + 2000},
LedgerSQN,
L0TreeList),
{R, UpdL0Index, lists:ukeymerge(1, LM1, SrcList)}
end,
R0 = lists:foldl(LoadFun, {{0, 0, []}, new_index(), []}, lists:seq(1, 16)),
{{SQN, Size, TreeList}, L0Index, SrcKVL} = R0,
?assertMatch(32000, SQN),
?assertMatch(true, Size =< 32000),
CheckFun =
fun({K, V}, {L0Idx, L0Cache}) ->
H = leveled_codec:magic_hash(K),
PosList = check_index(H, L0Idx),
?assertMatch({true, {K, V}},
check_slotlist(K, H, PosList, L0Cache)),
{L0Idx, L0Cache}
end,
_R1 = lists:foldl(CheckFun, {L0Index, TreeList}, SrcKVL).
-endif. -endif.

View file

@ -1230,11 +1230,11 @@ generate_randomkeys(Seqn, Count, Acc, BucketLow, BRange) ->
LedgerKey = leveled_codec:to_ledgerkey("Bucket" ++ BNumber, LedgerKey = leveled_codec:to_ledgerkey("Bucket" ++ BNumber,
"Key" ++ KNumber, "Key" ++ KNumber,
o), o),
{_B, _K, KV} = leveled_codec:generate_ledgerkv(LedgerKey, {_B, _K, KV, _H} = leveled_codec:generate_ledgerkv(LedgerKey,
Seqn, Seqn,
crypto:rand_bytes(64), crypto:rand_bytes(64),
64, 64,
infinity), infinity),
generate_randomkeys(Seqn + 1, generate_randomkeys(Seqn + 1,
Count - 1, Count - 1,
[KV|Acc], [KV|Acc],