Correct is_active
First part of adding support for scanning for Keys and Hashes. As part of this, discovered that TTL support did the opposite of what was intended (it only fetched things in the past!).
parent 2607792d1f
commit bd6c44e9b0
2 changed files with 107 additions and 1 deletion
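For orientation, a minimal sketch of the surface this commit adds, pieced together from hashtree_query_test/0 at the bottom of the diff; the Bookie, Key and Value bindings, the module-qualified calls, and the empty index-spec list are assumptions for illustration:

    %% Store an object with a TTL 300s in the future, then fold the store
    %% into a list of {Bucket, Key, Hash} triples for the tag.
    Future = leveled_codec:integer_now() + 300,
    ok = leveled_bookie:book_tempput(Bookie, "Bucket", Key, Value,
                                     [], ?STD_TAG, Future),
    {async, HTFolder} = leveled_bookie:book_returnfolder(
                            Bookie, {hashtree_query, ?STD_TAG, false}),
    KeyHashList = HTFolder().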
src/leveled_bookie.erl

@@ -397,6 +397,14 @@ handle_call({return_folder, FolderType}, _From, State) ->
                allkey_query(State#state.penciller,
                                State#state.ledger_cache,
                                Tag),
                State};
        {hashtree_query, Tag, JournalCheck} ->
            {reply,
                hashtree_query(State#state.penciller,
                                State#state.ledger_cache,
                                State#state.inker,
                                Tag,
                                JournalCheck),
                State}
    end;
handle_call({compact_journal, Timeout}, _From, State) ->
@@ -484,6 +492,34 @@ index_query(Penciller, LedgerCache,
                end,
    {async, Folder}.

hashtree_query(Penciller, LedgerCache, _Inker,
                    Tag, JournalCheck) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=Penciller},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_start(PCLopts),
    JournalSnapshot = case JournalCheck of
                            false ->
                                null
                        end,
    Folder = fun() ->
                io:format("Length of increment in snapshot is ~w~n",
                            [gb_trees:size(LedgerCache)]),
                ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot,
                                                        LedgerCache),
                StartKey = leveled_codec:to_ledgerkey(null, null, Tag),
                EndKey = leveled_codec:to_ledgerkey(null, null, Tag),
                AccFun = accumulate_hashes(),
                Acc = leveled_penciller:pcl_fetchkeys(LedgerSnapshot,
                                                        StartKey,
                                                        EndKey,
                                                        AccFun,
                                                        []),
                ok = leveled_penciller:pcl_close(LedgerSnapshot),
                Acc
                end,
    {async, Folder}.

allkey_query(Penciller, LedgerCache, Tag) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=Penciller},
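Two things are easy to miss in hashtree_query/5 above. First, the snapshot is taken eagerly but the scan is deferred into the returned closure, so the caller pays for the fold only when it applies the folder, and should apply it exactly once, since the closure closes its own snapshot (note also that the JournalCheck case only matches false for now, so journal-checking callers would hit a case_clause error until that path is filled in). Second, StartKey and EndKey are the same null-filled ledger key; Erlang's term order sorts the atom null ahead of any real bucket or key term, and the working assumption (not visible in this diff) is that leveled's end-key comparison treats null fields as wildcards, so one key covers the whole tag. The ordering half is easy to check:

    %% Atoms sort before binaries in Erlang's total term order, so a
    %% null-filled key tuple sorts ahead of any populated one (o is a
    %% stand-in tag atom here).
    true = (null < <<"Bucket">>),
    true = ({o, null, null, null} < {o, <<"Bucket">>, <<"Key1">>, null}).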
@@ -589,6 +625,18 @@ accumulate_size() ->
                end,
    AccFun.

accumulate_hashes() ->
    Now = leveled_codec:integer_now(),
    AccFun = fun(Key, Value, KHList) ->
                    case leveled_codec:is_active(Key, Value, Now) of
                        true ->
                            [leveled_codec:get_keyandhash(Key, Value)|KHList];
                        false ->
                            KHList
                    end
                end,
    AccFun.

accumulate_keys() ->
    Now = leveled_codec:integer_now(),
    AccFun = fun(Key, Value, KeyList) ->
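A design note on the accumulators: each one captures a single Now when the fold is set up, so liveness is judged against one consistent timestamp across the whole scan rather than drifting as the fold runs. A sketch of exercising the closure from an in-module test, assuming the {SQN, Status, MD} ledger value layout that leveled_codec destructures below (all values invented):

    AccFun = accumulate_hashes(),
    LK = {?STD_TAG, <<"Bucket">>, <<"K1">>, null},
    %% Active for another 300s; ?STD_TAG metadata is {Hash, Size}.
    V = {1, {active, leveled_codec:integer_now() + 300}, {4242, 10}},
    [{<<"Bucket">>, <<"K1">>, 4242}] = AccFun(LK, V, []).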
@@ -923,4 +971,49 @@ ttl_test() ->
    ok = book_close(Bookie2),
    reset_filestructure().

hashtree_query_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start(#bookie_options{root_path=RootPath,
                                                max_journalsize=1000000,
                                                cache_size=500}),
    ObjL1 = generate_multiple_objects(1200, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_codec:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    ObjL2 = generate_multiple_objects(20, 1201),
    % Put in a few objects with a TTL in the past
    Past = leveled_codec:integer_now() - 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Past) end,
                    ObjL2),
    % Scan the store for the Bucket, Keys and Hashes
    {async, HTFolder} = book_returnfolder(Bookie1,
                                            {hashtree_query,
                                                ?STD_TAG,
                                                false}),
    KeyHashList = HTFolder(),
    lists:foreach(fun({B, _K, H}) ->
                        ?assertMatch("Bucket", B),
                        ?assertMatch(true, is_integer(H))
                        end,
                    KeyHashList),
    ?assertMatch(1200, length(KeyHashList)),
    ok = book_close(Bookie1),
    {ok, Bookie2} = book_start(#bookie_options{root_path=RootPath,
                                                max_journalsize=200000,
                                                cache_size=500}),
    {async, HTFolder2} = book_returnfolder(Bookie2,
                                            {hashtree_query,
                                                ?STD_TAG,
                                                false}),
    ?assertMatch(KeyHashList, HTFolder2()),
    ok = book_close(Bookie2),
    reset_filestructure().

-endif.
src/leveled_codec.erl

@@ -58,6 +58,7 @@
        generate_ledgerkv/4,
        generate_ledgerkv/5,
        get_size/2,
        get_keyandhash/2,
        convert_indexspecs/5,
        riakto_keydetails/1,
        generate_uuid/0,
@@ -124,7 +125,7 @@ is_active(Key, Value, Now) ->
            true;
        tomb ->
            false;
-       {active, TS} when Now >= TS ->
+       {active, TS} when TS >= Now ->
            true;
        {active, _TS} ->
            false
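This guard swap is the correction named in the commit title. Under the old guard, an object with a TTL in the future failed Now >= TS and fell through to the inactive clause, while an expired object passed it, so scans returned exactly the wrong set. A truth table with hypothetical timestamps:

    Now = leveled_codec:integer_now(),
    %% Old guard (Now >= TS): future TTL inactive, past TTL active. Wrong.
    false = (Now >= Now + 300),
    true  = (Now >= Now - 300),
    %% New guard (TS >= Now): future TTL active, past TTL inactive. Right.
    true  = (Now + 300 >= Now),
    false = (Now - 300 >= Now).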
@@ -307,6 +308,18 @@ get_size(PK, Value) ->
            Size
    end.

get_keyandhash(LK, Value) ->
    {Tag, Bucket, Key, _} = LK,
    {_, _, MD} = Value,
    case Tag of
        ?RIAK_TAG ->
            {_RMD, _VC, Hash, _Size} = MD,
            {Bucket, Key, Hash};
        ?STD_TAG ->
            {Hash, _Size} = MD,
            {Bucket, Key, Hash}
    end.


build_metadata_object(PrimaryKey, MD) ->
    {Tag, Bucket, Key, null} = PrimaryKey,
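For either tag, get_keyandhash/2 yields the same {Bucket, Key, Hash} shape; only the metadata layout differs ({_RMD, _VC, Hash, _Size} under ?RIAK_TAG against {Hash, _Size} under ?STD_TAG). Hypothetical calls, with only the tuple shapes taken from the code above and every field value invented:

    {<<"B">>, <<"K">>, 777} =
        leveled_codec:get_keyandhash({?STD_TAG, <<"B">>, <<"K">>, null},
                                     {1, {active, infinity}, {777, 10}}),
    {<<"B">>, <<"K">>, 888} =
        leveled_codec:get_keyandhash({?RIAK_TAG, <<"B">>, <<"K">>, null},
                                     {1, {active, infinity},
                                      {rmd, vclock, 888, 20}}).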