IsEmpty check
Previously there was no is_empty check; the workaround was to list buckets via binary_bucketlist. With many buckets this was a slow seek (repeatedly calling get-next-key over the whole keyspace), so instead add a proper is_empty check that stops after finding the first bucket.
This commit is contained in:
parent
ef22aabe85
commit
cda412508a
2 changed files with 55 additions and 12 deletions
|
@ -64,7 +64,8 @@
|
||||||
book_islastcompactionpending/1,
|
book_islastcompactionpending/1,
|
||||||
book_trimjournal/1,
|
book_trimjournal/1,
|
||||||
book_close/1,
|
book_close/1,
|
||||||
book_destroy/1]).
|
book_destroy/1,
|
||||||
|
book_isempty/2]).
|
||||||
|
|
||||||
-export([get_opt/2,
|
-export([get_opt/2,
|
||||||
get_opt/3,
|
get_opt/3,
|
||||||
|
@ -484,15 +485,24 @@ book_trimjournal(Pid) ->
|
||||||
%%
|
%%
|
||||||
%% A clean shutdown will persist all the information in the Penciller memory
|
%% A clean shutdown will persist all the information in the Penciller memory
|
||||||
%% before closing, so shutdown is not instantaneous.
|
%% before closing, so shutdown is not instantaneous.
|
||||||
|
|
||||||
book_close(Pid) ->
|
book_close(Pid) ->
|
||||||
gen_server:call(Pid, close, infinity).
|
gen_server:call(Pid, close, infinity).
|
||||||
|
|
||||||
%% @doc Close and clean-out files
|
%% @doc Close and clean-out files
|
||||||
|
|
||||||
book_destroy(Pid) ->
|
book_destroy(Pid) ->
|
||||||
gen_server:call(Pid, destroy, infinity).
|
gen_server:call(Pid, destroy, infinity).
|
||||||
|
|
||||||
|
|
||||||
|
-spec book_isempty(pid(), atom()) -> boolean().
|
||||||
|
%% @doc
|
||||||
|
%% Confirm if the store is empty, or if it contains a Key and Value for a
|
||||||
|
%% given tag
|
||||||
|
book_isempty(Pid, Tag) ->
|
||||||
|
FoldAccT = {fun(_B, _Acc) -> false end, true},
|
||||||
|
{async, Runner} =
|
||||||
|
gen_server:call(Pid, {return_runner, {first_bucket, Tag, FoldAccT}}),
|
||||||
|
Runner().
|
||||||
|
|
||||||
%%%============================================================================
|
%%%============================================================================
|
||||||
%%% gen_server callbacks
|
%%% gen_server callbacks
|
||||||
%%%============================================================================
|
%%%============================================================================
|
||||||
|
@ -992,7 +1002,14 @@ get_runner(State,
|
||||||
leveled_runner:foldobjects_byindex(SnapFun,
|
leveled_runner:foldobjects_byindex(SnapFun,
|
||||||
{Tag, Bucket, Field, FromTerm, ToTerm},
|
{Tag, Bucket, Field, FromTerm, ToTerm},
|
||||||
FoldObjectsFun);
|
FoldObjectsFun);
|
||||||
|
get_runner(State, {binary_bucketlist, Tag, FoldAccT}) ->
|
||||||
|
{FoldBucketsFun, Acc} = FoldAccT,
|
||||||
|
SnapFun = return_snapfun(State, ledger, no_lookup, false, false),
|
||||||
|
leveled_runner:binary_bucketlist(SnapFun, Tag, FoldBucketsFun, Acc);
|
||||||
|
get_runner(State, {first_bucket, Tag, FoldAccT}) ->
|
||||||
|
{FoldBucketsFun, Acc} = FoldAccT,
|
||||||
|
SnapFun = return_snapfun(State, ledger, no_lookup, false, false),
|
||||||
|
leveled_runner:binary_bucketlist(SnapFun, Tag, FoldBucketsFun, Acc, 1);
|
||||||
%% Set of specific runners, primarily used as examples for tests
|
%% Set of specific runners, primarily used as examples for tests
|
||||||
get_runner(State, DeprecatedQuery) ->
|
get_runner(State, DeprecatedQuery) ->
|
||||||
get_deprecatedrunner(State, DeprecatedQuery).
|
get_deprecatedrunner(State, DeprecatedQuery).
|
||||||
|
@ -1009,10 +1026,6 @@ get_deprecatedrunner(State, {bucket_stats, Bucket}) ->
|
||||||
get_deprecatedrunner(State, {riakbucket_stats, Bucket}) ->
|
get_deprecatedrunner(State, {riakbucket_stats, Bucket}) ->
|
||||||
SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
|
SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
|
||||||
leveled_runner:bucket_sizestats(SnapFun, Bucket, ?RIAK_TAG);
|
leveled_runner:bucket_sizestats(SnapFun, Bucket, ?RIAK_TAG);
|
||||||
get_deprecatedrunner(State, {binary_bucketlist, Tag, FoldAccT}) ->
|
|
||||||
{FoldKeysFun, Acc} = FoldAccT,
|
|
||||||
SnapFun = return_snapfun(State, ledger, no_lookup, false, false),
|
|
||||||
leveled_runner:binary_bucketlist(SnapFun, Tag, FoldKeysFun, Acc);
|
|
||||||
get_deprecatedrunner(State, {hashlist_query, Tag, JournalCheck}) ->
|
get_deprecatedrunner(State, {hashlist_query, Tag, JournalCheck}) ->
|
||||||
SnapType = snaptype_by_presence(JournalCheck),
|
SnapType = snaptype_by_presence(JournalCheck),
|
||||||
SnapFun = return_snapfun(State, SnapType, no_lookup, true, true),
|
SnapFun = return_snapfun(State, SnapType, no_lookup, true, true),
|
||||||
|
@ -1915,6 +1928,21 @@ foldobjects_vs_foldheads_bybucket_testto() ->
|
||||||
ok = book_close(Bookie1),
|
ok = book_close(Bookie1),
|
||||||
reset_filestructure().
|
reset_filestructure().
|
||||||
|
|
||||||
|
is_empty_test() ->
|
||||||
|
RootPath = reset_filestructure(),
|
||||||
|
{ok, Bookie1} = book_start([{root_path, RootPath},
|
||||||
|
{max_journalsize, 1000000},
|
||||||
|
{cache_size, 500}]),
|
||||||
|
% Put in an object with a TTL in the future
|
||||||
|
Future = leveled_codec:integer_now() + 300,
|
||||||
|
?assertMatch(true, leveled_bookie:book_isempty(Bookie1, ?STD_TAG)),
|
||||||
|
ok = book_tempput(Bookie1,
|
||||||
|
<<"B">>, <<"K">>, {value, <<"V">>}, [],
|
||||||
|
?STD_TAG, Future),
|
||||||
|
?assertMatch(false, leveled_bookie:book_isempty(Bookie1, ?STD_TAG)),
|
||||||
|
?assertMatch(true, leveled_bookie:book_isempty(Bookie1, ?RIAK_TAG)),
|
||||||
|
|
||||||
|
ok = leveled_bookie:book_close(Bookie1).
|
||||||
|
|
||||||
scan_table_test() ->
|
scan_table_test() ->
|
||||||
K1 = leveled_codec:to_ledgerkey(<<"B1">>,
|
K1 = leveled_codec:to_ledgerkey(<<"B1">>,
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
-export([
|
-export([
|
||||||
bucket_sizestats/3,
|
bucket_sizestats/3,
|
||||||
binary_bucketlist/4,
|
binary_bucketlist/4,
|
||||||
|
binary_bucketlist/5,
|
||||||
index_query/3,
|
index_query/3,
|
||||||
bucketkey_query/4,
|
bucketkey_query/4,
|
||||||
hashlist_query/3,
|
hashlist_query/3,
|
||||||
|
@ -69,10 +70,20 @@ bucket_sizestats(SnapFun, Bucket, Tag) ->
|
||||||
%% @doc
|
%% @doc
|
||||||
%% List buckets for tag, assuming bucket names are all binary type
|
%% List buckets for tag, assuming bucket names are all binary type
|
||||||
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc) ->
|
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc) ->
|
||||||
|
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc, -1).
|
||||||
|
|
||||||
|
-spec binary_bucketlist(fun(), atom(), fun(), any(), integer())
|
||||||
|
-> {async, fun()}.
|
||||||
|
%% @doc
|
||||||
|
%% set Max Buckets to -1 to list all buckets, otherwise will only return
|
||||||
|
%% MaxBuckets (use 1 to confirm that there exists any bucket for a given Tag)
|
||||||
|
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc, MaxBuckets) ->
|
||||||
Runner =
|
Runner =
|
||||||
fun() ->
|
fun() ->
|
||||||
{ok, LedgerSnapshot, _JournalSnapshot} = SnapFun(),
|
{ok, LedgerSnapshot, _JournalSnapshot} = SnapFun(),
|
||||||
BucketAcc = get_nextbucket(null, null, Tag, LedgerSnapshot, []),
|
BucketAcc =
|
||||||
|
get_nextbucket(null, null,
|
||||||
|
Tag, LedgerSnapshot, [], {0, MaxBuckets}),
|
||||||
ok = leveled_penciller:pcl_close(LedgerSnapshot),
|
ok = leveled_penciller:pcl_close(LedgerSnapshot),
|
||||||
lists:foldl(fun({B, _K}, Acc) -> FoldBucketsFun(B, Acc) end,
|
lists:foldl(fun({B, _K}, Acc) -> FoldBucketsFun(B, Acc) end,
|
||||||
InitAcc,
|
InitAcc,
|
||||||
|
@ -380,7 +391,9 @@ foldobjects_byindex(SnapFun, {Tag, Bucket, Field, FromTerm, ToTerm}, FoldFun) ->
|
||||||
%%% Internal functions
|
%%% Internal functions
|
||||||
%%%============================================================================
|
%%%============================================================================
|
||||||
|
|
||||||
get_nextbucket(NextBucket, NextKey, Tag, LedgerSnapshot, BKList) ->
|
get_nextbucket(_NextB, _NextK, _Tag, _LS, BKList, {Limit, Limit}) ->
|
||||||
|
BKList;
|
||||||
|
get_nextbucket(NextBucket, NextKey, Tag, LedgerSnapshot, BKList, {C, L}) ->
|
||||||
Now = leveled_codec:integer_now(),
|
Now = leveled_codec:integer_now(),
|
||||||
StartKey = leveled_codec:to_ledgerkey(NextBucket, NextKey, Tag),
|
StartKey = leveled_codec:to_ledgerkey(NextBucket, NextKey, Tag),
|
||||||
EndKey = leveled_codec:to_ledgerkey(null, null, Tag),
|
EndKey = leveled_codec:to_ledgerkey(null, null, Tag),
|
||||||
|
@ -405,13 +418,15 @@ get_nextbucket(NextBucket, NextKey, Tag, LedgerSnapshot, BKList) ->
|
||||||
null,
|
null,
|
||||||
Tag,
|
Tag,
|
||||||
LedgerSnapshot,
|
LedgerSnapshot,
|
||||||
[{B, K}|BKList]);
|
[{B, K}|BKList],
|
||||||
|
{C + 1, L});
|
||||||
false ->
|
false ->
|
||||||
get_nextbucket(B,
|
get_nextbucket(B,
|
||||||
<<K/binary, 0>>,
|
<<K/binary, 0>>,
|
||||||
Tag,
|
Tag,
|
||||||
LedgerSnapshot,
|
LedgerSnapshot,
|
||||||
BKList)
|
BKList,
|
||||||
|
{C, L})
|
||||||
end;
|
end;
|
||||||
{NB, _V} ->
|
{NB, _V} ->
|
||||||
leveled_log:log("B0010",[NB]),
|
leveled_log:log("B0010",[NB]),
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue