Use leveled_codec types
... and exporting them. Previously types were not exported, and it appears dialyzer treated them as any() when they were unexported types.
This commit is contained in:
parent
2063cacd8f
commit
6a20b2ce66
9 changed files with 222 additions and 96 deletions
|
@ -248,8 +248,9 @@ book_start(Opts) ->
|
|||
gen_server:start(?MODULE, [Opts], []).
|
||||
|
||||
|
||||
-spec book_tempput(pid(), any(), any(), any(), list(), atom(), integer()) ->
|
||||
ok|pause.
|
||||
-spec book_tempput(pid(), any(), any(), any(),
|
||||
leveled_codec:index_specs(),
|
||||
leveled_codec:tag(), integer()) -> ok|pause.
|
||||
|
||||
%% @doc Put an object with an expiry time
|
||||
%%
|
||||
|
@ -314,8 +315,9 @@ book_put(Pid, Bucket, Key, Object, IndexSpecs) ->
|
|||
book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag) ->
|
||||
book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, infinity).
|
||||
|
||||
-spec book_put(pid(), any(), any(), any(), list(), atom(), infinity|integer())
|
||||
-> ok|pause.
|
||||
-spec book_put(pid(), any(), any(), any(),
|
||||
leveled_codec:index_specs(),
|
||||
leveled_codec:tag(), infinity|integer()) -> ok|pause.
|
||||
|
||||
book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL) ->
|
||||
gen_server:call(Pid,
|
||||
|
@ -349,7 +351,8 @@ book_mput(Pid, ObjectSpecs) ->
|
|||
book_mput(Pid, ObjectSpecs, TTL) ->
|
||||
gen_server:call(Pid, {mput, ObjectSpecs, TTL}, infinity).
|
||||
|
||||
-spec book_delete(pid(), any(), any(), list()) -> ok|pause.
|
||||
-spec book_delete(pid(), any(), any(), leveled_codec:index_specs())
|
||||
-> ok|pause.
|
||||
|
||||
%% @doc
|
||||
%%
|
||||
|
@ -360,8 +363,10 @@ book_delete(Pid, Bucket, Key, IndexSpecs) ->
|
|||
book_put(Pid, Bucket, Key, delete, IndexSpecs, ?STD_TAG).
|
||||
|
||||
|
||||
-spec book_get(pid(), any(), any(), atom()) -> {ok, any()}|not_found.
|
||||
-spec book_head(pid(), any(), any(), atom()) -> {ok, any()}|not_found.
|
||||
-spec book_get(pid(), any(), any(), leveled_codec:tag())
|
||||
-> {ok, any()}|not_found.
|
||||
-spec book_head(pid(), any(), any(), leveled_codec:tag())
|
||||
-> {ok, any()}|not_found.
|
||||
|
||||
%% @doc - GET and HEAD requests
|
||||
%%
|
||||
|
@ -496,7 +501,7 @@ book_destroy(Pid) ->
|
|||
gen_server:call(Pid, destroy, infinity).
|
||||
|
||||
|
||||
-spec book_isempty(pid(), atom()) -> boolean().
|
||||
-spec book_isempty(pid(), leveled_codec:tag()) -> boolean().
|
||||
%% @doc
|
||||
%% Confirm if the store is empty, or if it contains a Key and Value for a
|
||||
%% given tag
|
||||
|
@ -575,7 +580,7 @@ handle_call({put, Bucket, Key, Object, IndexSpecs, Tag, TTL}, From, State)
|
|||
{IndexSpecs, TTL}),
|
||||
{SW1, Timings1} =
|
||||
update_timings(SW0, {put, {inker, ObjSize}}, State#state.put_timings),
|
||||
Changes = preparefor_ledgercache(no_type_assigned,
|
||||
Changes = preparefor_ledgercache(null,
|
||||
LedgerKey,
|
||||
SQN,
|
||||
Object,
|
||||
|
@ -1242,8 +1247,11 @@ readycache_forsnapshot(LedgerCache, Query) ->
|
|||
max_sqn=LedgerCache#ledger_cache.max_sqn}
|
||||
end.
|
||||
|
||||
-spec scan_table(ets:tab(), tuple(), tuple()) ->
|
||||
{list(), non_neg_integer()|infinity, non_neg_integer()}.
|
||||
-spec scan_table(ets:tab(),
|
||||
leveled_codec:ledger_key(), leveled_codec:ledger_key())
|
||||
-> {list(leveled_codec:ledger_kv()),
|
||||
non_neg_integer()|infinity,
|
||||
non_neg_integer()}.
|
||||
%% @doc
|
||||
%% Query the ETS table to find a range of keys (start inclusive). Should also
|
||||
%% return the minimum and maximum sequence number found in the query. This
|
||||
|
@ -1280,7 +1288,8 @@ scan_table(Table, StartKey, EndKey, Acc, MinSQN, MaxSQN) ->
|
|||
end.
|
||||
|
||||
|
||||
-spec fetch_head(tuple(), pid(), ledger_cache()) -> not_present|tuple().
|
||||
-spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache())
|
||||
-> not_present|leveled_codec:ledger_value().
|
||||
%% @doc
|
||||
%% Fetch only the head of the object from the Ledger (or the bookie's recent
|
||||
%% ledger cache if it has just been updated). not_present is returned if the
|
||||
|
@ -1310,9 +1319,14 @@ fetch_head(Key, Penciller, LedgerCache) ->
|
|||
end.
|
||||
|
||||
|
||||
-spec preparefor_ledgercache(atom(), any(), integer(), any(),
|
||||
integer(), tuple(), book_state()) ->
|
||||
{integer()|no_lookup, integer(), list()}.
|
||||
-spec preparefor_ledgercache(leveled_codec:journal_key_tag()|null,
|
||||
leveled_codec:ledger_key()|?DUMMY,
|
||||
integer(), any(), integer(),
|
||||
leveled_codec:key_changes(),
|
||||
book_state())
|
||||
-> {integer()|no_lookup,
|
||||
integer(),
|
||||
list(leveled_codec:ledger_kv())}.
|
||||
%% @doc
|
||||
%% Prepare an object and its related key changes for addition to the Ledger
|
||||
%% via the Ledger Cache.
|
||||
|
@ -1342,7 +1356,9 @@ preparefor_ledgercache(_InkTag,
|
|||
|
||||
|
||||
-spec addto_ledgercache({integer()|no_lookup,
|
||||
integer(), list()}, ledger_cache())
|
||||
integer(),
|
||||
list(leveled_codec:ledger_kv())},
|
||||
ledger_cache())
|
||||
-> ledger_cache().
|
||||
%% @doc
|
||||
%% Add a set of changes associated with a single sequence number (journal
|
||||
|
@ -1356,7 +1372,10 @@ addto_ledgercache({H, SQN, KeyChanges}, Cache) ->
|
|||
max_sqn=max(SQN, Cache#ledger_cache.max_sqn)}.
|
||||
|
||||
-spec addto_ledgercache({integer()|no_lookup,
|
||||
integer(), list()}, ledger_cache(), loader)
|
||||
integer(),
|
||||
list(leveled_codec:ledger_kv())},
|
||||
ledger_cache(),
|
||||
loader)
|
||||
-> ledger_cache().
|
||||
%% @doc
|
||||
%% Add a set of changes associated with a single sequence number (journal
|
||||
|
|
|
@ -103,6 +103,26 @@
|
|||
lz4|native.
|
||||
-type journal_keychanges() ::
|
||||
{list(), infinity|integer()}. % {KeyChanges, TTL}
|
||||
-type index_specs() ::
|
||||
list({add|remove, any(), any()}).
|
||||
|
||||
-type segment_list()
|
||||
:: list(integer())|false.
|
||||
|
||||
-export_type([tag/0,
|
||||
segment_hash/0,
|
||||
ledger_status/0,
|
||||
ledger_key/0,
|
||||
ledger_value/0,
|
||||
ledger_kv/0,
|
||||
compaction_strategy/0,
|
||||
journal_key_tag/0,
|
||||
journal_key/0,
|
||||
compression_method/0,
|
||||
journal_keychanges/0,
|
||||
index_specs/0,
|
||||
segment_list/0]).
|
||||
|
||||
|
||||
%%%============================================================================
|
||||
%%% Ledger Key Manipulation
|
||||
|
@ -495,7 +515,7 @@ hash(Obj) ->
|
|||
|
||||
|
||||
%%%============================================================================
|
||||
%%% Other functions
|
||||
%%% Other Ledger Functions
|
||||
%%%============================================================================
|
||||
|
||||
|
||||
|
@ -509,7 +529,7 @@ obj_objectspecs(ObjectSpecs, SQN, TTL) ->
|
|||
end,
|
||||
ObjectSpecs).
|
||||
|
||||
-spec idx_indexspecs(list(tuple()),
|
||||
-spec idx_indexspecs(index_specs(),
|
||||
any(), any(), integer(), integer()|infinity)
|
||||
-> list(ledger_kv()).
|
||||
%% @doc
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
%% The Entry should have a pid() as the third element, but a string() may be
|
||||
%% used in unit tests
|
||||
|
||||
-export_type([manifest/0, manifest_entry/0]).
|
||||
|
||||
%%%============================================================================
|
||||
%%% API
|
||||
|
@ -73,7 +74,8 @@ add_entry(Manifest, Entry, ToEnd) ->
|
|||
from_list(Man1)
|
||||
end.
|
||||
|
||||
-spec append_lastkey(manifest(), pid(), any()) -> manifest().
|
||||
-spec append_lastkey(manifest(), pid(), leveled_codec:journal_key())
|
||||
-> manifest().
|
||||
%% @doc
|
||||
%% On discovery of the last key in the last journal entry, the manifest can
|
||||
%% be updated through this function to have the last key
|
||||
|
|
|
@ -172,18 +172,14 @@ ink_start(InkerOpts) ->
|
|||
gen_server:start(?MODULE, [InkerOpts], []).
|
||||
|
||||
-spec ink_put(pid(),
|
||||
{atom(), any(), any(), any()}|string(),
|
||||
leveled_codec:ledger_key(),
|
||||
any(),
|
||||
{list(), integer()|infinity}) ->
|
||||
leveled_codec:key_changes()) ->
|
||||
{ok, integer(), integer()}.
|
||||
%% @doc
|
||||
%% PUT an object into the journal, returning the sequence number for the PUT
|
||||
%% as well as the size of the object (information required by the ledger).
|
||||
%%
|
||||
%% The primary key is expected to be a tuple of the form
|
||||
%% {Tag, Bucket, Key, null}, but unit tests support pure string Keys and so
|
||||
%% these types are also supported.
|
||||
%%
|
||||
%% KeyChanges is a tuple of {KeyChanges, TTL} where the TTL is an
|
||||
%% expiry time (or infinity).
|
||||
ink_put(Pid, PrimaryKey, Object, KeyChanges) ->
|
||||
|
@ -200,7 +196,7 @@ ink_mput(Pid, PrimaryKey, ObjectChanges) ->
|
|||
gen_server:call(Pid, {mput, PrimaryKey, ObjectChanges}, infinity).
|
||||
|
||||
-spec ink_get(pid(),
|
||||
{atom(), any(), any(), any()}|string(),
|
||||
leveled_codec:ledger_key(),
|
||||
integer()) ->
|
||||
{{integer(), any()}, {any(), any()}}.
|
||||
%% @doc
|
||||
|
@ -222,7 +218,7 @@ ink_fetch(Pid, PrimaryKey, SQN) ->
|
|||
gen_server:call(Pid, {fetch, PrimaryKey, SQN}, infinity).
|
||||
|
||||
-spec ink_keycheck(pid(),
|
||||
{atom(), any(), any(), any()}|string(),
|
||||
leveled_codec:ledger_key(),
|
||||
integer()) ->
|
||||
probably|missing.
|
||||
%% @doc
|
||||
|
@ -729,7 +725,9 @@ put_object(LedgerKey, Object, KeyChanges, State) ->
|
|||
end.
|
||||
|
||||
|
||||
-spec get_object(tuple(), integer(), leveled_imanifest:manifest()) -> any().
|
||||
-spec get_object(leveled_codec:ledger_key(),
|
||||
integer(),
|
||||
leveled_imanifest:manifest()) -> any().
|
||||
%% @doc
|
||||
%% Find the SQN in the manifest and then fetch the object from the Journal,
|
||||
%% in the manifest. If the fetch is in response to a user GET request then
|
||||
|
@ -745,8 +743,9 @@ get_object(LedgerKey, SQN, Manifest, ToIgnoreKeyChanges) ->
|
|||
leveled_codec:from_inkerkv(Obj, ToIgnoreKeyChanges).
|
||||
|
||||
|
||||
-spec key_check(tuple(), integer(), leveled_imanifest:manifest())
|
||||
-> missing|probably.
|
||||
-spec key_check(leveled_codec:ledger_key(),
|
||||
integer(),
|
||||
leveled_imanifest:manifest()) -> missing|probably.
|
||||
%% @doc
|
||||
%% Checks for the presence of the key at that SQN within the journal,
|
||||
%% avoiding the cost of actually reading the object from disk.
|
||||
|
@ -1039,9 +1038,11 @@ initiate_penciller_snapshot(Bookie) ->
|
|||
create_value_for_journal(Obj, Comp) ->
|
||||
leveled_codec:create_value_for_journal(Obj, Comp, native).
|
||||
|
||||
key_converter(K) ->
|
||||
{o, <<"B">>, K, null}.
|
||||
|
||||
build_dummy_journal() ->
|
||||
F = fun(X) -> X end,
|
||||
build_dummy_journal(F).
|
||||
build_dummy_journal(fun key_converter/1).
|
||||
|
||||
build_dummy_journal(KeyConvertF) ->
|
||||
RootPath = "../test/journal",
|
||||
|
@ -1126,12 +1127,12 @@ simple_inker_test() ->
|
|||
cdb_options=CDBopts,
|
||||
compression_method=native,
|
||||
compress_on_receipt=true}),
|
||||
Obj1 = ink_get(Ink1, "Key1", 1),
|
||||
?assertMatch({{1, "Key1"}, {"TestValue1", ?TEST_KC}}, Obj1),
|
||||
Obj3 = ink_get(Ink1, "Key1", 3),
|
||||
?assertMatch({{3, "Key1"}, {"TestValue3", ?TEST_KC}}, Obj3),
|
||||
Obj4 = ink_get(Ink1, "Key4", 4),
|
||||
?assertMatch({{4, "Key4"}, {"TestValue4", ?TEST_KC}}, Obj4),
|
||||
Obj1 = ink_get(Ink1, key_converter("Key1"), 1),
|
||||
?assertMatch(Obj1, {{1, key_converter("Key1")}, {"TestValue1", ?TEST_KC}}),
|
||||
Obj3 = ink_get(Ink1, key_converter("Key1"), 3),
|
||||
?assertMatch(Obj3, {{3, key_converter("Key1")}, {"TestValue3", ?TEST_KC}}),
|
||||
Obj4 = ink_get(Ink1, key_converter("Key4"), 4),
|
||||
?assertMatch(Obj4, {{4, key_converter("Key4")}, {"TestValue4", ?TEST_KC}}),
|
||||
ink_close(Ink1),
|
||||
clean_testdir(RootPath).
|
||||
|
||||
|
@ -1150,10 +1151,10 @@ simple_inker_completeactivejournal_test() ->
|
|||
cdb_options=CDBopts,
|
||||
compression_method=native,
|
||||
compress_on_receipt=true}),
|
||||
Obj1 = ink_get(Ink1, "Key1", 1),
|
||||
?assertMatch({{1, "Key1"}, {"TestValue1", ?TEST_KC}}, Obj1),
|
||||
Obj2 = ink_get(Ink1, "Key4", 4),
|
||||
?assertMatch({{4, "Key4"}, {"TestValue4", ?TEST_KC}}, Obj2),
|
||||
Obj1 = ink_get(Ink1, key_converter("Key1"), 1),
|
||||
?assertMatch(Obj1, {{1, key_converter("Key1")}, {"TestValue1", ?TEST_KC}}),
|
||||
Obj2 = ink_get(Ink1, key_converter("Key4"), 4),
|
||||
?assertMatch(Obj2, {{4, key_converter("Key4")}, {"TestValue4", ?TEST_KC}}),
|
||||
ink_close(Ink1),
|
||||
clean_testdir(RootPath).
|
||||
|
||||
|
@ -1247,9 +1248,9 @@ empty_manifest_test() ->
|
|||
cdb_options=CDBopts,
|
||||
compression_method=native,
|
||||
compress_on_receipt=true}),
|
||||
?assertMatch(not_present, ink_fetch(Ink1, "Key1", 1)),
|
||||
?assertMatch(not_present, ink_fetch(Ink1, key_converter("Key1"), 1)),
|
||||
|
||||
CheckFun = fun(L, K, SQN) -> lists:member({SQN, K}, L) end,
|
||||
CheckFun = fun(L, K, SQN) -> lists:member({SQN, key_converter(K)}, L) end,
|
||||
?assertMatch(false, CheckFun([], "key", 1)),
|
||||
ok = ink_compactjournal(Ink1,
|
||||
[],
|
||||
|
@ -1269,11 +1270,12 @@ empty_manifest_test() ->
|
|||
cdb_options=CDBopts,
|
||||
compression_method=native,
|
||||
compress_on_receipt=false}),
|
||||
?assertMatch(not_present, ink_fetch(Ink2, "Key1", 1)),
|
||||
{ok, SQN, Size} = ink_put(Ink2, "Key1", "Value1", {[], infinity}),
|
||||
?assertMatch(not_present, ink_fetch(Ink2, key_converter("Key1"), 1)),
|
||||
{ok, SQN, Size} =
|
||||
ink_put(Ink2, key_converter("Key1"), "Value1", {[], infinity}),
|
||||
?assertMatch(2, SQN),
|
||||
?assertMatch(true, Size > 0),
|
||||
{ok, V} = ink_fetch(Ink2, "Key1", 2),
|
||||
{ok, V} = ink_fetch(Ink2, key_converter("Key1"), 2),
|
||||
?assertMatch("Value1", V),
|
||||
ink_close(Ink2),
|
||||
clean_testdir(RootPath).
|
||||
|
|
|
@ -347,7 +347,8 @@ pcl_fetchlevelzero(Pid, Slot) ->
|
|||
% be stuck in L0 pending
|
||||
gen_server:call(Pid, {fetch_levelzero, Slot}, 60000).
|
||||
|
||||
-spec pcl_fetch(pid(), tuple()) -> {tuple(), tuple()}|not_present.
|
||||
-spec pcl_fetch(pid(), leveled_codec:ledger_key())
|
||||
-> leveled_codec:ledger_kv()|not_present.
|
||||
%% @doc
|
||||
%% Fetch a key, return the first (highest SQN) occurrence of that Key along
|
||||
%% with the value.
|
||||
|
@ -364,8 +365,10 @@ pcl_fetch(Pid, Key) ->
|
|||
gen_server:call(Pid, {fetch, Key, Hash}, infinity)
|
||||
end.
|
||||
|
||||
-spec pcl_fetch(pid(), tuple(), {integer(), integer()}) ->
|
||||
{tuple(), tuple()}|not_present.
|
||||
-spec pcl_fetch(pid(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:segment_hash())
|
||||
-> leveled_codec:ledger_kv()|not_present.
|
||||
%% @doc
|
||||
%% Fetch a key, return the first (highest SQN) occurrence of that Key along
|
||||
%% with the value.
|
||||
|
@ -374,7 +377,10 @@ pcl_fetch(Pid, Key) ->
|
|||
pcl_fetch(Pid, Key, Hash) ->
|
||||
gen_server:call(Pid, {fetch, Key, Hash}, infinity).
|
||||
|
||||
-spec pcl_fetchkeys(pid(), tuple(), tuple(), fun(), any()) -> any().
|
||||
-spec pcl_fetchkeys(pid(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key(),
|
||||
fun(), any()) -> any().
|
||||
%% @doc
|
||||
%% Run a range query between StartKey and EndKey (inclusive). This will cover
|
||||
%% all keys in the range - so must only be run against snapshots of the
|
||||
|
@ -392,8 +398,11 @@ pcl_fetchkeys(Pid, StartKey, EndKey, AccFun, InitAcc) ->
|
|||
false, -1},
|
||||
infinity).
|
||||
|
||||
-spec pcl_fetchkeysbysegment(pid(), tuple(), tuple(), fun(), any(),
|
||||
false|list(integer())) -> any().
|
||||
-spec pcl_fetchkeysbysegment(pid(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key(),
|
||||
fun(), any(),
|
||||
leveled_codec:segment_list()) -> any().
|
||||
%% @doc
|
||||
%% Run a range query between StartKey and EndKey (inclusive). This will cover
|
||||
%% all keys in the range - so must only be run against snapshots of the
|
||||
|
@ -414,7 +423,10 @@ pcl_fetchkeysbysegment(Pid, StartKey, EndKey, AccFun, InitAcc, SegmentList) ->
|
|||
SegmentList, -1},
|
||||
infinity).
|
||||
|
||||
-spec pcl_fetchnextkey(pid(), tuple(), tuple(), fun(), any()) -> any().
|
||||
-spec pcl_fetchnextkey(pid(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key(),
|
||||
fun(), any()) -> any().
|
||||
%% @doc
|
||||
%% Run a range query between StartKey and EndKey (inclusive). This has the
|
||||
%% same constraints as pcl_fetchkeys/5, but will only return the first key
|
||||
|
@ -427,7 +439,9 @@ pcl_fetchnextkey(Pid, StartKey, EndKey, AccFun, InitAcc) ->
|
|||
false, 1},
|
||||
infinity).
|
||||
|
||||
-spec pcl_checksequencenumber(pid(), tuple(), integer()) -> boolean().
|
||||
-spec pcl_checksequencenumber(pid(),
|
||||
leveled_codec:ledger_key(),
|
||||
integer()) -> boolean().
|
||||
%% @doc
|
||||
%% Check if the sequence number of the passed key is not replaced by a change
|
||||
%% after the passed sequence number. Will return true if the Key is present
|
||||
|
@ -450,14 +464,18 @@ pcl_checksequencenumber(Pid, Key, SQN) ->
|
|||
pcl_workforclerk(Pid) ->
|
||||
gen_server:cast(Pid, work_for_clerk).
|
||||
|
||||
-spec pcl_manifestchange(pid(), tuple()) -> ok.
|
||||
-spec pcl_manifestchange(pid(), leveled_pmanifest:manifest()) -> ok.
|
||||
%% @doc
|
||||
%% Provide a manifest record (i.e. the output of the leveled_pmanifest module)
|
||||
%% that is required to become the new manifest.
|
||||
pcl_manifestchange(Pid, Manifest) ->
|
||||
gen_server:cast(Pid, {manifest_change, Manifest}).
|
||||
|
||||
-spec pcl_confirml0complete(pid(), string(), tuple(), tuple(), binary()) -> ok.
|
||||
-spec pcl_confirml0complete(pid(),
|
||||
string(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key(),
|
||||
binary()) -> ok.
|
||||
%% @doc
|
||||
%% Allows a SST writer that has written a L0 file to confirm that the file
|
||||
%% is now complete, so the filename and key ranges can be added to the
|
||||
|
|
|
@ -79,6 +79,8 @@
|
|||
-type manifest() :: #manifest{}.
|
||||
-type manifest_entry() :: #manifest_entry{}.
|
||||
|
||||
-export_type([manifest/0, manifest_entry/0]).
|
||||
|
||||
%%%============================================================================
|
||||
%%% API
|
||||
%%%============================================================================
|
||||
|
@ -306,7 +308,8 @@ switch_manifest_entry(Manifest, ManSQN, SrcLevel, Entry) ->
|
|||
get_manifest_sqn(Manifest) ->
|
||||
Manifest#manifest.manifest_sqn.
|
||||
|
||||
-spec key_lookup(manifest(), integer(), tuple()) -> false|manifest_entry().
|
||||
-spec key_lookup(manifest(), integer(), leveled_codec:ledger_key())
|
||||
-> false|manifest_entry().
|
||||
%% @doc
|
||||
%% For a given key find which manifest entry covers that key at that level,
|
||||
%% returning false if there is no covering manifest entry at that level.
|
||||
|
@ -320,7 +323,10 @@ key_lookup(Manifest, LevelIdx, Key) ->
|
|||
Key)
|
||||
end.
|
||||
|
||||
-spec range_lookup(manifest(), integer(), tuple(), tuple()) -> list().
|
||||
-spec range_lookup(manifest(),
|
||||
integer(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key()) -> list().
|
||||
%% @doc
|
||||
%% Return a list of manifest_entry pointers at this level which cover the
|
||||
%% key query range.
|
||||
|
@ -331,7 +337,10 @@ range_lookup(Manifest, LevelIdx, StartKey, EndKey) ->
|
|||
end,
|
||||
range_lookup_int(Manifest, LevelIdx, StartKey, EndKey, MakePointerFun).
|
||||
|
||||
-spec merge_lookup(manifest(), integer(), tuple(), tuple()) -> list().
|
||||
-spec merge_lookup(manifest(),
|
||||
integer(),
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key()) -> list().
|
||||
%% @doc
|
||||
%% Return a list of manifest_entry pointers at this level which cover the
|
||||
%% key query range, only all keys in the files should be included in the
|
||||
|
|
|
@ -50,7 +50,7 @@
|
|||
%%% API
|
||||
%%%============================================================================
|
||||
|
||||
-spec prepare_for_index(index_array(), {integer(), integer()}|no_lookup)
|
||||
-spec prepare_for_index(index_array(), leveled_codec:segment_hash())
|
||||
-> index_array().
|
||||
%% @doc
|
||||
%% Add the hash of a key to the index. This is 'prepared' in the sense that
|
||||
|
|
|
@ -42,14 +42,17 @@
|
|||
|
||||
-define(CHECKJOURNAL_PROB, 0.2).
|
||||
|
||||
-type key_range() :: {StartKey:: any(), EndKey :: any()}.
|
||||
-type key_range()
|
||||
:: {leveled_codec:leveled_key(), leveled_codec:leveled_key()}.
|
||||
-type fun_and_acc()
|
||||
:: {fun(), any()}.
|
||||
|
||||
%%%============================================================================
|
||||
%%% External functions
|
||||
%%%============================================================================
|
||||
|
||||
|
||||
-spec bucket_sizestats(fun(), any(), atom()) -> {async, fun()}.
|
||||
-spec bucket_sizestats(fun(), any(), leveled_codec:tag()) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over a bucket accumulating the count of objects and their total sizes
|
||||
bucket_sizestats(SnapFun, Bucket, Tag) ->
|
||||
|
@ -69,13 +72,14 @@ bucket_sizestats(SnapFun, Bucket, Tag) ->
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec binary_bucketlist(fun(), atom(), fun(), any()) -> {async, fun()}.
|
||||
-spec binary_bucketlist(fun(), leveled_codec:tag(), fun(), any())
|
||||
-> {async, fun()}.
|
||||
%% @doc
|
||||
%% List buckets for tag, assuming bucket names are all binary type
|
||||
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc) ->
|
||||
binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc, -1).
|
||||
|
||||
-spec binary_bucketlist(fun(), atom(), fun(), any(), integer())
|
||||
-spec binary_bucketlist(fun(), leveled_codec:tag(), fun(), any(), integer())
|
||||
-> {async, fun()}.
|
||||
%% @doc
|
||||
%% set Max Buckets to -1 to list all buckets, otherwise will only return
|
||||
|
@ -94,7 +98,11 @@ binary_bucketlist(SnapFun, Tag, FoldBucketsFun, InitAcc, MaxBuckets) ->
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec index_query(fun(), tuple(), tuple()) -> {async, fun()}.
|
||||
-spec index_query(fun(),
|
||||
{leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key(),
|
||||
{boolean(), undefined|re:mp()|iodata()}},
|
||||
fun_and_acc()) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Secondary index query
|
||||
index_query(SnapFun, {StartKey, EndKey, TermHandling}, FoldAccT) ->
|
||||
|
@ -121,10 +129,13 @@ index_query(SnapFun, {StartKey, EndKey, TermHandling}, FoldAccT) ->
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec bucketkey_query(fun(), atom(), any(), key_range(), tuple()) -> {async, fun()}.
|
||||
-spec bucketkey_query(fun(), leveled_codec:tag(), any(),
|
||||
key_range(), fun_and_acc()) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over all keys in `KeyRange' under tag (restricted to a given bucket)
|
||||
bucketkey_query(SnapFun, Tag, Bucket, {StartKey, EndKey}, {FoldKeysFun, InitAcc}) ->
|
||||
bucketkey_query(SnapFun, Tag, Bucket,
|
||||
{StartKey, EndKey},
|
||||
{FoldKeysFun, InitAcc}) ->
|
||||
SK = leveled_codec:to_ledgerkey(Bucket, StartKey, Tag),
|
||||
EK = leveled_codec:to_ledgerkey(Bucket, EndKey, Tag),
|
||||
AccFun = accumulate_keys(FoldKeysFun),
|
||||
|
@ -141,13 +152,14 @@ bucketkey_query(SnapFun, Tag, Bucket, {StartKey, EndKey}, {FoldKeysFun, InitAcc}
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec bucketkey_query(fun(), atom(), any(), tuple()) -> {async, fun()}.
|
||||
-spec bucketkey_query(fun(), leveled_codec:tag(), any(), fun_and_acc())
|
||||
-> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over all keys under tag (potentially restricted to a given bucket)
|
||||
bucketkey_query(SnapFun, Tag, Bucket, FunAcc) ->
|
||||
bucketkey_query(SnapFun, Tag, Bucket, {null, null}, FunAcc).
|
||||
|
||||
-spec hashlist_query(fun(), atom(), boolean()) -> {async, fun()}.
|
||||
-spec hashlist_query(fun(), leveled_codec:tag(), boolean()) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over the keys accumulating the hashes
|
||||
hashlist_query(SnapFun, Tag, JournalCheck) ->
|
||||
|
@ -173,7 +185,9 @@ hashlist_query(SnapFun, Tag, JournalCheck) ->
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec tictactree(fun(), {atom(), any(), tuple()}, boolean(), atom(), fun())
|
||||
-spec tictactree(fun(),
|
||||
{leveled_codec:tag(), any(), tuple()},
|
||||
boolean(), atom(), fun())
|
||||
-> {async, fun()}.
|
||||
%% @doc
|
||||
%% Return a merkle tree from the fold, directly accessing hashes cached in the
|
||||
|
@ -233,7 +247,8 @@ tictactree(SnapFun, {Tag, Bucket, Query}, JournalCheck, TreeSize, Filter) ->
|
|||
end,
|
||||
{async, Runner}.
|
||||
|
||||
-spec foldheads_allkeys(fun(), atom(), fun(), boolean(), false|list(integer()))
|
||||
-spec foldheads_allkeys(fun(), leveled_codec:tag(),
|
||||
fun(), boolean(), false|list(integer()))
|
||||
-> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over all heads in the store for a given tag - applying the passed
|
||||
|
@ -248,8 +263,8 @@ foldheads_allkeys(SnapFun, Tag, FoldFun, JournalCheck, SegmentList) ->
|
|||
{true, JournalCheck},
|
||||
SegmentList).
|
||||
|
||||
-spec foldobjects_allkeys(fun(), atom(), fun(), key_order|sqn_order)
|
||||
-> {async, fun()}.
|
||||
-spec foldobjects_allkeys(fun(), leveled_codec:tag(), fun(),
|
||||
key_order|sqn_order) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over all objects for a given tag
|
||||
foldobjects_allkeys(SnapFun, Tag, FoldFun, key_order) ->
|
||||
|
@ -345,8 +360,10 @@ foldobjects_allkeys(SnapFun, Tag, FoldObjectsFun, sqn_order) ->
|
|||
{async, Folder}.
|
||||
|
||||
|
||||
-spec foldobjects_bybucket(fun(), atom(), list({any(), any()}), fun()) ->
|
||||
{async, fun()}.
|
||||
-spec foldobjects_bybucket(fun(),
|
||||
leveled_codec:tag(),
|
||||
list(key_range()),
|
||||
fun()) -> {async, fun()}.
|
||||
%% @doc
|
||||
%% Fold over all objects within a given key range in a bucket
|
||||
foldobjects_bybucket(SnapFun, Tag, KeyRanges, FoldFun) ->
|
||||
|
|
|
@ -127,7 +127,17 @@
|
|||
size :: integer(),
|
||||
max_sqn :: integer()}).
|
||||
|
||||
-type press_methods() :: lz4|native|none.
|
||||
-type press_methods()
|
||||
:: lz4|native|none.
|
||||
-type range_endpoint()
|
||||
:: all|leveled_codec:leveled_key().
|
||||
-type slot_pointer()
|
||||
:: {pointer, pid(), integer(), range_endpoint(), range_endpoint()}.
|
||||
-type sst_pointer()
|
||||
:: {next,
|
||||
leveled_pmanifest:manifest_entry(),
|
||||
leveled_codec:ledger_key()|all}.
|
||||
|
||||
|
||||
%% yield_blockquery is used to determine if the work necessary to process a
|
||||
%% range query beyond the fetching the slot should be managed from within
|
||||
|
@ -177,8 +187,10 @@
|
|||
%%% API
|
||||
%%%============================================================================
|
||||
|
||||
-spec sst_open(string(), string()) ->
|
||||
{ok, pid(), {tuple(), tuple()}, binary()}.
|
||||
-spec sst_open(string(), string())
|
||||
-> {ok, pid(),
|
||||
{leveled_codec:ledger_key(), leveled_codec:ledger_key()},
|
||||
binary()}.
|
||||
%% @doc
|
||||
%% Open an SST file at a given path and filename. The first and last keys
|
||||
%% are returned in response to the request - so that those keys can be used
|
||||
|
@ -197,8 +209,11 @@ sst_open(RootPath, Filename) ->
|
|||
end.
|
||||
|
||||
-spec sst_new(string(), string(), integer(),
|
||||
list(), integer(), press_methods()) ->
|
||||
{ok, pid(), {tuple(), tuple()}, binary()}.
|
||||
list(leveled_codec:ledger_kv()),
|
||||
integer(), press_methods())
|
||||
-> {ok, pid(),
|
||||
{leveled_codec:ledger_key(), leveled_codec:ledger_key()},
|
||||
binary()}.
|
||||
%% @doc
|
||||
%% Start a new SST file at the assigned level passing in a list of Key, Value
|
||||
%% pairs. This should not be used for basement levels or unexpanded Key/Value
|
||||
|
@ -220,9 +235,17 @@ sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod) ->
|
|||
{ok, Pid, {SK, EK}, Bloom}
|
||||
end.
|
||||
|
||||
-spec sst_new(string(), string(), list(), list(),
|
||||
boolean(), integer(), integer(), press_methods()) ->
|
||||
empty|{ok, pid(), {{list(), list()}, tuple(), tuple()}, binary()}.
|
||||
-spec sst_new(string(), string(),
|
||||
list(leveled_codec:ledger_kv()|sst_pointer()),
|
||||
list(leveled_codec:ledger_kv()|sst_pointer()),
|
||||
boolean(), integer(),
|
||||
integer(), press_methods())
|
||||
-> empty|{ok, pid(),
|
||||
{{list(leveled_codec:ledger_kv()),
|
||||
list(leveled_codec:ledger_kv())},
|
||||
leveled_codec:ledger_key(),
|
||||
leveled_codec:ledger_key()},
|
||||
binary()}.
|
||||
%% @doc
|
||||
%% Start a new SST file at the assigned level passing in a two lists of
|
||||
%% {Key, Value} pairs to be merged. The merge_lists function will use the
|
||||
|
@ -231,7 +254,7 @@ sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod) ->
|
|||
%%
|
||||
%% The remainder of the lists is returned along with the StartKey and EndKey
|
||||
%% so that the remainder can be used in the next file in the merge. It might
|
||||
%% be that the merge_lists returns nothin (for example when a basement file is
|
||||
%% be that the merge_lists returns nothing (for example when a basement file is
|
||||
%% all tombstones) - and the atom empty is returned in this case so that the
|
||||
%% file is not added to the manifest.
|
||||
sst_new(RootPath, Filename,
|
||||
|
@ -283,7 +306,8 @@ sst_newlevelzero(RootPath, Filename,
|
|||
PressMethod0}),
|
||||
{ok, Pid, noreply}.
|
||||
|
||||
-spec sst_get(pid(), tuple()) -> tuple()|not_present.
|
||||
-spec sst_get(pid(), leveled_codec:ledger_key())
|
||||
-> leveled_codec:ledger_kv()|not_present.
|
||||
%% @doc
|
||||
%% Return a Key, Value pair matching a Key or not_present if the Key is not in
|
||||
%% the store. The segment_hash function is used to accelerate the seeking of
|
||||
|
@ -291,7 +315,8 @@ sst_newlevelzero(RootPath, Filename,
|
|||
sst_get(Pid, LedgerKey) ->
|
||||
sst_get(Pid, LedgerKey, leveled_codec:segment_hash(LedgerKey)).
|
||||
|
||||
-spec sst_get(pid(), tuple(), {integer(), integer()}) -> tuple()|not_present.
|
||||
-spec sst_get(pid(), leveled_codec:ledger_key(), leveled_codec:segment_hash())
|
||||
-> leveled_codec:ledger_kv()|not_present.
|
||||
%% @doc
|
||||
%% Return a Key, Value pair matching a Key or not_present if the Key is not in
|
||||
%% the store (with the magic hash precalculated).
|
||||
|
@ -299,7 +324,11 @@ sst_get(Pid, LedgerKey, Hash) ->
|
|||
gen_fsm:sync_send_event(Pid, {get_kv, LedgerKey, Hash}, infinity).
|
||||
|
||||
|
||||
-spec sst_getkvrange(pid(), tuple()|all, tuple()|all, integer()) -> list().
|
||||
-spec sst_getkvrange(pid(),
|
||||
range_endpoint(),
|
||||
range_endpoint(),
|
||||
integer())
|
||||
-> list(leveled_codec:ledger_kv()|slot_pointer()).
|
||||
%% @doc
|
||||
%% Get a range of {Key, Value} pairs as a list between StartKey and EndKey
|
||||
%% (inclusive). The ScanWidth is the maximum size of the range, a pointer
|
||||
|
@ -309,8 +338,12 @@ sst_getkvrange(Pid, StartKey, EndKey, ScanWidth) ->
|
|||
sst_getfilteredrange(Pid, StartKey, EndKey, ScanWidth, false).
|
||||
|
||||
|
||||
-spec sst_getfilteredrange(pid(), tuple()|all, tuple()|all, integer(),
|
||||
list()|false) -> list().
|
||||
-spec sst_getfilteredrange(pid(),
|
||||
range_endpoint(),
|
||||
range_endpoint(),
|
||||
integer(),
|
||||
leveled_codec:segment_list())
|
||||
-> list(leveled_codec:ledger_kv()|slot_pointer()).
|
||||
%% @doc
|
||||
%% Get a range of {Key, Value} pairs as a list between StartKey and EndKey
|
||||
%% (inclusive). The ScanWidth is the maximum size of the range, a pointer
|
||||
|
@ -340,7 +373,8 @@ sst_getfilteredrange(Pid, StartKey, EndKey, ScanWidth, SegList) ->
|
|||
Reply
|
||||
end.
|
||||
|
||||
-spec sst_getslots(pid(), list()) -> list().
|
||||
-spec sst_getslots(pid(), list(slot_pointer()))
|
||||
-> list(leveled_codec:ledger_kv()).
|
||||
%% @doc
|
||||
%% Get a list of slots by their ID. The slot will be converted from the binary
|
||||
%% to term form outside of the FSM loop, this is to stop the copying of the
|
||||
|
@ -348,7 +382,10 @@ sst_getfilteredrange(Pid, StartKey, EndKey, ScanWidth, SegList) ->
|
|||
sst_getslots(Pid, SlotList) ->
|
||||
sst_getfilteredslots(Pid, SlotList, false).
|
||||
|
||||
-spec sst_getfilteredslots(pid(), list(), false|list()) -> list().
|
||||
-spec sst_getfilteredslots(pid(),
|
||||
list(slot_pointer()),
|
||||
leveled_codec:segment_list())
|
||||
-> list(leveled_codec:ledger_kv()).
|
||||
%% @doc
|
||||
%% Get a list of slots by their ID. The slot will be converted from the binary
|
||||
%% to term form outside of the FSM loop
|
||||
|
@ -392,7 +429,9 @@ sst_clear(Pid) ->
|
|||
sst_deleteconfirmed(Pid) ->
|
||||
gen_fsm:send_event(Pid, close).
|
||||
|
||||
-spec sst_checkready(pid()) -> {ok, string(), tuple(), tuple()}.
|
||||
-spec sst_checkready(pid()) -> {ok, string(),
|
||||
leveled_codec:leveled_key(),
|
||||
leveled_codec:leveled_key()}.
|
||||
%% @doc
|
||||
%% If a file has been set to be built, check that it has been built. Returns
|
||||
%% the filename and the {startKey, EndKey} for the manifest.
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue