%% -------- Overview ---------
%%
%% The leveled store is based on the LSM-tree, similar to leveldb, except that:
%% - Keys, Metadata and Values are not persisted together - the Keys and
%% Metadata are kept in a tree-based Ledger, whereas the Values are stored
%% only in a sequential Journal.
%% - Different file formats are used for the Journal (cdb, based on the
%% constant database format) and the Ledger (sft, based on sst).
%% - It is not intended to be general purpose, but to be specifically suited
%% for use as a Riak backend in specific circumstances (relatively large
%% values, and frequent use of iterators).
%% - The Journal is an extended nursery log in leveldb terms. It is keyed
%% on the sequence number of the write.
%% - The Ledger is a merge tree, where the key is the actual object key, and
%% the value is the metadata of the object, including the sequence number.
%%
%%
%% -------- The actors ---------
%%
%% The store is fronted by a Bookie, who takes support from different actors:
%% - An Inker who persists new data into the Journal, and returns items from
%% the Journal based on sequence number.
%% - A Penciller who periodically redraws the Ledger, which associates keys
%% with sequence numbers and other metadata, as well as secondary keys (for
%% index queries).
%% - One or more Clerks, who may be used by either the Inker or the Penciller
%% to fulfill background tasks.
%%
%% Both the Inker and the Penciller maintain a manifest of the files which
%% represent the current state of the Journal and the Ledger respectively.
%% For the Inker the manifest maps ranges of sequence numbers to cdb files.
%% For the Penciller the manifest maps key ranges to files at each level of
%% the Ledger.
%%
%% -------- PUT --------
%%
%% A PUT request consists of:
%% - A Primary Key and a Value
%% - IndexSpecs - a set of secondary key changes associated with the
%% transaction
%%
%% The Bookie takes the PUT request and passes it first to the Inker to add
%% the request to the Journal.
%%
%% The Inker will pass the PK/Value/IndexSpecs to the current (append only)
%% CDB journal file to persist the change. The call should return either 'ok'
%% or 'roll'. 'roll' indicates that the CDB file has insufficient capacity for
%% this write.
%%
%% (Note that storing the IndexSpecs will create some duplication with the
%% Metadata wrapped up within the Object value. This Value and the IndexSpecs
%% are compressed before storage, so this should provide some mitigation for
%% the duplication).
%%
%% In response to a 'roll', the Inker should:
%% - start a new active journal file with an open_write_request, and then;
%% - call to PUT the object in this file;
%% - reply to the Bookie, but then in the background
%% - close the previously active journal file (writing the hashtree), and move
%% it to the historic journal
%%
%% The Inker will also return the SQN at which the change has been made, as
%% well as the object size on disk within the Journal.
%%
%% Once the object has been persisted to the Journal, the Ledger can be
%% updated. The Ledger is updated by the Bookie applying a function
%% (extract_metadata/4) to the Value to return the Object Metadata, a
%% function to generate a hash of the Value, and also taking the Primary Key,
%% the IndexSpecs, the Sequence Number in the Journal and the Object Size
%% (returned from the Inker).
%%
%% The Bookie should generate a series of ledger key changes from this
%% information, using a function passed in at startup. For Riak this will be
%% of the form:
%% {{o_rkv, Bucket, Key, SubKey|null},
%%      SQN,
%%      {Hash, Size, {Riak_Metadata}},
%%      {active, TS}|{tomb, TS}} or
%% {{i, Bucket, {IndexField, IndexTerm}, Key},
%%      SQN,
%%      null,
%%      {active, TS}|{tomb, TS}}
%%
%% Recent Ledger changes are retained initially in the Bookie's memory (in a
%% small generally balanced tree). Periodically, the current table is pushed
%% to the Penciller for eventual persistence, and a new table is started.
%%
%% This completes the non-deferrable work associated with a PUT.
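%%
%% As an illustrative sketch only (the root path and the cache/journal sizes
%% here are arbitrary example values, not recommendations), a complete
%% PUT/GET cycle through the public API below looks like:
%%
%% {ok, Bookie} = leveled_bookie:book_start("/tmp/test_store", 2000, 30000),
%% ok = leveled_bookie:book_put(Bookie, "Bucket1", "Key1", <<"Value1">>, []),
%% {ok, Obj} = leveled_bookie:book_get(Bookie, "Bucket1", "Key1"),
%% ok = leveled_bookie:book_close(Bookie).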
%%
%% -------- Snapshots (Key & Metadata Only) --------
%%
%% If there is a snapshot request (e.g. to iterate over the keys) the Bookie
%% may request a clone of the Penciller, or of both the Penciller and the
%% Inker.
%%
%% The clone is seeded with the manifest. The clone should be registered with
%% the real Inker/Penciller, so that the real Inker/Penciller may prevent the
%% deletion of files still in use by a snapshot clone.
%%
%% Iterators should de-register themselves from the Penciller on completion.
%% Iterators should be automatically released after a timeout period. A file
%% can only be deleted from the Ledger if it is no longer in the manifest, and
%% there are no registered iterators from before the point the file was
%% removed from the manifest.
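%%
%% A minimal sketch of taking a whole-store clone (this mirrors the use of
%% book_snapshotstore/3 in init/1 below; the timeout is in milliseconds):
%%
%% {ok, {LedgerSnapshot, LedgerCache}, JournalSnapshot} =
%%     leveled_bookie:book_snapshotstore(Bookie, self(), 300000).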
%%
%% -------- Special Ops --------
%%
%% e.g. Get all for SegmentID/Partition
%%
%%
%%
%% -------- On Startup --------
%%
%% On startup the Bookie must restart both the Inker to load the Journal, and
%% the Penciller to load the Ledger. Once the Penciller has started, the
%% Bookie should request the highest sequence number in the Ledger, and then
%% try to rebuild any missing information from the Journal.
%%
%% To rebuild the Ledger it requests the Inker to scan over the files from
%% that sequence number and re-generate the Ledger changes - pushing the
%% changes directly back into the Ledger.
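%%
%% A condensed sketch of that startup sequence (mirroring startup/2 below):
%%
%% {ok, Inker} = leveled_inker:ink_start(InkerOpts),
%% {ok, Penciller} = leveled_penciller:pcl_start(PencillerOpts),
%% LedgerSQN = leveled_penciller:pcl_getstartupsequencenumber(Penciller),
%% ok = leveled_inker:ink_loadpcl(Inker, LedgerSQN + 1, fun load_fun/5,
%%                                Penciller).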

-module(leveled_bookie).

-behaviour(gen_server).

-include("include/leveled.hrl").

-export([init/1,
        handle_call/3,
        handle_cast/2,
        handle_info/2,
        terminate/2,
        code_change/3,
        book_start/1,
        book_start/3,
        book_riakput/3,
        book_riakdelete/4,
        book_riakget/3,
        book_riakhead/3,
        book_put/5,
        book_delete/4,
        book_get/3,
        book_head/3,
        book_returnfolder/2,
        book_snapshotstore/3,
        book_snapshotledger/3,
        book_compactjournal/2,
        book_close/1]).

-include_lib("eunit/include/eunit.hrl").

-define(CACHE_SIZE, 2000).
-define(JOURNAL_FP, "journal").
-define(LEDGER_FP, "ledger").
-define(SHUTDOWN_WAITS, 60).
-define(SHUTDOWN_PAUSE, 10000).
-define(SNAPSHOT_TIMEOUT, 300000).
-define(JITTER_PROBABILITY, 0.1).

-record(state, {inker :: pid(),
                penciller :: pid(),
                cache_size :: integer(),
                back_pressure :: boolean(),
                ledger_cache :: gb_trees:tree(),
                is_snapshot :: boolean()}).


%%%============================================================================
%%% API
%%%============================================================================

book_start(RootPath, LedgerCacheSize, JournalSize) ->
    book_start(#bookie_options{root_path=RootPath,
                                cache_size=LedgerCacheSize,
                                max_journalsize=JournalSize}).

book_start(Opts) ->
    gen_server:start(?MODULE, [Opts], []).

book_riakput(Pid, RiakObject, IndexSpecs) ->
    {Bucket, Key} = leveled_codec:riakto_keydetails(RiakObject),
    book_put(Pid, Bucket, Key, RiakObject, IndexSpecs, ?RIAK_TAG).

book_put(Pid, Bucket, Key, Object, IndexSpecs) ->
    book_put(Pid, Bucket, Key, Object, IndexSpecs, ?STD_TAG).

%% TODO:
%% It is not enough simply to change the value to delete, as the journal
%% needs to know the key is a tombstone at compaction time, and currently at
%% compaction time the clerk only knows the Key and not the Value.
%%
%% The tombstone cannot be removed from the Journal on compaction, as the
%% journal entry the tombstone deletes may not have been reaped - and so if
%% the ledger got erased, the value would be resurrected.

book_riakdelete(Pid, Bucket, Key, IndexSpecs) ->
    book_put(Pid, Bucket, Key, delete, IndexSpecs, ?RIAK_TAG).

book_delete(Pid, Bucket, Key, IndexSpecs) ->
    book_put(Pid, Bucket, Key, delete, IndexSpecs, ?STD_TAG).

book_riakget(Pid, Bucket, Key) ->
    book_get(Pid, Bucket, Key, ?RIAK_TAG).

book_get(Pid, Bucket, Key) ->
    book_get(Pid, Bucket, Key, ?STD_TAG).

book_riakhead(Pid, Bucket, Key) ->
    book_head(Pid, Bucket, Key, ?RIAK_TAG).

book_head(Pid, Bucket, Key) ->
    book_head(Pid, Bucket, Key, ?STD_TAG).

book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag) ->
    gen_server:call(Pid, {put, Bucket, Key, Object, IndexSpecs, Tag}, infinity).

book_get(Pid, Bucket, Key, Tag) ->
    gen_server:call(Pid, {get, Bucket, Key, Tag}, infinity).

book_head(Pid, Bucket, Key, Tag) ->
    gen_server:call(Pid, {head, Bucket, Key, Tag}, infinity).

book_returnfolder(Pid, FolderType) ->
    gen_server:call(Pid, {return_folder, FolderType}, infinity).
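
%% Folds are returned as {async, Folder} - the caller then evaluates Folder()
%% to run the query against a snapshot. An illustrative sketch (the bucket
%% name is hypothetical):
%%
%% {async, Folder} = book_returnfolder(Bookie, {riakbucket_stats, "Bucket1"}),
%% {Size, Count} = Folder().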

book_snapshotstore(Pid, Requestor, Timeout) ->
    gen_server:call(Pid, {snapshot, Requestor, store, Timeout}, infinity).

book_snapshotledger(Pid, Requestor, Timeout) ->
    gen_server:call(Pid, {snapshot, Requestor, ledger, Timeout}, infinity).

book_compactjournal(Pid, Timeout) ->
    gen_server:call(Pid, {compact_journal, Timeout}, infinity).

book_close(Pid) ->
    gen_server:call(Pid, close, infinity).


%%%============================================================================
%%% gen_server callbacks
%%%============================================================================

init([Opts]) ->
    case Opts#bookie_options.snapshot_bookie of
        undefined ->
            % Start from file not snapshot
            {InkerOpts, PencillerOpts} = set_options(Opts),
            {Inker, Penciller} = startup(InkerOpts, PencillerOpts),
            CacheSize = if
                            Opts#bookie_options.cache_size == undefined ->
                                ?CACHE_SIZE;
                            true ->
                                Opts#bookie_options.cache_size
                        end,
            io:format("Bookie starting with Pcl ~w Ink ~w~n",
                        [Penciller, Inker]),
            {ok, #state{inker=Inker,
                        penciller=Penciller,
                        cache_size=CacheSize,
                        ledger_cache=gb_trees:empty(),
                        is_snapshot=false}};
        Bookie ->
            {ok,
                {Penciller, LedgerCache},
                Inker} = book_snapshotstore(Bookie, self(), ?SNAPSHOT_TIMEOUT),
            ok = leveled_penciller:pcl_loadsnapshot(Penciller, []),
            io:format("Snapshot starting with Pcl ~w Ink ~w~n",
                        [Penciller, Inker]),
            {ok, #state{penciller=Penciller,
                        inker=Inker,
                        ledger_cache=LedgerCache,
                        is_snapshot=true}}
    end.


handle_call({put, Bucket, Key, Object, IndexSpecs, Tag}, From, State) ->
    LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    {ok, SQN, ObjSize} = leveled_inker:ink_put(State#state.inker,
                                                LedgerKey,
                                                Object,
                                                IndexSpecs),
    Changes = preparefor_ledgercache(LedgerKey,
                                        SQN,
                                        Object,
                                        ObjSize,
                                        IndexSpecs),
    Cache0 = addto_ledgercache(Changes, State#state.ledger_cache),
    gen_server:reply(From, ok),
    case maybepush_ledgercache(State#state.cache_size,
                                Cache0,
                                State#state.penciller) of
        {ok, NewCache} ->
            {noreply, State#state{ledger_cache=NewCache, back_pressure=false}};
        {pause, NewCache} ->
            {noreply, State#state{ledger_cache=NewCache, back_pressure=true}}
    end;
handle_call({get, Bucket, Key, Tag}, _From, State) ->
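    % A GET is served in two stages: a "head" lookup against the Ledger
    % (cache first, then Penciller) to find the latest sequence number and
    % status for the key, followed by a fetch of the value from the Journal
    % at that sequence number.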
    LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    case fetch_head(LedgerKey,
                    State#state.penciller,
                    State#state.ledger_cache) of
        not_present ->
            {reply, not_found, State};
        Head ->
            {Seqn, Status, _MD} = leveled_codec:striphead_to_details(Head),
            case Status of
                tomb ->
                    {reply, not_found, State};
                {active, _} ->
                    case fetch_value(LedgerKey, Seqn, State#state.inker) of
                        not_present ->
                            {reply, not_found, State};
                        Object ->
                            {reply, {ok, Object}, State}
                    end
            end
    end;
handle_call({head, Bucket, Key, Tag}, _From, State) ->
    LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    case fetch_head(LedgerKey,
                    State#state.penciller,
                    State#state.ledger_cache) of
        not_present ->
            {reply, not_found, State};
        Head ->
            {_Seqn, Status, MD} = leveled_codec:striphead_to_details(Head),
            case Status of
                tomb ->
                    {reply, not_found, State};
                {active, _} ->
                    OMD = leveled_codec:build_metadata_object(LedgerKey, MD),
                    {reply, {ok, OMD}, State}
            end
    end;
handle_call({snapshot, _Requestor, SnapType, _Timeout}, _From, State) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=State#state.penciller},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_start(PCLopts),
    case SnapType of
        store ->
            InkerOpts = #inker_options{start_snapshot=true,
                                        source_inker=State#state.inker},
            {ok, JournalSnapshot} = leveled_inker:ink_start(InkerOpts),
            {reply,
                {ok,
                    {LedgerSnapshot,
                        State#state.ledger_cache},
                    JournalSnapshot},
                State};
        ledger ->
            {reply,
                {ok,
                    {LedgerSnapshot,
                        State#state.ledger_cache},
                    null},
                State}
    end;
handle_call({return_folder, FolderType}, _From, State) ->
    case FolderType of
        {riakbucket_stats, Bucket} ->
            {reply,
                bucket_stats(State#state.penciller,
                                State#state.ledger_cache,
                                Bucket,
                                ?RIAK_TAG),
                State};
        {index_query,
                Bucket,
                {IdxField, StartValue, EndValue},
                {ReturnTerms, TermRegex}} ->
            {reply,
                index_query(State#state.penciller,
                                State#state.ledger_cache,
                                Bucket,
                                {IdxField, StartValue, EndValue},
                                {ReturnTerms, TermRegex}),
                State};
        {keylist, Tag} ->
            {reply,
                allkey_query(State#state.penciller,
                                State#state.ledger_cache,
                                Tag),
                State}
    end;
handle_call({compact_journal, Timeout}, _From, State) ->
    ok = leveled_inker:ink_compactjournal(State#state.inker,
                                            self(),
                                            Timeout),
    {reply, ok, State};
handle_call(close, _From, State) ->
    {stop, normal, ok, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(Reason, State) ->
    io:format("Bookie closing for reason ~w~n", [Reason]),
    WaitList = lists:duplicate(?SHUTDOWN_WAITS, ?SHUTDOWN_PAUSE),
    ok = case shutdown_wait(WaitList, State#state.inker) of
            false ->
                io:format("Forcing close of inker following wait of "
                                ++ "~w milliseconds~n",
                            [lists:sum(WaitList)]),
                leveled_inker:ink_forceclose(State#state.inker);
            true ->
                ok
        end,
    ok = leveled_penciller:pcl_close(State#state.penciller).

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.


%%%============================================================================
%%% Internal functions
%%%============================================================================

bucket_stats(Penciller, LedgerCache, Bucket, Tag) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=Penciller},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_start(PCLopts),
    Folder = fun() ->
                Increment = gb_trees:to_list(LedgerCache),
                io:format("Length of increment in snapshot is ~w~n",
                            [length(Increment)]),
                ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot,
                                                        {infinity, Increment}),
                StartKey = leveled_codec:to_ledgerkey(Bucket, null, Tag),
                EndKey = leveled_codec:to_ledgerkey(Bucket, null, Tag),
                Acc = leveled_penciller:pcl_fetchkeys(LedgerSnapshot,
                                                        StartKey,
                                                        EndKey,
                                                        fun accumulate_size/3,
                                                        {0, 0}),
                ok = leveled_penciller:pcl_close(LedgerSnapshot),
                Acc
                end,
    {async, Folder}.

index_query(Penciller, LedgerCache,
                Bucket,
                {IdxField, StartValue, EndValue},
                {ReturnTerms, TermRegex}) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=Penciller},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_start(PCLopts),
    Folder = fun() ->
                Increment = gb_trees:to_list(LedgerCache),
                io:format("Length of increment in snapshot is ~w~n",
                            [length(Increment)]),
                ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot,
                                                        {infinity, Increment}),
                StartKey = leveled_codec:to_ledgerkey(Bucket, null, ?IDX_TAG,
                                                        IdxField, StartValue),
                EndKey = leveled_codec:to_ledgerkey(Bucket, null, ?IDX_TAG,
                                                        IdxField, EndValue),
                AddFun = case ReturnTerms of
                                true ->
                                    fun add_terms/3;
                                _ ->
                                    fun add_keys/3
                            end,
                AccFun = accumulate_index(TermRegex, AddFun),
                Acc = leveled_penciller:pcl_fetchkeys(LedgerSnapshot,
                                                        StartKey,
                                                        EndKey,
                                                        AccFun,
                                                        []),
                ok = leveled_penciller:pcl_close(LedgerSnapshot),
                Acc
                end,
    {async, Folder}.

allkey_query(Penciller, LedgerCache, Tag) ->
    PCLopts = #penciller_options{start_snapshot=true,
                                    source_penciller=Penciller},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_start(PCLopts),
    Folder = fun() ->
                Increment = gb_trees:to_list(LedgerCache),
                io:format("Length of increment in snapshot is ~w~n",
                            [length(Increment)]),
                ok = leveled_penciller:pcl_loadsnapshot(LedgerSnapshot,
                                                        {infinity, Increment}),
                SK = leveled_codec:to_ledgerkey(null, null, Tag),
                EK = leveled_codec:to_ledgerkey(null, null, Tag),
                Acc = leveled_penciller:pcl_fetchkeys(LedgerSnapshot,
                                                        SK,
                                                        EK,
                                                        fun accumulate_keys/3,
                                                        []),
                ok = leveled_penciller:pcl_close(LedgerSnapshot),
                lists:reverse(Acc)
                end,
    {async, Folder}.
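
%% Try to close the Inker cleanly, pausing and retrying while it still has
%% work in progress; returns false once the wait list is exhausted so that
%% the caller (terminate/2) can force the close instead.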
shutdown_wait([], _Inker) ->
    false;
shutdown_wait([TopPause|Rest], Inker) ->
    case leveled_inker:ink_close(Inker) of
        ok ->
            true;
        pause ->
            io:format("Inker shutdown still waiting process to complete~n"),
            ok = timer:sleep(TopPause),
            shutdown_wait(Rest, Inker)
    end.


set_options(Opts) ->
    MaxJournalSize = case Opts#bookie_options.max_journalsize of
                            undefined ->
                                30000;
                            MS ->
                                MS
                        end,
    {#inker_options{root_path = Opts#bookie_options.root_path ++
                                    "/" ++ ?JOURNAL_FP,
                    cdb_options = #cdb_options{max_size=MaxJournalSize,
                                                binary_mode=true}},
    #penciller_options{root_path=Opts#bookie_options.root_path ++
                                    "/" ++ ?LEDGER_FP}}.

startup(InkerOpts, PencillerOpts) ->
    {ok, Inker} = leveled_inker:ink_start(InkerOpts),
    {ok, Penciller} = leveled_penciller:pcl_start(PencillerOpts),
    LedgerSQN = leveled_penciller:pcl_getstartupsequencenumber(Penciller),
    io:format("LedgerSQN=~w at startup~n", [LedgerSQN]),
    ok = leveled_inker:ink_loadpcl(Inker,
                                    LedgerSQN + 1,
                                    fun load_fun/5,
                                    Penciller),
    {Inker, Penciller}.
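
%% Look for the Key in the Bookie's in-memory ledger cache first, falling
%% back to the Penciller's persisted view of the Ledger if it is not there.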
fetch_head(Key, Penciller, LedgerCache) ->
    case gb_trees:lookup(Key, LedgerCache) of
        {value, Head} ->
            Head;
        none ->
            case leveled_penciller:pcl_fetch(Penciller, Key) of
                {Key, Head} ->
                    Head;
                not_present ->
                    not_present
            end
    end.

fetch_value(Key, SQN, Inker) ->
    case leveled_inker:ink_fetch(Inker, Key, SQN) of
        {ok, Value} ->
            Value;
        not_present ->
            not_present
    end.

accumulate_size(Key, Value, {Size, Count}) ->
    case leveled_codec:is_active(Key, Value) of
        true ->
            {Size + leveled_codec:get_size(Key, Value), Count + 1};
        false ->
            {Size, Count}
    end.

accumulate_keys(Key, Value, KeyList) ->
    case leveled_codec:is_active(Key, Value) of
        true ->
            [leveled_codec:from_ledgerkey(Key)|KeyList];
        false ->
            KeyList
    end.


add_keys(ObjKey, _IdxValue, Acc) ->
    Acc ++ [ObjKey].

add_terms(ObjKey, IdxValue, Acc) ->
    Acc ++ [{IdxValue, ObjKey}].
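
%% Return an accumulator fun for the index fold, closed over the (optional)
%% regular expression to be applied to index terms, and over the AddFun which
%% determines whether bare keys or {term, key} pairs are accumulated.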
accumulate_index(TermRe, AddFun) ->
    case TermRe of
        undefined ->
            fun(Key, Value, Acc) ->
                case leveled_codec:is_active(Key, Value) of
                    true ->
                        {_Bucket,
                            ObjKey,
                            IdxValue} = leveled_codec:from_ledgerkey(Key),
                        AddFun(ObjKey, IdxValue, Acc);
                    false ->
                        Acc
                end end;
        TermRe ->
            fun(Key, Value, Acc) ->
                case leveled_codec:is_active(Key, Value) of
                    true ->
                        {_Bucket,
                            ObjKey,
                            IdxValue} = leveled_codec:from_ledgerkey(Key),
                        case re:run(IdxValue, TermRe) of
                            nomatch ->
                                Acc;
                            _ ->
                                AddFun(ObjKey, IdxValue, Acc)
                        end;
                    false ->
                        Acc
                end end
    end.
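
%% Convert a PUT into the full set of Ledger changes - the object's own key
%% change plus one change for each of the IndexSpecs.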
preparefor_ledgercache(PK, SQN, Obj, Size, IndexSpecs) ->
    {Bucket, Key, PrimaryChange} = leveled_codec:generate_ledgerkv(PK,
                                                                    SQN,
                                                                    Obj,
                                                                    Size),
    ConvSpecs = leveled_codec:convert_indexspecs(IndexSpecs, Bucket, Key, SQN),
    [PrimaryChange] ++ ConvSpecs.

addto_ledgercache(Changes, Cache) ->
    lists:foldl(fun({K, V}, Acc) -> gb_trees:enter(K, V, Acc) end,
                    Cache,
                    Changes).

maybepush_ledgercache(MaxCacheSize, Cache, Penciller) ->
    CacheSize = gb_trees:size(Cache),
    TimeToPush = maybe_withjitter(CacheSize, MaxCacheSize),
    if
        TimeToPush ->
            Dump = gb_trees:to_list(Cache),
            case leveled_penciller:pcl_pushmem(Penciller, Dump) of
                ok ->
                    {ok, gb_trees:empty()};
                pause ->
                    {pause, gb_trees:empty()};
                returned ->
                    {ok, Cache}
            end;
        true ->
            {ok, Cache}
    end.
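
%% Jitter the decision to push the cache to the Penciller: always push once
%% the cache is beyond twice the limit, but between the limit and twice the
%% limit push only with probability ?JITTER_PROBABILITY, so that pushes are
%% spread out rather than occurring deterministically at the limit.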
maybe_withjitter(CacheSize, MaxCacheSize) ->
    if
        CacheSize > 2 * MaxCacheSize ->
            true;
        CacheSize > MaxCacheSize ->
            R = random:uniform(),
            if
                R < ?JITTER_PROBABILITY ->
                    true;
                true ->
                    false
            end;
        true ->
            false
    end.
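
%% Fold fun passed to the Inker at startup (see startup/2) to replay the
%% Journal between MinSQN and MaxSQN, converting each Journal entry back
%% into ledger cache changes.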
load_fun(KeyInLedger, ValueInLedger, _Position, Acc0, ExtractFun) ->
    {MinSQN, MaxSQN, OutputTree} = Acc0,
    {SQN, _Type, PK} = KeyInLedger,
    % VBin may already be a term
    {VBin, VSize} = ExtractFun(ValueInLedger),
    {Obj, IndexSpecs} = case is_binary(VBin) of
                            true ->
                                binary_to_term(VBin);
                            false ->
                                VBin
                        end,
    case SQN of
        SQN when SQN < MinSQN ->
            {loop, Acc0};
        SQN when SQN < MaxSQN ->
            Changes = preparefor_ledgercache(PK, SQN, Obj, VSize, IndexSpecs),
            {loop, {MinSQN, MaxSQN, addto_ledgercache(Changes, OutputTree)}};
        MaxSQN ->
            io:format("Reached end of load batch with SQN ~w~n", [SQN]),
            Changes = preparefor_ledgercache(PK, SQN, Obj, VSize, IndexSpecs),
            {stop, {MinSQN, MaxSQN, addto_ledgercache(Changes, OutputTree)}};
        SQN when SQN > MaxSQN ->
            io:format("Skipping as exceeded MaxSQN ~w with SQN ~w~n",
                        [MaxSQN, SQN]),
            {stop, Acc0}
    end.


%%%============================================================================
%%% Test
%%%============================================================================

-ifdef(TEST).

reset_filestructure() ->
    RootPath = "../test",
    leveled_inker:clean_testdir(RootPath ++ "/" ++ ?JOURNAL_FP),
    leveled_penciller:clean_testdir(RootPath ++ "/" ++ ?LEDGER_FP),
    RootPath.


generate_multiple_objects(Count, KeyNumber) ->
    generate_multiple_objects(Count, KeyNumber, []).

generate_multiple_objects(0, _KeyNumber, ObjL) ->
    ObjL;
generate_multiple_objects(Count, KeyNumber, ObjL) ->
    Obj = {"Bucket",
            "Key" ++ integer_to_list(KeyNumber),
            crypto:rand_bytes(1024),
            [],
            [{"MDK", "MDV" ++ integer_to_list(KeyNumber)},
                {"MDK2", "MDV" ++ integer_to_list(KeyNumber)}]},
    {B1, K1, V1, Spec1, MD} = Obj,
    Content = #r_content{metadata=MD, value=V1},
    Obj1 = #r_object{bucket=B1, key=K1, contents=[Content], vclock=[{'a',1}]},
    generate_multiple_objects(Count - 1, KeyNumber + 1, ObjL ++ [{Obj1, Spec1}]).


single_key_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start(#bookie_options{root_path=RootPath}),
    {B1, K1, V1, Spec1, MD} = {"Bucket1",
                                "Key1",
                                "Value1",
                                [],
                                {"MDK1", "MDV1"}},
    Content = #r_content{metadata=MD, value=V1},
    Object = #r_object{bucket=B1, key=K1, contents=[Content], vclock=[{'a',1}]},
    ok = book_riakput(Bookie1, Object, Spec1),
    {ok, F1} = book_riakget(Bookie1, B1, K1),
    ?assertMatch(F1, Object),
    ok = book_close(Bookie1),
    {ok, Bookie2} = book_start(#bookie_options{root_path=RootPath}),
    {ok, F2} = book_riakget(Bookie2, B1, K1),
    ?assertMatch(F2, Object),
    ok = book_close(Bookie2),
    reset_filestructure().

multi_key_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start(#bookie_options{root_path=RootPath}),
    {B1, K1, V1, Spec1, MD1} = {"Bucket",
                                "Key1",
                                "Value1",
                                [],
                                {"MDK1", "MDV1"}},
    C1 = #r_content{metadata=MD1, value=V1},
    Obj1 = #r_object{bucket=B1, key=K1, contents=[C1], vclock=[{'a',1}]},
    {B2, K2, V2, Spec2, MD2} = {"Bucket",
                                "Key2",
                                "Value2",
                                [],
                                {"MDK2", "MDV2"}},
    C2 = #r_content{metadata=MD2, value=V2},
    Obj2 = #r_object{bucket=B2, key=K2, contents=[C2], vclock=[{'a',1}]},
    ok = book_riakput(Bookie1, Obj1, Spec1),
    ObjL1 = generate_multiple_objects(100, 3),
    SW1 = os:timestamp(),
    lists:foreach(fun({O, S}) -> ok = book_riakput(Bookie1, O, S) end, ObjL1),
    io:format("PUT of 100 objects completed in ~w microseconds~n",
                [timer:now_diff(os:timestamp(), SW1)]),
    ok = book_riakput(Bookie1, Obj2, Spec2),
    {ok, F1A} = book_riakget(Bookie1, B1, K1),
    ?assertMatch(F1A, Obj1),
    {ok, F2A} = book_riakget(Bookie1, B2, K2),
    ?assertMatch(F2A, Obj2),
    ObjL2 = generate_multiple_objects(100, 103),
    SW2 = os:timestamp(),
    lists:foreach(fun({O, S}) -> ok = book_riakput(Bookie1, O, S) end, ObjL2),
    io:format("PUT of 100 objects completed in ~w microseconds~n",
                [timer:now_diff(os:timestamp(), SW2)]),
    {ok, F1B} = book_riakget(Bookie1, B1, K1),
    ?assertMatch(F1B, Obj1),
    {ok, F2B} = book_riakget(Bookie1, B2, K2),
    ?assertMatch(F2B, Obj2),
    ok = book_close(Bookie1),
    %% Now reopen the store, and confirm that a fetch is still possible
    {ok, Bookie2} = book_start(#bookie_options{root_path=RootPath}),
    {ok, F1C} = book_riakget(Bookie2, B1, K1),
    ?assertMatch(F1C, Obj1),
    {ok, F2C} = book_riakget(Bookie2, B2, K2),
    ?assertMatch(F2C, Obj2),
    ObjL3 = generate_multiple_objects(100, 203),
    SW3 = os:timestamp(),
    lists:foreach(fun({O, S}) -> ok = book_riakput(Bookie2, O, S) end, ObjL3),
    io:format("PUT of 100 objects completed in ~w microseconds~n",
                [timer:now_diff(os:timestamp(), SW3)]),
    {ok, F1D} = book_riakget(Bookie2, B1, K1),
    ?assertMatch(F1D, Obj1),
    {ok, F2D} = book_riakget(Bookie2, B2, K2),
    ?assertMatch(F2D, Obj2),
    ok = book_close(Bookie2),
    reset_filestructure().


-endif.
|