%% -------- PENCILLER ---------
%%
%% The penciller is responsible for writing and re-writing the ledger - a
%% persisted, ordered view of non-recent Keys and Metadata which have been
%% added to the store.
%% - The penciller maintains a manifest of all the files within the current
%% Ledger.
%% - The Penciller provides re-write (compaction) work to be managed by
%% the Penciller's Clerk
%% - The Penciller can be cloned and maintains a register of clones who have
%% requested snapshots of the Ledger
%% - The Penciller accepts new dumps (in the form of a gb_tree) from the
%% Bookie, and calls the Bookie once the process of pencilling this data into
%% the Ledger is complete - at which point the Bookie is free to forget about
%% the data
%% - The Penciller's persistence of the ledger may not be reliable, in that it
%% may lose data, but only in sequence from a particular sequence number.  On
%% startup the Penciller will inform the Bookie of the highest sequence number
%% it has, and the Bookie should load any missing data from that point out of
%% the journal.
%%
%% -------- LEDGER ---------
%%
%% The Ledger is divided into many levels
%% - L0: New keys are received from the Bookie and merged into a single
%% gb_tree, until that tree is the size of a SFT file, and it is then persisted
%% as a SFT file at this level.  L0 SFT files can be larger than the normal
%% maximum size - so we don't have to consider problems of either having more
%% than one L0 file (and handling what happens on a crash between writing the
%% files when the second may have overlapping sequence numbers), or having a
%% remainder with overlapping sequence numbers in memory after the file is
%% written.  Once the persistence is completed, the L0 tree can be erased.
%% There can be only one SFT file at Level 0, so the work to merge that file
%% to the lower level must be the highest priority, as otherwise writes to the
%% ledger will stall when there is next a need to persist.
%% - L1 TO L7: May contain multiple processes managing non-overlapping sft
%% files.  Compaction work should be scheduled if the number of files exceeds
%% the target size of the level, where the target size is 8 ^ n.
%%
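%% For illustration, the target sizes implied by 8 ^ n (and encoded in the
%% ?LEVEL_SCALEFACTOR definition further down this module) are:
%%   [{1, 8}, {2, 64}, {3, 512}, {4, 4096}, {5, 32768}, {6, 262144},
%%    {7, infinity}]
%% so, for example, compaction work becomes due at L2 once that level holds
%% more than 64 files.
%%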
%%
%% The most recent revision of a Key can be found by checking each level until
%% the key is found.  To check a level the correct file must be sought from the
%% manifest for that level, and then a call is made to that file.  If the Key
%% is not present then every level should be checked.
%%
%% If a compaction change takes the size of a level beyond the target size,
%% then compaction work for that level + 1 should be added to the compaction
%% work queue.
%% Compaction work is fetched by the Penciller's Clerk because:
%% - it has timed out due to a period of inactivity
%% - it has been triggered by a cast to indicate the arrival of high
%% priority compaction work
%% The Penciller's Clerk (which performs compaction work) will always call
%% the Penciller to find out the highest priority work currently required
%% whenever it has either completed work, or a timeout has occurred since it
%% was informed there was no work to do.
%%
%% When the clerk picks work it will take the current manifest, and the
%% Penciller assumes the manifest sequence number is to be incremented.
%% When the clerk has completed the work it can request that the manifest
%% change be committed by the Penciller.  The commit is made through changing
%% the filename of the new manifest - so the Penciller is not held up by the
%% process of writing a file, just altering file system metadata.
%%
%% ---------- PUSH ----------
%%
%% The Penciller must support the PUSH of a dump of keys from the Bookie.  The
%% call to PUSH should be immediately acknowledged, and then work should be
%% completed to merge the tree into the L0 tree.
%%
%% The Penciller MUST NOT accept a new PUSH if the Clerk has commenced the
%% conversion of the current L0 tree into a SFT file, but not completed this
%% change.  The Penciller in this case returns the push, and the Bookie should
%% continue to grow the cache before trying again.
%%
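%% A minimal sketch of the Bookie-side loop implied by this contract (the
%% helper names empty_cache/0 and retry_later/0 are illustrative assumptions,
%% not the Bookie's actual code - only pcl_pushmem/2 and its ok/returned
%% responses are defined in this module):
%%
%%   case leveled_penciller:pcl_pushmem(PCL, BookieCacheTree) of
%%       ok ->
%%           %% the Penciller has taken the dump, so the Bookie may empty
%%           %% its cache
%%           empty_cache();
%%       returned ->
%%           %% a L0 file write is pending - keep accumulating in the
%%           %% Bookie cache and retry on a later trigger
%%           retry_later()
%%   end.
%%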
%% ---------- FETCH ----------
%%
%% On request to fetch a key the Penciller should look first in the in-memory
%% L0 tree, then look in the SFT files Level by Level (including level 0),
%% consulting the Manifest to determine which file should be checked at each
%% level.
%%
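%% The precedence is therefore strictly by recency: a result found in the
%% in-memory L0 cache is answered without consulting disk, and a result at
%% level n is answered without checking level n + 1.  A caller sees only the
%% resolved outcome (fetch_mem/4 below is the actual implementation):
%%
%%   case leveled_penciller:pcl_fetch(PCL, Key) of
%%       not_present -> %% L0 cache and every manifest level checked
%%           not_present;
%%       KV -> %% the most recent version of the Key/Value pair
%%           KV
%%   end.
%%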
%% ---------- SNAPSHOT ----------
%%
%% Iterators may request a snapshot of the database.  A snapshot is a cloned
%% Penciller seeded not from disk, but by the in-memory L0 gb_tree and the
%% in-memory manifest, allowing for direct reference to the SFT file processes.
%%
%% Clones formed to support snapshots are registered by the Penciller, so that
%% SFT files valid at the point of the snapshot are not deleted until either
%% the iterator is completed or has timed out.
%%
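%% In terms of the API in this module, the snapshot lifecycle is roughly as
%% follows (a sketch - the penciller_options fields are as used in init/1):
%%
%%   %% 1 - start a clone, which registers itself with the source Penciller
%%   {ok, Snap} = pcl_start(#penciller_options{start_snapshot=true,
%%                                               source_penciller=PCL}),
%%   %% 2 - seed the clone with the source's in-memory view
%%   ok = pcl_loadsnapshot(Snap, BookieIncrTree),
%%   %% 3 - ... iterate over the clone ...
%%   %% 4 - closing the clone releases the registration on the source
%%   ok = pcl_close(Snap).
%%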
%% ---------- ON STARTUP ----------
%%
%% On Startup the Bookie will ask the Penciller to initiate the Ledger first.
%% To initiate the Ledger the Penciller must consult the manifest, and then
%% start a SFT management process for each file in the manifest.
%%
%% The penciller should then try and read any Level 0 file which has the
%% manifest sequence number one higher than the last sequence number stored
%% in the manifest.
%%
%% The Bookie will ask the Inker for any Keys seen beyond that sequence number
%% before the startup of the overall store can be completed.
%%
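%% For example, the Bookie-side startup handshake is approximately (an
%% illustrative sketch, assuming a PCLopts record prepared by the Bookie):
%%
%%   {ok, PCL} = pcl_start(PCLopts),
%%   PersistedSQN = pcl_getstartupsequencenumber(PCL),
%%   %% ... the Bookie then replays Keys with SQN > PersistedSQN from the
%%   %% Inker's journal, pushing them back in via pcl_pushmem/2 ...
%%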
%% ---------- ON SHUTDOWN ----------
%%
%% On a controlled shutdown the Penciller should attempt to write any in-memory
%% ETS table to a L0 SFT file, assuming one is not already pending.  If one is
%% already pending then the Penciller will not persist this part of the Ledger.
%%
%% ---------- FOLDER STRUCTURE ----------
%%
%% The following folders are used by the Penciller
%% $ROOT/ledger/ledger_manifest/ - used for keeping manifest files
%% $ROOT/ledger/ledger_files/ - containing individual SFT files
%%
%% In larger stores there could be a large number of files in the ledger_files
%% folder - perhaps O(1000).  It is assumed that modern file systems should
%% handle this efficiently.
%%
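%% As an example, after a couple of manifest changes the tree might contain
%% (the names follow filepath/3 and levelzero_filename/1 below; the specific
%% sequence numbers are illustrative):
%%
%%   $ROOT/ledger/ledger_manifest/nonzero_2.crr   %% current manifest, SQN 2
%%   $ROOT/ledger/ledger_files/2_0_0.sft          %% a Level 0 SFT file
%%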
%% ---------- COMPACTION & MANIFEST UPDATES ----------
%%
%% The Penciller can have one and only one Clerk for performing compaction
%% work.  When the Clerk has requested and taken work, it should perform the
%% compaction work, starting the new SFT process to manage the new Ledger
%% state and then write a new manifest file that represents that state, using
%% the next Manifest sequence number as the filename:
%% - nonzero_<ManifestSQN#>.pnd
%%
%% The Penciller on accepting the change should rename the manifest file to -
%% - nonzero_<ManifestSQN#>.crr
%%
%% On startup, the Penciller should look for the nonzero_*.crr file with the
%% highest such manifest sequence number.  This will be started as the
%% manifest, together with any _0_0.sft file found at that Manifest SQN.
%% Level zero files are not kept in the persisted manifest, and adding a L0
%% file does not advance the Manifest SQN.
%%
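%% So committing manifest SQN 3, for example, is just a file system rename
%% (see rename_manifest_files/2 below):
%%
%%   $ROOT/ledger/ledger_manifest/nonzero_3.pnd
%%       -> $ROOT/ledger/ledger_manifest/nonzero_3.crr
%%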
%% The pace at which the store can accept updates will be dependent on the
%% speed at which the Penciller's Clerk can merge files at lower levels plus
%% the time it takes to merge from Level 0.  If a clerk has commenced
%% compaction work at a lower level and then immediately a L0 SFT file is
%% written, the Penciller will need to wait for this compaction work to
%% complete and the L0 file to be compacted before the ETS table can be
%% allowed to again reach capacity.
%%
%% The writing of L0 files does not require the involvement of the clerk.
%% The L0 files are prompted directly by the penciller when the in-memory tree
%% has reached capacity.  This places the penciller in a levelzero_pending
%% state, and in this state it must return new pushes.  Once the SFT file has
%% been completed it will confirm completion to the penciller which can then
%% revert the levelzero_pending state, add the file to the manifest and clear
%% the current level zero in-memory view.
%%

-module(leveled_penciller).

-behaviour(gen_server).

-include("include/leveled.hrl").

-export([init/1,
        handle_call/3,
        handle_cast/2,
        handle_info/2,
        terminate/2,
        code_change/3,
        pcl_start/1,
        pcl_pushmem/2,
        pcl_fetchlevelzero/2,
        pcl_fetch/2,
        pcl_fetchkeys/5,
        pcl_fetchnextkey/5,
        pcl_checksequencenumber/3,
        pcl_workforclerk/1,
        pcl_promptmanifestchange/2,
        pcl_confirml0complete/4,
        pcl_confirmdelete/2,
        pcl_close/1,
        pcl_doom/1,
        pcl_registersnapshot/2,
        pcl_releasesnapshot/2,
        pcl_loadsnapshot/2,
        pcl_getstartupsequencenumber/1,
        clean_testdir/1]).

-include_lib("eunit/include/eunit.hrl").

-define(LEVEL_SCALEFACTOR, [{0, 0}, {1, 8}, {2, 64}, {3, 512},
                                {4, 4096}, {5, 32768}, {6, 262144},
                                {7, infinity}]).
-define(MAX_LEVELS, 8).
-define(MAX_WORK_WAIT, 300).
-define(MANIFEST_FP, "ledger_manifest").
-define(FILES_FP, "ledger_files").
-define(CURRENT_FILEX, "crr").
-define(PENDING_FILEX, "pnd").
-define(MEMTABLE, mem).
-define(MAX_TABLESIZE, 32000).
-define(PROMPT_WAIT_ONL0, 5).
-define(WORKQUEUE_BACKLOG_TOLERANCE, 4).

-record(state, {manifest = [] :: list(),
                manifest_sqn = 0 :: integer(),
                ledger_sqn = 0 :: integer(), % The highest SQN added to L0
                persisted_sqn = 0 :: integer(), % The highest SQN persisted
                registered_snapshots = [] :: list(),
                unreferenced_files = [] :: list(),
                root_path = "../test" :: string(),
                clerk :: pid(),
                levelzero_pending = false :: boolean(),
                levelzero_constructor :: pid(),
                levelzero_cache = [] :: list(), % a list of skiplists
                levelzero_index :: array(),
                levelzero_size = 0 :: integer(),
                levelzero_maxcachesize :: integer(),
                is_snapshot = false :: boolean(),
                snapshot_fully_loaded = false :: boolean(),
                source_penciller :: pid(),
                levelzero_astree :: list(), % skiplist
                ongoing_work = [] :: list(),
                work_backlog = false :: boolean()}).


%%%============================================================================
%%% API
%%%============================================================================

pcl_start(PCLopts) ->
    gen_server:start(?MODULE, [PCLopts], []).

pcl_pushmem(Pid, DumpList) ->
    %% Bookie to dump memory onto penciller
    gen_server:call(Pid, {push_mem, DumpList}, infinity).

pcl_fetchlevelzero(Pid, Slot) ->
    %% Timeout to cause crash of L0 file when it can't get the close signal
    %% as it is deadlocked making this call.
    %%
    %% If the timeout gets hit outside of the close scenario the Penciller
    %% will be stuck in L0 pending
    gen_server:call(Pid, {fetch_levelzero, Slot}, 60000).

pcl_fetch(Pid, Key) ->
    gen_server:call(Pid, {fetch, Key}, infinity).

pcl_fetchkeys(Pid, StartKey, EndKey, AccFun, InitAcc) ->
    gen_server:call(Pid,
                    {fetch_keys, StartKey, EndKey, AccFun, InitAcc, -1},
                    infinity).

pcl_fetchnextkey(Pid, StartKey, EndKey, AccFun, InitAcc) ->
    gen_server:call(Pid,
                    {fetch_keys, StartKey, EndKey, AccFun, InitAcc, 1},
                    infinity).

pcl_checksequencenumber(Pid, Key, SQN) ->
    gen_server:call(Pid, {check_sqn, Key, SQN}, infinity).

pcl_workforclerk(Pid) ->
    gen_server:call(Pid, work_for_clerk, infinity).

pcl_promptmanifestchange(Pid, WI) ->
    gen_server:cast(Pid, {manifest_change, WI}).

pcl_confirml0complete(Pid, FN, StartKey, EndKey) ->
    gen_server:cast(Pid, {levelzero_complete, FN, StartKey, EndKey}).

pcl_confirmdelete(Pid, FileName) ->
    gen_server:cast(Pid, {confirm_delete, FileName}).

pcl_getstartupsequencenumber(Pid) ->
    gen_server:call(Pid, get_startup_sqn, infinity).

pcl_registersnapshot(Pid, Snapshot) ->
    gen_server:call(Pid, {register_snapshot, Snapshot}, infinity).

pcl_releasesnapshot(Pid, Snapshot) ->
    gen_server:cast(Pid, {release_snapshot, Snapshot}).

pcl_loadsnapshot(Pid, Increment) ->
    gen_server:call(Pid, {load_snapshot, Increment}, infinity).

pcl_close(Pid) ->
    gen_server:call(Pid, close, 60000).

pcl_doom(Pid) ->
    gen_server:call(Pid, doom, 60000).

%%%============================================================================
%%% gen_server callbacks
%%%============================================================================

init([PCLopts]) ->
    case {PCLopts#penciller_options.root_path,
            PCLopts#penciller_options.start_snapshot} of
        {undefined, true} ->
            SrcPenciller = PCLopts#penciller_options.source_penciller,
            {ok, State} = pcl_registersnapshot(SrcPenciller, self()),
            leveled_log:log("P0001", [self()]),
            {ok, State#state{is_snapshot=true, source_penciller=SrcPenciller}};
        %% Need to do something about timeout
        {_RootPath, false} ->
            start_from_file(PCLopts)
    end.

handle_call({push_mem, PushedTree}, From, State=#state{is_snapshot=Snap})
                                                        when Snap == false ->
    % The push_mem process is as follows:
    %
    % 1 - Receive a gb_tree containing the latest Key/Value pairs (note that
    % we mean value from the perspective of the Ledger, not the full value
    % stored in the Inker)
    %
    % 2 - Check to see if there is a levelzero file pending.  If so, the
    % update must be returned.  If not the update can be accepted
    %
    % 3 - The Penciller can now reply to the Bookie to show if the push has
    % been accepted
    %
    % 4 - Update the cache:
    % a) Append the cache to the list
    % b) Add hashes for all the elements to the index
    %
    % Check the approximate size of the cache.  If it is over the maximum size,
    % trigger a background L0 file write and update state of levelzero_pending.
    case State#state.levelzero_pending or State#state.work_backlog of
        true ->
            leveled_log:log("P0018", [returned,
                                        State#state.levelzero_pending,
                                        State#state.work_backlog]),
            {reply, returned, State};
        false ->
            leveled_log:log("P0018", [ok, false, false]),
            gen_server:reply(From, ok),
            {noreply, update_levelzero(State#state.levelzero_index,
                                        State#state.levelzero_size,
                                        PushedTree,
                                        State#state.ledger_sqn,
                                        State#state.levelzero_cache,
                                        State)}
    end;
handle_call({fetch, Key}, _From, State) ->
    {reply,
        fetch_mem(Key,
                    State#state.manifest,
                    State#state.levelzero_index,
                    State#state.levelzero_cache),
        State};
handle_call({check_sqn, Key, SQN}, _From, State) ->
    {reply,
        compare_to_sqn(fetch_mem(Key,
                                    State#state.manifest,
                                    State#state.levelzero_index,
                                    State#state.levelzero_cache),
                        SQN),
        State};
handle_call({fetch_keys, StartKey, EndKey, AccFun, InitAcc, MaxKeys},
                _From,
                State=#state{snapshot_fully_loaded=Ready})
                                                        when Ready == true ->
    L0AsList =
        case State#state.levelzero_astree of
            undefined ->
                leveled_pmem:merge_trees(StartKey,
                                            EndKey,
                                            State#state.levelzero_cache,
                                            leveled_skiplist:empty());
            List ->
                List
        end,
    SFTiter = initiate_rangequery_frommanifest(StartKey,
                                                EndKey,
                                                State#state.manifest),
    Acc = keyfolder({L0AsList, SFTiter},
                    {StartKey, EndKey},
                    {AccFun, InitAcc},
                    MaxKeys),
    {reply, Acc, State#state{levelzero_astree = L0AsList}};
handle_call(work_for_clerk, From, State) ->
    {UpdState, Work} = return_work(State, From),
    {reply, Work, UpdState};
handle_call(get_startup_sqn, _From, State) ->
    {reply, State#state.persisted_sqn, State};
handle_call({register_snapshot, Snapshot}, _From, State) ->
    Rs = [{Snapshot, State#state.manifest_sqn}|State#state.registered_snapshots],
    {reply, {ok, State}, State#state{registered_snapshots = Rs}};
handle_call({load_snapshot, BookieIncrTree}, _From, State) ->
    L0D = leveled_pmem:add_to_index(State#state.levelzero_index,
                                    State#state.levelzero_size,
                                    BookieIncrTree,
                                    State#state.ledger_sqn,
                                    State#state.levelzero_cache),
    {LedgerSQN, L0Size, L0Index, L0Cache} = L0D,
    {reply, ok, State#state{levelzero_cache=L0Cache,
                            levelzero_index=L0Index,
                            levelzero_size=L0Size,
                            ledger_sqn=LedgerSQN,
                            snapshot_fully_loaded=true}};
handle_call({fetch_levelzero, Slot}, _From, State) ->
    {reply, lists:nth(Slot, State#state.levelzero_cache), State};
handle_call(close, _From, State) ->
    {stop, normal, ok, State};
handle_call(doom, _From, State) ->
    leveled_log:log("P0030", []),
    ManifestFP = State#state.root_path ++ "/" ++ ?MANIFEST_FP ++ "/",
    FilesFP = State#state.root_path ++ "/" ++ ?FILES_FP ++ "/",
    {stop, normal, {ok, [ManifestFP, FilesFP]}, State}.

handle_cast({manifest_change, WI}, State) ->
    {ok, UpdState} = commit_manifest_change(WI, State),
    ok = leveled_pclerk:clerk_manifestchange(State#state.clerk,
                                                confirm,
                                                false),
    {noreply, UpdState};
handle_cast({release_snapshot, Snapshot}, State) ->
    Rs = lists:keydelete(Snapshot, 1, State#state.registered_snapshots),
    leveled_log:log("P0003", [Snapshot]),
    leveled_log:log("P0004", [Rs]),
    {noreply, State#state{registered_snapshots=Rs}};
handle_cast({confirm_delete, FileName}, State=#state{is_snapshot=Snap})
                                                        when Snap == false ->
    Reply = confirm_delete(FileName,
                            State#state.unreferenced_files,
                            State#state.registered_snapshots),
    case Reply of
        {true, Pid} ->
            UF1 = lists:keydelete(FileName, 1, State#state.unreferenced_files),
            leveled_log:log("P0005", [FileName]),
            ok = leveled_sft:sft_deleteconfirmed(Pid),
            {noreply, State#state{unreferenced_files=UF1}};
        _ ->
            {noreply, State}
    end;
handle_cast({levelzero_complete, FN, StartKey, EndKey}, State) ->
    leveled_log:log("P0029", []),
    ManEntry = #manifest_entry{start_key=StartKey,
                                end_key=EndKey,
                                owner=State#state.levelzero_constructor,
                                filename=FN},
    UpdMan = lists:keystore(0, 1, State#state.manifest, {0, [ManEntry]}),
    % Prompt clerk to ask about work - do this for every L0 roll
    ok = leveled_pclerk:clerk_prompt(State#state.clerk),
    {noreply, State#state{levelzero_cache=[],
                            levelzero_pending=false,
                            levelzero_constructor=undefined,
                            levelzero_index=leveled_pmem:new_index(),
                            levelzero_size=0,
                            manifest=UpdMan,
                            persisted_sqn=State#state.ledger_sqn}}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(Reason, State=#state{is_snapshot=Snap}) when Snap == true ->
    ok = pcl_releasesnapshot(State#state.source_penciller, self()),
    leveled_log:log("P0007", [Reason]),
    ok;
terminate(Reason, State) ->
    %% When a Penciller shuts down it isn't safe to try and manage the safe
    %% finishing of any outstanding work.  The last committed manifest will
    %% be used.
    %%
    %% Level 0 files lie outside of the manifest, and so if there is no L0
    %% file present it is safe to write the current contents of memory.  If
    %% there is a L0 file present - then the memory can be dropped (it is
    %% recoverable from the ledger, and there should not be a lot to recover
    %% as presumably the ETS file has been recently flushed, hence the presence
    %% of a L0 file).
    %%
    %% The penciller should close each file in the unreferenced files, and
    %% then each file in the manifest, and cast a close on the clerk.
    %% The cast may not succeed as the clerk could be synchronously calling
    %% the penciller looking for a manifest commit
    %%
    leveled_log:log("P0008", [Reason]),
    MC = leveled_pclerk:clerk_manifestchange(State#state.clerk,
                                                return,
                                                true),
    UpdState = case MC of
                    {ok, WI} ->
                        {ok, NewState} = commit_manifest_change(WI, State),
                        Clerk = State#state.clerk,
                        ok = leveled_pclerk:clerk_manifestchange(Clerk,
                                                                    confirm,
                                                                    true),
                        NewState;
                    no_change ->
                        State
                end,
    case {UpdState#state.levelzero_pending,
            get_item(0, UpdState#state.manifest, []),
            UpdState#state.levelzero_size} of
        {false, [], 0} ->
            leveled_log:log("P0009", []);
        {false, [], _N} ->
            L0Pid = roll_memory(UpdState, true),
            ok = leveled_sft:sft_close(L0Pid);
        StatusTuple ->
            leveled_log:log("P0010", [StatusTuple])
    end,

    % Tidy shutdown of individual files
    ok = close_files(0, UpdState#state.manifest),
    lists:foreach(fun({_FN, Pid, _SN}) ->
                        ok = leveled_sft:sft_close(Pid) end,
                    UpdState#state.unreferenced_files),
    leveled_log:log("P0011", []),
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.


%%%============================================================================
%%% Internal functions
%%%============================================================================

start_from_file(PCLopts) ->
    RootPath = PCLopts#penciller_options.root_path,
    MaxTableSize = case PCLopts#penciller_options.max_inmemory_tablesize of
                        undefined ->
                            ?MAX_TABLESIZE;
                        M ->
                            M
                    end,
    {ok, MergeClerk} = leveled_pclerk:clerk_new(self()),
    InitState = #state{clerk=MergeClerk,
                        root_path=RootPath,
                        levelzero_index = leveled_pmem:new_index(),
                        levelzero_maxcachesize=MaxTableSize},

    %% Open manifest
    ManifestPath = InitState#state.root_path ++ "/" ++ ?MANIFEST_FP ++ "/",
    ok = filelib:ensure_dir(ManifestPath),
    {ok, Filenames} = file:list_dir(ManifestPath),
    CurrRegex = "nonzero_(?<MSN>[0-9]+)\\." ++ ?CURRENT_FILEX,
    ValidManSQNs = lists:foldl(fun(FN, Acc) ->
                                    case re:run(FN,
                                                CurrRegex,
                                                [{capture, ['MSN'], list}]) of
                                        nomatch ->
                                            Acc;
                                        {match, [Int]} when is_list(Int) ->
                                            Acc ++ [list_to_integer(Int)]
                                    end
                                end,
                                [],
                                Filenames),
    TopManSQN = lists:foldl(fun(X, MaxSQN) -> max(X, MaxSQN) end,
                            0,
                            ValidManSQNs),
    leveled_log:log("P0012", [TopManSQN]),
    ManUpdate = case TopManSQN of
                    0 ->
                        leveled_log:log("P0013", []),
                        {[], 0};
                    _ ->
                        CurrManFile = filepath(InitState#state.root_path,
                                                TopManSQN,
                                                current_manifest),
                        {ok, Bin} = file:read_file(CurrManFile),
                        Manifest = binary_to_term(Bin),
                        open_all_filesinmanifest(Manifest)
                end,

    {UpdManifest, MaxSQN} = ManUpdate,
    leveled_log:log("P0014", [MaxSQN]),

    %% Find any L0 files
    L0FN = filepath(RootPath, TopManSQN, new_merge_files) ++ "_0_0.sft",
    case filelib:is_file(L0FN) of
        true ->
            leveled_log:log("P0015", [L0FN]),
            {ok,
                L0Pid,
                {L0StartKey, L0EndKey}} = leveled_sft:sft_open(L0FN),
            L0SQN = leveled_sft:sft_getmaxsequencenumber(L0Pid),
            ManifestEntry = #manifest_entry{start_key=L0StartKey,
                                                end_key=L0EndKey,
                                                owner=L0Pid,
                                                filename=L0FN},
            UpdManifest2 = lists:keystore(0,
                                            1,
                                            UpdManifest,
                                            {0, [ManifestEntry]}),
            leveled_log:log("P0016", [L0SQN]),
            LedgerSQN = max(MaxSQN, L0SQN),
            {ok,
                InitState#state{manifest=UpdManifest2,
                                    manifest_sqn=TopManSQN,
                                    ledger_sqn=LedgerSQN,
                                    persisted_sqn=LedgerSQN}};
        false ->
            leveled_log:log("P0017", []),
            {ok,
                InitState#state{manifest=UpdManifest,
                                    manifest_sqn=TopManSQN,
                                    ledger_sqn=MaxSQN,
                                    persisted_sqn=MaxSQN}}
    end.


update_levelzero(L0Index, L0Size, PushedTree, LedgerSQN, L0Cache, State) ->
    Update = leveled_pmem:add_to_index(L0Index,
                                        L0Size,
                                        PushedTree,
                                        LedgerSQN,
                                        L0Cache),
    {MaxSQN, NewL0Size, UpdL0Index, UpdL0Cache} = Update,
    if
        MaxSQN >= LedgerSQN ->
            UpdState = State#state{levelzero_cache=UpdL0Cache,
                                    levelzero_index=UpdL0Index,
                                    levelzero_size=NewL0Size,
                                    ledger_sqn=MaxSQN},
            CacheTooBig = NewL0Size > State#state.levelzero_maxcachesize,
            Level0Free = length(get_item(0, State#state.manifest, [])) == 0,
            case {CacheTooBig, Level0Free} of
                {true, true} ->
                    L0Constructor = roll_memory(UpdState, false),
                    UpdState#state{levelzero_pending=true,
                                    levelzero_constructor=L0Constructor};
                _ ->
                    UpdState
            end;
        NewL0Size == L0Size ->
            State#state{levelzero_cache=L0Cache,
                        levelzero_index=L0Index,
                        levelzero_size=L0Size,
                        ledger_sqn=LedgerSQN}
    end.

%% Casting a large object (the levelzero cache) to the gen_server did not lead
%% to an immediate return as expected.  With 32K keys in the TreeList it could
%% take around 35-40ms.
%%
%% To avoid blocking this gen_server, the SFT file can request each item of the
%% cache one at a time.
%%
%% The Wait is set to false to use a cast when calling this in normal operation
%% whereas the Wait of true is used at shutdown

roll_memory(State, false) ->
    FileName = levelzero_filename(State),
    leveled_log:log("P0019", [FileName]),
    Opts = #sft_options{wait=false, penciller=self()},
    PCL = self(),
    FetchFun = fun(Slot) -> pcl_fetchlevelzero(PCL, Slot) end,
    % FetchFun = fun(Slot) -> lists:nth(Slot, State#state.levelzero_cache) end,
    R = leveled_sft:sft_newfroml0cache(FileName,
                                        length(State#state.levelzero_cache),
                                        FetchFun,
                                        Opts),
    {ok, Constructor, _} = R,
    Constructor;
roll_memory(State, true) ->
    FileName = levelzero_filename(State),
    Opts = #sft_options{wait=true},
    FetchFun = fun(Slot) -> lists:nth(Slot, State#state.levelzero_cache) end,
    R = leveled_sft:sft_newfroml0cache(FileName,
                                        length(State#state.levelzero_cache),
                                        FetchFun,
                                        Opts),
    {ok, Constructor, _} = R,
    Constructor.

levelzero_filename(State) ->
    MSN = State#state.manifest_sqn,
    FileName = State#state.root_path
                ++ "/" ++ ?FILES_FP ++ "/"
                ++ integer_to_list(MSN) ++ "_0_0",
    FileName.


fetch_mem(Key, Manifest, L0Index, L0Cache) ->
    L0Check = leveled_pmem:check_levelzero(Key, L0Index, L0Cache),
    case L0Check of
        {false, not_found} ->
            fetch(Key, Manifest, 0, fun leveled_sft:sft_get/2);
        {true, KV} ->
            KV
    end.

fetch(_Key, _Manifest, ?MAX_LEVELS + 1, _FetchFun) ->
    not_present;
fetch(Key, Manifest, Level, FetchFun) ->
    LevelManifest = get_item(Level, Manifest, []),
    case lists:foldl(fun(File, Acc) ->
                        case Acc of
                            not_present when
                                    Key >= File#manifest_entry.start_key,
                                    File#manifest_entry.end_key >= Key ->
                                File#manifest_entry.owner;
                            PidFound ->
                                PidFound
                        end end,
                        not_present,
                        LevelManifest) of
        not_present ->
            fetch(Key, Manifest, Level + 1, FetchFun);
        FileToCheck ->
            case FetchFun(FileToCheck, Key) of
                not_present ->
                    fetch(Key, Manifest, Level + 1, FetchFun);
                ObjectFound ->
                    ObjectFound
            end
    end.

compare_to_sqn(Obj, SQN) ->
    case Obj of
        not_present ->
            false;
        Obj ->
            SQNToCompare = leveled_codec:strip_to_seqonly(Obj),
            if
                SQNToCompare > SQN ->
                    false;
                true ->
                    true
            end
    end.

%% Work out what the current work queue should be
%%
%% The work queue should have lower level work at the front, and no work
%% should be added to the queue if a compaction worker has already been asked
%% to look at work at that level
%%
%% The full queue is calculated for logging purposes only

return_work(State, From) ->
    {WorkQ, BasementL} = assess_workqueue([], 0, State#state.manifest, 0),
    case length(WorkQ) of
        L when L > 0 ->
            Excess = lists:foldl(fun({_, _, OH}, Acc) -> Acc+OH end, 0, WorkQ),
            [{SrcLevel, Manifest, _Overhead}|_OtherWork] = WorkQ,
            leveled_log:log("P0020", [SrcLevel, From, Excess]),
            IsBasement = if
                            SrcLevel + 1 == BasementL ->
                                true;
                            true ->
                                false
                        end,
            Backlog = Excess >= ?WORKQUEUE_BACKLOG_TOLERANCE,
            case State#state.levelzero_pending of
                true ->
                    % Once the L0 file is completed there will be more work
                    % - so don't be busy doing other work now
                    leveled_log:log("P0021", []),
                    {State#state{work_backlog=Backlog}, none};
                false ->
                    %% No work currently outstanding
                    %% Can allocate work
                    NextSQN = State#state.manifest_sqn + 1,
                    FP = filepath(State#state.root_path,
                                    NextSQN,
                                    new_merge_files),
                    ManFile = filepath(State#state.root_path,
                                    NextSQN,
                                    pending_manifest),
                    WI = #penciller_work{next_sqn=NextSQN,
                                            clerk=From,
                                            src_level=SrcLevel,
                                            manifest=Manifest,
                                            start_time = os:timestamp(),
                                            ledger_filepath = FP,
                                            manifest_file = ManFile,
                                            target_is_basement = IsBasement},
                    {State#state{ongoing_work=[WI], work_backlog=Backlog}, WI}
            end;
        _ ->
            {State#state{work_backlog=false}, none}
    end.


close_files(?MAX_LEVELS - 1, _Manifest) ->
    ok;
close_files(Level, Manifest) ->
    LevelList = get_item(Level, Manifest, []),
    lists:foreach(fun(F) ->
                        ok = leveled_sft:sft_close(F#manifest_entry.owner) end,
                    LevelList),
    close_files(Level + 1, Manifest).


open_all_filesinmanifest(Manifest) ->
    open_all_filesinmanifest({Manifest, 0}, 0).

open_all_filesinmanifest(Result, ?MAX_LEVELS - 1) ->
    Result;
open_all_filesinmanifest({Manifest, TopSQN}, Level) ->
    LevelList = get_item(Level, Manifest, []),
    %% The Pids in the saved manifest relate to now closed references
    %% Need to roll over the manifest at this level starting new processes to
    %% replace them
    LvlR = lists:foldl(fun(F, {FL, FL_SQN}) ->
                            FN = F#manifest_entry.filename,
                            {ok, P, _Keys} = leveled_sft:sft_open(FN),
                            F_SQN = leveled_sft:sft_getmaxsequencenumber(P),
                            {lists:append(FL,
                                        [F#manifest_entry{owner = P}]),
                                max(FL_SQN, F_SQN)}
                            end,
                            {[], 0},
                            LevelList),
    %% Result is tuple of revised file list for this level in manifest, and
    %% the maximum sequence number seen at this level
    {LvlFL, LvlSQN} = LvlR,
    UpdManifest = lists:keystore(Level, 1, Manifest, {Level, LvlFL}),
    open_all_filesinmanifest({UpdManifest, max(TopSQN, LvlSQN)}, Level + 1).

print_manifest(Manifest) ->
    lists:foreach(fun(L) ->
                        leveled_log:log("P0022", [L]),
                        Level = get_item(L, Manifest, []),
                        lists:foreach(fun print_manifest_entry/1, Level)
                        end,
                    lists:seq(0, ?MAX_LEVELS - 1)),
    ok.

print_manifest_entry(Entry) ->
    {S1, S2, S3} = leveled_codec:print_key(Entry#manifest_entry.start_key),
    {E1, E2, E3} = leveled_codec:print_key(Entry#manifest_entry.end_key),
    leveled_log:log("P0023",
                    [S1, S2, S3, E1, E2, E3, Entry#manifest_entry.filename]).

initiate_rangequery_frommanifest(StartKey, EndKey, Manifest) ->
    CompareFun = fun(M) ->
                    C1 = StartKey > M#manifest_entry.end_key,
                    C2 = leveled_codec:endkey_passed(EndKey,
                                                        M#manifest_entry.start_key),
                    not (C1 or C2) end,
    lists:foldl(fun(L, AccL) ->
                    Level = get_item(L, Manifest, []),
                    FL = lists:foldl(fun(M, Acc) ->
                                            case CompareFun(M) of
                                                true ->
                                                    Acc ++ [{next_file, M}];
                                                false ->
                                                    Acc
                                            end end,
                                        [],
                                        Level),
                    case FL of
                        [] -> AccL;
                        FL -> AccL ++ [{L, FL}]
                    end
                    end,
                [],
                lists:seq(0, ?MAX_LEVELS - 1)).

%% Looks to find the best choice for the next key across the levels (other
%% than in-memory table)
%% In finding the best choice, the next key in a given level may be a next
%% block or next file pointer which will need to be expanded

find_nextkey(QueryArray, StartKey, EndKey) ->
    find_nextkey(QueryArray,
                    0,
                    {null, null},
                    {fun leveled_sft:sft_getkvrange/4, StartKey, EndKey, 1}).

find_nextkey(_QueryArray, LCnt, {null, null}, _QueryFunT)
                                                when LCnt > ?MAX_LEVELS ->
    % The array has been scanned without finding a best key - must be
    % exhausted - respond to indicate no more keys to be found by the
    % iterator
    no_more_keys;
find_nextkey(QueryArray, LCnt, {BKL, BestKV}, _QueryFunT)
                                                when LCnt > ?MAX_LEVELS ->
    % All levels have been scanned, so need to remove the best result from
    % the array, and return that array along with the best key/sqn/status
    % combination
    {BKL, [BestKV|Tail]} = lists:keyfind(BKL, 1, QueryArray),
    {lists:keyreplace(BKL, 1, QueryArray, {BKL, Tail}), BestKV};
find_nextkey(QueryArray, LCnt, {BestKeyLevel, BestKV}, QueryFunT) ->
    % Get the next key at this level
    {NextKey, RestOfKeys} = case lists:keyfind(LCnt, 1, QueryArray) of
                                false ->
                                    {null, null};
                                {LCnt, []} ->
                                    {null, null};
                                {LCnt, [NK|ROfKs]} ->
                                    {NK, ROfKs}
                            end,
    % Compare the next key at this level with the best key
    case {NextKey, BestKeyLevel, BestKV} of
        {null, BKL, BKV} ->
            % There is no key at this level - go to the next level
            find_nextkey(QueryArray, LCnt + 1, {BKL, BKV}, QueryFunT);
        {{next_file, ManifestEntry}, BKL, BKV} ->
            % The first key at this level is pointer to a file - need to query
            % the file to expand this level out before proceeding
            Owner = ManifestEntry#manifest_entry.owner,
            {QueryFun, StartKey, EndKey, ScanSize} = QueryFunT,
            QueryResult = QueryFun(Owner, StartKey, EndKey, ScanSize),
            NewEntry = {LCnt, QueryResult ++ RestOfKeys},
            % Need to loop around at this level (LCnt) as we have not yet
            % examined a real key at this level
            find_nextkey(lists:keyreplace(LCnt, 1, QueryArray, NewEntry),
                            LCnt,
                            {BKL, BKV},
                            QueryFunT);
        {{next, SFTpid, NewStartKey}, BKL, BKV} ->
            % The first key at this level is pointer within a file - need to
            % query the file to expand this level out before proceeding
            {QueryFun, _StartKey, EndKey, ScanSize} = QueryFunT,
            QueryResult = QueryFun(SFTpid, NewStartKey, EndKey, ScanSize),
            NewEntry = {LCnt, QueryResult ++ RestOfKeys},
            % Need to loop around at this level (LCnt) as we have not yet
            % examined a real key at this level
            find_nextkey(lists:keyreplace(LCnt, 1, QueryArray, NewEntry),
                            LCnt,
                            {BKL, BKV},
                            QueryFunT);
        {{Key, Val}, null, null} ->
            % No best key set - so can assume that this key is the best key,
            % and check the lower levels
            find_nextkey(QueryArray,
                            LCnt + 1,
                            {LCnt, {Key, Val}},
                            QueryFunT);
        {{Key, Val}, _BKL, {BestKey, _BestVal}} when Key < BestKey ->
            % There is a real key and a best key to compare, and the real key
            % at this level is before the best key, and so is now the new best
            % key
            % The QueryArray is not modified until we have checked all levels
            find_nextkey(QueryArray,
                            LCnt + 1,
                            {LCnt, {Key, Val}},
                            QueryFunT);
        {{Key, Val}, BKL, {BestKey, BestVal}} when Key == BestKey ->
            SQN = leveled_codec:strip_to_seqonly({Key, Val}),
            BestSQN = leveled_codec:strip_to_seqonly({BestKey, BestVal}),
            if
                SQN =< BestSQN ->
                    % This is a dominated key, so we need to skip over it
                    NewEntry = {LCnt, RestOfKeys},
                    find_nextkey(lists:keyreplace(LCnt, 1, QueryArray, NewEntry),
                                    LCnt + 1,
                                    {BKL, {BestKey, BestVal}},
                                    QueryFunT);
                SQN > BestSQN ->
                    % There is a real key at the front of this level and it has
                    % a higher SQN than the best key, so we should use this as
                    % the best key
                    % But we also need to remove the dominated key from the
                    % lower level in the query array
                    OldBestEntry = lists:keyfind(BKL, 1, QueryArray),
                    {BKL, [{BestKey, BestVal}|BestTail]} = OldBestEntry,
                    find_nextkey(lists:keyreplace(BKL,
                                                    1,
                                                    QueryArray,
                                                    {BKL, BestTail}),
                                    LCnt + 1,
                                    {LCnt, {Key, Val}},
                                    QueryFunT)
            end;
        {_, BKL, BKV} ->
            % This is not the best key
            find_nextkey(QueryArray, LCnt + 1, {BKL, BKV}, QueryFunT)
    end.


keyfolder(IMMiter, SFTiter, StartKey, EndKey, {AccFun, Acc}) ->
    keyfolder({IMMiter, SFTiter}, {StartKey, EndKey}, {AccFun, Acc}, -1).

keyfolder(_Iterators, _KeyRange, {_AccFun, Acc}, MaxKeys) when MaxKeys == 0 ->
    Acc;
keyfolder({[], SFTiter}, KeyRange, {AccFun, Acc}, MaxKeys) ->
    {StartKey, EndKey} = KeyRange,
    case find_nextkey(SFTiter, StartKey, EndKey) of
        no_more_keys ->
            Acc;
        {NxSFTiter, {SFTKey, SFTVal}} ->
            Acc1 = AccFun(SFTKey, SFTVal, Acc),
            keyfolder({[], NxSFTiter}, KeyRange, {AccFun, Acc1}, MaxKeys - 1)
    end;
keyfolder({[{IMMKey, IMMVal}|NxIMMiterator], SFTiterator}, KeyRange,
                                            {AccFun, Acc}, MaxKeys) ->
    {StartKey, EndKey} = KeyRange,
    case {IMMKey < StartKey, leveled_codec:endkey_passed(EndKey, IMMKey)} of
        {true, _} ->
            % Normally everything is pre-filtered, but the IMM iterator can
            % be re-used and so may be behind the StartKey if the StartKey has
            % advanced from the previous use
            keyfolder({NxIMMiterator, SFTiterator},
                        KeyRange,
                        {AccFun, Acc},
                        MaxKeys);
        {false, true} ->
            % There are no more keys in-range in the in-memory
            % iterator, so take action as if this iterator is empty
            % (see above)
            keyfolder({[], SFTiterator},
                        KeyRange,
                        {AccFun, Acc},
                        MaxKeys);
        {false, false} ->
            case find_nextkey(SFTiterator, StartKey, EndKey) of
                no_more_keys ->
                    % No more keys in range in the persisted store, so use the
                    % in-memory KV as the next
                    Acc1 = AccFun(IMMKey, IMMVal, Acc),
                    keyfolder({NxIMMiterator, SFTiterator},
                                KeyRange,
                                {AccFun, Acc1},
                                MaxKeys - 1);
                {NxSFTiterator, {SFTKey, SFTVal}} ->
                    % There is a next key, so need to know which is the
                    % next key between the two (and handle two keys
                    % with different sequence numbers).
                    case leveled_codec:key_dominates({IMMKey,
                                                            IMMVal},
                                                        {SFTKey,
                                                            SFTVal}) of
                        left_hand_first ->
                            Acc1 = AccFun(IMMKey, IMMVal, Acc),
                            keyfolder({NxIMMiterator, SFTiterator},
                                        KeyRange,
                                        {AccFun, Acc1},
                                        MaxKeys - 1);
                        right_hand_first ->
                            Acc1 = AccFun(SFTKey, SFTVal, Acc),
                            keyfolder({[{IMMKey, IMMVal}|NxIMMiterator],
                                            NxSFTiterator},
                                        KeyRange,
                                        {AccFun, Acc1},
                                        MaxKeys - 1);
                        left_hand_dominant ->
                            Acc1 = AccFun(IMMKey, IMMVal, Acc),
                            keyfolder({NxIMMiterator, NxSFTiterator},
                                        KeyRange,
                                        {AccFun, Acc1},
                                        MaxKeys - 1)
                    end
            end
    end.


assess_workqueue(WorkQ, ?MAX_LEVELS - 1, _Man, BasementLevel) ->
    {WorkQ, BasementLevel};
assess_workqueue(WorkQ, LevelToAssess, Man, BasementLevel) ->
    MaxFiles = get_item(LevelToAssess, ?LEVEL_SCALEFACTOR, 0),
    case length(get_item(LevelToAssess, Man, [])) of
        FileCount when FileCount > 0 ->
            NewWQ = maybe_append_work(WorkQ,
                                        LevelToAssess,
                                        Man,
                                        MaxFiles,
                                        FileCount),
            assess_workqueue(NewWQ, LevelToAssess + 1, Man, LevelToAssess);
        0 ->
            assess_workqueue(WorkQ, LevelToAssess + 1, Man, BasementLevel)
    end.


maybe_append_work(WorkQ, Level, Manifest,
                    MaxFiles, FileCount)
        when FileCount > MaxFiles ->
    Overhead = FileCount - MaxFiles,
    leveled_log:log("P0024", [Overhead, Level]),
    lists:append(WorkQ, [{Level, Manifest, Overhead}]);
maybe_append_work(WorkQ, _Level, _Manifest,
                    _MaxFiles, _FileCount) ->
    WorkQ.


get_item(Index, List, Default) ->
    case lists:keysearch(Index, 1, List) of
        {value, {Index, Value}} ->
            Value;
        false ->
            Default
    end.


%% Request a manifest change
%% The clerk should have completed the work, and created a new manifest
%% and persisted the new view of the manifest
%%
%% To complete the change of manifest:
%% - the state of the manifest file needs to be changed from pending to current
%% - the list of unreferenced files needs to be updated on State
%% - the current manifest needs to be updated on State
%% - the list of ongoing work needs to be cleared of this item

commit_manifest_change(ReturnedWorkItem, State) ->
    NewMSN = State#state.manifest_sqn + 1,
    [SentWorkItem] = State#state.ongoing_work,
    RootPath = State#state.root_path,
    UnreferencedFiles = State#state.unreferenced_files,

    if
        NewMSN == SentWorkItem#penciller_work.next_sqn ->
            WISrcLevel = SentWorkItem#penciller_work.src_level,
            leveled_log:log_timer("P0025",
                                    [SentWorkItem#penciller_work.next_sqn,
                                        WISrcLevel],
                                    SentWorkItem#penciller_work.start_time),
            ok = rename_manifest_files(RootPath, NewMSN),
            FilesToDelete = ReturnedWorkItem#penciller_work.unreferenced_files,
            UnreferencedFilesUpd = update_deletions(FilesToDelete,
                                                        NewMSN,
                                                        UnreferencedFiles),
            leveled_log:log("P0026", [NewMSN]),
            NewManifest = ReturnedWorkItem#penciller_work.new_manifest,

            CurrL0 = get_item(0, State#state.manifest, []),
            % If the work isn't L0 work, then we may have an uncommitted
            % manifest change at L0 - so add this back into the Manifest loop
            % state
            RevisedManifest = case {WISrcLevel, CurrL0} of
                                    {0, _} ->
                                        NewManifest;
                                    {_, []} ->
                                        NewManifest;
                                    {_, [L0ManEntry]} ->
                                        lists:keystore(0,
                                                        1,
                                                        NewManifest,
                                                        {0, [L0ManEntry]})
                                end,
            {ok, State#state{ongoing_work=[],
                                manifest_sqn=NewMSN,
                                manifest=RevisedManifest,
                                unreferenced_files=UnreferencedFilesUpd}}
    end.

rename_manifest_files(RootPath, NewMSN) ->
    OldFN = filepath(RootPath, NewMSN, pending_manifest),
    NewFN = filepath(RootPath, NewMSN, current_manifest),
    leveled_log:log("P0027", [OldFN, filelib:is_file(OldFN),
                                NewFN, filelib:is_file(NewFN)]),
    ok = file:rename(OldFN, NewFN).

filepath(RootPath, manifest) ->
    RootPath ++ "/" ++ ?MANIFEST_FP;
filepath(RootPath, files) ->
    RootPath ++ "/" ++ ?FILES_FP.

filepath(RootPath, NewMSN, pending_manifest) ->
    filepath(RootPath, manifest) ++ "/" ++ "nonzero_"
                ++ integer_to_list(NewMSN) ++ "." ++ ?PENDING_FILEX;
filepath(RootPath, NewMSN, current_manifest) ->
    filepath(RootPath, manifest) ++ "/" ++ "nonzero_"
                ++ integer_to_list(NewMSN) ++ "." ++ ?CURRENT_FILEX;
filepath(RootPath, NewMSN, new_merge_files) ->
    filepath(RootPath, files) ++ "/" ++ integer_to_list(NewMSN).

update_deletions([], _NewMSN, UnreferencedFiles) ->
    UnreferencedFiles;
update_deletions([ClearedFile|Tail], MSN, UnreferencedFiles) ->
    leveled_log:log("P0028", [ClearedFile#manifest_entry.filename]),
    update_deletions(Tail,
                        MSN,
                        lists:append(UnreferencedFiles,
                                        [{ClearedFile#manifest_entry.filename,
                                            ClearedFile#manifest_entry.owner,
                                            MSN}])).
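
%% Each cleared file is recorded as a {Filename, Owner, MSN} tuple, so that
%% confirm_delete/3 below can compare the manifest SQN at which the file
%% became unreferenced against the SQNs of registered snapshots.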

confirm_delete(Filename, UnreferencedFiles, RegisteredSnapshots) ->
    case lists:keyfind(Filename, 1, UnreferencedFiles) of
        {Filename, Pid, MSN} ->
            LowSQN = lists:foldl(fun({_, SQN}, MinSQN) -> min(SQN, MinSQN) end,
                                    infinity,
                                    RegisteredSnapshots),
            if
                MSN >= LowSQN ->
                    false;
                true ->
                    {true, Pid}
            end
    end.
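
%% e.g. for an unreferenced file recorded at MSN 10, snapshots registered at
%% SQNs 16 and 12 permit deletion ({true, Pid} is returned), but a snapshot
%% still registered at SQN 10 or lower blocks it (false is returned) - see
%% confirm_delete_test below.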

%%%============================================================================
%%% Test
%%%============================================================================

-ifdef(TEST).

clean_testdir(RootPath) ->
    clean_subdir(filepath(RootPath, manifest)),
    clean_subdir(filepath(RootPath, files)).

clean_subdir(DirPath) ->
    case filelib:is_dir(DirPath) of
        true ->
            {ok, Files} = file:list_dir(DirPath),
            lists:foreach(fun(FN) ->
                                File = filename:join(DirPath, FN),
                                ok = file:delete(File),
                                io:format("Success deleting ~s~n", [File])
                                end,
                            Files);
        false ->
            ok
    end.

compaction_work_assessment_test() ->
    L0 = [{{o, "B1", "K1", null}, {o, "B3", "K3", null}, dummy_pid}],
    L1 = [{{o, "B1", "K1", null}, {o, "B2", "K2", null}, dummy_pid},
            {{o, "B2", "K3", null}, {o, "B4", "K4", null}, dummy_pid}],
    Manifest = [{0, L0}, {1, L1}],
    {WorkQ1, 1} = assess_workqueue([], 0, Manifest, 0),
    ?assertMatch([{0, Manifest, 1}], WorkQ1),
    L1Alt = lists:append(L1,
                        [{{o, "B5", "K0001", null}, {o, "B5", "K9999", null},
                            dummy_pid},
                        {{o, "B6", "K0001", null}, {o, "B6", "K9999", null},
                            dummy_pid},
                        {{o, "B7", "K0001", null}, {o, "B7", "K9999", null},
                            dummy_pid},
                        {{o, "B8", "K0001", null}, {o, "B8", "K9999", null},
                            dummy_pid},
                        {{o, "B9", "K0001", null}, {o, "B9", "K9999", null},
                            dummy_pid},
                        {{o, "BA", "K0001", null}, {o, "BA", "K9999", null},
                            dummy_pid},
                        {{o, "BB", "K0001", null}, {o, "BB", "K9999", null},
                            dummy_pid}]),
    Manifest3 = [{0, []}, {1, L1Alt}],
    {WorkQ3, 1} = assess_workqueue([], 0, Manifest3, 0),
    ?assertMatch([{1, Manifest3, 1}], WorkQ3).

confirm_delete_test() ->
    Filename = 'test.sft',
    UnreferencedFiles = [{'other.sft', dummy_owner, 15},
                            {Filename, dummy_owner, 10}],
    RegisteredIterators1 = [{dummy_pid, 16}, {dummy_pid, 12}],
    R1 = confirm_delete(Filename, UnreferencedFiles, RegisteredIterators1),
    ?assertMatch({true, dummy_owner}, R1),
    RegisteredIterators2 = [{dummy_pid, 10}, {dummy_pid, 12}],
    R2 = confirm_delete(Filename, UnreferencedFiles, RegisteredIterators2),
    ?assertMatch(false, R2),
    RegisteredIterators3 = [{dummy_pid, 9}, {dummy_pid, 12}],
    R3 = confirm_delete(Filename, UnreferencedFiles, RegisteredIterators3),
    ?assertMatch(false, R3).
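
%% Test helper: push a list of keys to the Penciller as a skiplist, backing
%% off and retrying whenever the Penciller responds returned to signal that
%% the L0 slot is not yet free.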
maybe_pause_push(PCL, KL) ->
    T0 = leveled_skiplist:empty(),
    T1 = lists:foldl(fun({K, V}, Acc) -> leveled_skiplist:enter(K, V, Acc) end,
                        T0,
                        KL),
    case pcl_pushmem(PCL, T1) of
        returned ->
            timer:sleep(50),
            maybe_pause_push(PCL, KL);
        ok ->
            ok
    end.

simple_server_test() ->
    RootPath = "../test/ledger",
    clean_testdir(RootPath),
    {ok, PCL} = pcl_start(#penciller_options{root_path=RootPath,
                                                max_inmemory_tablesize=1000}),
    Key1 = {{o,"Bucket0001", "Key0001", null},
                {1, {active, infinity}, null}},
    KL1 = leveled_sft:generate_randomkeys({1000, 2}),
    Key2 = {{o,"Bucket0002", "Key0002", null},
                {1002, {active, infinity}, null}},
    KL2 = leveled_sft:generate_randomkeys({900, 1003}),
    % Keep below the max table size by having 900 not 1000
    Key3 = {{o,"Bucket0003", "Key0003", null},
                {2003, {active, infinity}, null}},
    KL3 = leveled_sft:generate_randomkeys({1000, 2004}),
    Key4 = {{o,"Bucket0004", "Key0004", null},
                {3004, {active, infinity}, null}},
    KL4 = leveled_sft:generate_randomkeys({1000, 3005}),
    ok = maybe_pause_push(PCL, [Key1]),
    ?assertMatch(Key1, pcl_fetch(PCL, {o,"Bucket0001", "Key0001", null})),
    ok = maybe_pause_push(PCL, KL1),
    ?assertMatch(Key1, pcl_fetch(PCL, {o,"Bucket0001", "Key0001", null})),
    ok = maybe_pause_push(PCL, [Key2]),
    ?assertMatch(Key1, pcl_fetch(PCL, {o,"Bucket0001", "Key0001", null})),
    ?assertMatch(Key2, pcl_fetch(PCL, {o,"Bucket0002", "Key0002", null})),

    ok = maybe_pause_push(PCL, KL2),
    ?assertMatch(Key2, pcl_fetch(PCL, {o,"Bucket0002", "Key0002", null})),
    ok = maybe_pause_push(PCL, [Key3]),

    ?assertMatch(Key1, pcl_fetch(PCL, {o,"Bucket0001", "Key0001", null})),
    ?assertMatch(Key2, pcl_fetch(PCL, {o,"Bucket0002", "Key0002", null})),
    ?assertMatch(Key3, pcl_fetch(PCL, {o,"Bucket0003", "Key0003", null})),
    timer:sleep(200),
    % This sleep should make sure that the merge to L1 has occurred
    % This will free up the L0 slot for the remainder to be written in shutdown
    ok = pcl_close(PCL),

    {ok, PCLr} = pcl_start(#penciller_options{root_path=RootPath,
                                                max_inmemory_tablesize=1000}),
    ?assertMatch(2003, pcl_getstartupsequencenumber(PCLr)),
    % ok = maybe_pause_push(PCLr, [Key2] ++ KL2 ++ [Key3]),

    ?assertMatch(Key1, pcl_fetch(PCLr, {o,"Bucket0001", "Key0001", null})),
    ?assertMatch(Key2, pcl_fetch(PCLr, {o,"Bucket0002", "Key0002", null})),
    ?assertMatch(Key3, pcl_fetch(PCLr, {o,"Bucket0003", "Key0003", null})),
    ok = maybe_pause_push(PCLr, KL3),
    ok = maybe_pause_push(PCLr, [Key4]),
    ok = maybe_pause_push(PCLr, KL4),
    ?assertMatch(Key1, pcl_fetch(PCLr, {o,"Bucket0001", "Key0001", null})),
    ?assertMatch(Key2, pcl_fetch(PCLr, {o,"Bucket0002", "Key0002", null})),
    ?assertMatch(Key3, pcl_fetch(PCLr, {o,"Bucket0003", "Key0003", null})),
    ?assertMatch(Key4, pcl_fetch(PCLr, {o,"Bucket0004", "Key0004", null})),
    SnapOpts = #penciller_options{start_snapshot = true,
                                    source_penciller = PCLr},
    {ok, PclSnap} = pcl_start(SnapOpts),
    ok = pcl_loadsnapshot(PclSnap, leveled_skiplist:empty()),
    ?assertMatch(Key1, pcl_fetch(PclSnap, {o,"Bucket0001", "Key0001", null})),
    ?assertMatch(Key2, pcl_fetch(PclSnap, {o,"Bucket0002", "Key0002", null})),
    ?assertMatch(Key3, pcl_fetch(PclSnap, {o,"Bucket0003", "Key0003", null})),
    ?assertMatch(Key4, pcl_fetch(PclSnap, {o,"Bucket0004", "Key0004", null})),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap,
                                                {o,
                                                    "Bucket0001",
                                                    "Key0001",
                                                    null},
                                                1)),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap,
                                                {o,
                                                    "Bucket0002",
                                                    "Key0002",
                                                    null},
                                                1002)),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap,
                                                {o,
                                                    "Bucket0003",
                                                    "Key0003",
                                                    null},
                                                2003)),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap,
                                                {o,
                                                    "Bucket0004",
                                                    "Key0004",
                                                    null},
                                                3004)),

    % Add some more keys and confirm that check sequence number still
    % sees the old version in the previous snapshot, but will see the new
    % version in a new snapshot
    Key1A = {{o,"Bucket0001", "Key0001", null},
                {4005, {active, infinity}, null}},
    KL1A = leveled_sft:generate_randomkeys({2000, 4006}),
    ok = maybe_pause_push(PCLr, [Key1A]),
    ok = maybe_pause_push(PCLr, KL1A),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap,
                                                {o,
                                                    "Bucket0001",
                                                    "Key0001",
                                                    null},
                                                1)),
    ok = pcl_close(PclSnap),

    % Ignore a fake pending manifest on startup
    ok = file:write_file(RootPath ++ "/" ++ ?MANIFEST_FP ++ "/nonzero_99.pnd",
                            term_to_binary("Hello")),

    {ok, PclSnap2} = pcl_start(SnapOpts),
    ok = pcl_loadsnapshot(PclSnap2, leveled_skiplist:empty()),
    ?assertMatch(false, pcl_checksequencenumber(PclSnap2,
                                                {o,
                                                    "Bucket0001",
                                                    "Key0001",
                                                    null},
                                                1)),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap2,
                                                {o,
                                                    "Bucket0001",
                                                    "Key0001",
                                                    null},
                                                4005)),
    ?assertMatch(true, pcl_checksequencenumber(PclSnap2,
                                                {o,
                                                    "Bucket0002",
                                                    "Key0002",
                                                    null},
                                                1002)),
    ok = pcl_close(PclSnap2),
    ok = pcl_close(PCLr),
    clean_testdir(RootPath).

rangequery_manifest_test() ->
    {E1,
        E2,
        E3} = {#manifest_entry{start_key={i, "Bucket1", {"Idx1", "Fld1"}, "K8"},
                                end_key={i, "Bucket1", {"Idx1", "Fld9"}, "K93"},
                                filename="Z1"},
                #manifest_entry{start_key={i, "Bucket1", {"Idx1", "Fld9"}, "K97"},
                                end_key={o, "Bucket1", "K71", null},
                                filename="Z2"},
                #manifest_entry{start_key={o, "Bucket1", "K75", null},
                                end_key={o, "Bucket1", "K993", null},
                                filename="Z3"}},
    {E4,
        E5,
        E6} = {#manifest_entry{start_key={i, "Bucket1", {"Idx1", "Fld1"}, "K8"},
                                end_key={i, "Bucket1", {"Idx1", "Fld7"}, "K93"},
                                filename="Z4"},
                #manifest_entry{start_key={i, "Bucket1", {"Idx1", "Fld7"}, "K97"},
                                end_key={o, "Bucket1", "K78", null},
                                filename="Z5"},
                #manifest_entry{start_key={o, "Bucket1", "K81", null},
                                end_key={o, "Bucket1", "K996", null},
                                filename="Z6"}},
    Man = [{1, [E1, E2, E3]}, {2, [E4, E5, E6]}],
    R1 = initiate_rangequery_frommanifest({o, "Bucket1", "K711", null},
                                            {o, "Bucket1", "K999", null},
                                            Man),
    ?assertMatch([{1, [{next_file, E3}]},
                    {2, [{next_file, E5}, {next_file, E6}]}],
                    R1),
    R2 = initiate_rangequery_frommanifest({i, "Bucket1", {"Idx1", "Fld8"}, null},
                                            {i, "Bucket1", {"Idx1", "Fld8"}, null},
                                            Man),
    ?assertMatch([{1, [{next_file, E1}]}, {2, [{next_file, E5}]}],
                    R2),
    R3 = initiate_rangequery_frommanifest({i, "Bucket1", {"Idx0", "Fld8"}, null},
                                            {i, "Bucket1", {"Idx0", "Fld9"}, null},
                                            Man),
    ?assertMatch([], R3).

print_manifest_test() ->
    M1 = #manifest_entry{start_key={i, "Bucket1", {<<"Idx1">>, "Fld1"}, "K8"},
                            end_key={i, 4565, {"Idx1", "Fld9"}, "K93"},
                            filename="Z1"},
    M2 = #manifest_entry{start_key={i, self(), {null, "Fld1"}, "K8"},
                            end_key={i, <<200:32/integer>>, {"Idx1", "Fld9"}, "K93"},
                            filename="Z1"},
    M3 = #manifest_entry{start_key={?STD_TAG, self(), {null, "Fld1"}, "K8"},
                            end_key={?RIAK_TAG, <<200:32/integer>>, {"Idx1", "Fld9"}, "K93"},
                            filename="Z1"},
    print_manifest([{1, [M1, M2, M3]}]).

simple_findnextkey_test() ->
    QueryArray = [
        {2, [{{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}},
                {{o, "Bucket1", "Key5"}, {4, {active, infinity}, null}}]},
        {3, [{{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}]},
        {5, [{{o, "Bucket1", "Key2"}, {2, {active, infinity}, null}}]}
    ],
    {Array2, KV1} = find_nextkey(QueryArray,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}}, KV1),
    {Array3, KV2} = find_nextkey(Array2,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key2"}, {2, {active, infinity}, null}}, KV2),
    {Array4, KV3} = find_nextkey(Array3,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}, KV3),
    {Array5, KV4} = find_nextkey(Array4,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key5"}, {4, {active, infinity}, null}}, KV4),
    ER = find_nextkey(Array5,
                        {o, "Bucket1", "Key0"},
                        {o, "Bucket1", "Key5"}),
    ?assertMatch(no_more_keys, ER).

sqnoverlap_findnextkey_test() ->
    QueryArray = [
        {2, [{{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}},
                {{o, "Bucket1", "Key5"}, {4, {active, infinity}, null}}]},
        {3, [{{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}]},
        {5, [{{o, "Bucket1", "Key5"}, {2, {active, infinity}, null}}]}
    ],
    {Array2, KV1} = find_nextkey(QueryArray,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}}, KV1),
    {Array3, KV2} = find_nextkey(Array2,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}, KV2),
    {Array4, KV3} = find_nextkey(Array3,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key5"}, {4, {active, infinity}, null}}, KV3),
    ER = find_nextkey(Array4,
                        {o, "Bucket1", "Key0"},
                        {o, "Bucket1", "Key5"}),
    ?assertMatch(no_more_keys, ER).

sqnoverlap_otherway_findnextkey_test() ->
    QueryArray = [
        {2, [{{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}},
                {{o, "Bucket1", "Key5"}, {1, {active, infinity}, null}}]},
        {3, [{{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}]},
        {5, [{{o, "Bucket1", "Key5"}, {2, {active, infinity}, null}}]}
    ],
    {Array2, KV1} = find_nextkey(QueryArray,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}}, KV1),
    {Array3, KV2} = find_nextkey(Array2,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}, KV2),
    {Array4, KV3} = find_nextkey(Array3,
                                    {o, "Bucket1", "Key0"},
                                    {o, "Bucket1", "Key5"}),
    ?assertMatch({{o, "Bucket1", "Key5"}, {2, {active, infinity}, null}}, KV3),
    ER = find_nextkey(Array4,
                        {o, "Bucket1", "Key0"},
                        {o, "Bucket1", "Key5"}),
    ?assertMatch(no_more_keys, ER).

foldwithimm_simple_test() ->
    QueryArray = [
        {2, [{{o, "Bucket1", "Key1"}, {5, {active, infinity}, null}},
                {{o, "Bucket1", "Key5"}, {1, {active, infinity}, null}}]},
        {3, [{{o, "Bucket1", "Key3"}, {3, {active, infinity}, null}}]},
        {5, [{{o, "Bucket1", "Key5"}, {2, {active, infinity}, null}}]}
    ],
    IMM0 = leveled_skiplist:enter({o, "Bucket1", "Key6"},
                                    {7, {active, infinity}, null},
                                    leveled_skiplist:empty()),
    IMM1 = leveled_skiplist:enter({o, "Bucket1", "Key1"},
                                    {8, {active, infinity}, null},
                                    IMM0),
    IMM2 = leveled_skiplist:enter({o, "Bucket1", "Key8"},
                                    {9, {active, infinity}, null},
                                    IMM1),
    IMMiter = leveled_skiplist:to_range(IMM2, {o, "Bucket1", "Key1"}),
    AccFun = fun(K, V, Acc) -> SQN = leveled_codec:strip_to_seqonly({K, V}),
                                Acc ++ [{K, SQN}] end,
    Acc = keyfolder(IMMiter,
                    QueryArray,
                    {o, "Bucket1", "Key1"}, {o, "Bucket1", "Key6"},
                    {AccFun, []}),
    ?assertMatch([{{o, "Bucket1", "Key1"}, 8},
                    {{o, "Bucket1", "Key3"}, 3},
                    {{o, "Bucket1", "Key5"}, 2},
                    {{o, "Bucket1", "Key6"}, 7}], Acc),

    IMM1A = leveled_skiplist:enter({o, "Bucket1", "Key1"},
                                    {8, {active, infinity}, null},
                                    leveled_skiplist:empty()),
    IMMiterA = leveled_skiplist:to_range(IMM1A, {o, "Bucket1", "Key1"}),
    AccA = keyfolder(IMMiterA,
                        QueryArray,
                        {o, "Bucket1", "Key1"}, {o, "Bucket1", "Key6"},
                        {AccFun, []}),
    ?assertMatch([{{o, "Bucket1", "Key1"}, 8},
                    {{o, "Bucket1", "Key3"}, 3},
                    {{o, "Bucket1", "Key5"}, 2}], AccA),

    IMM3 = leveled_skiplist:enter({o, "Bucket1", "Key4"},
                                    {10, {active, infinity}, null},
                                    IMM2),
    IMMiterB = leveled_skiplist:to_range(IMM3, {o, "Bucket1", "Key1"}),
    AccB = keyfolder(IMMiterB,
                        QueryArray,
                        {o, "Bucket1", "Key1"}, {o, "Bucket1", "Key6"},
                        {AccFun, []}),
    ?assertMatch([{{o, "Bucket1", "Key1"}, 8},
                    {{o, "Bucket1", "Key3"}, 3},
                    {{o, "Bucket1", "Key4"}, 10},
                    {{o, "Bucket1", "Key5"}, 2},
                    {{o, "Bucket1", "Key6"}, 7}], AccB).

create_file_test() ->
    Filename = "../test/new_file.sft",
    ok = file:write_file(Filename, term_to_binary("hello")),
    KVL = lists:usort(leveled_sft:generate_randomkeys(10000)),
    Tree = leveled_skiplist:from_list(KVL),
    FetchFun = fun(Slot) -> lists:nth(Slot, [Tree]) end,
    {ok,
        SP,
        noreply} = leveled_sft:sft_newfroml0cache(Filename,
                                                    1,
                                                    FetchFun,
                                                    #sft_options{wait=false}),
    lists:foreach(fun(X) ->
                        case checkready(SP) of
                            timeout ->
                                timer:sleep(X);
                            _ ->
                                ok
                        end end,
                    [50, 50, 50, 50, 50]),
    {ok, SrcFN, StartKey, EndKey} = checkready(SP),
    io:format("StartKey ~w EndKey ~w~n", [StartKey, EndKey]),
    ?assertMatch({o, _, _, _}, StartKey),
    ?assertMatch({o, _, _, _}, EndKey),
    ?assertMatch("../test/new_file.sft", SrcFN),
    ok = leveled_sft:sft_clear(SP),
    {ok, Bin} = file:read_file("../test/new_file.sft.discarded"),
    ?assertMatch("hello", binary_to_term(Bin)).

commit_manifest_test() ->
    Sent_WI = #penciller_work{next_sqn=1,
                                src_level=0,
                                start_time=os:timestamp()},
    Resp_WI = #penciller_work{next_sqn=1,
                                src_level=0},
    State = #state{ongoing_work = [Sent_WI],
                    root_path = "test",
                    manifest_sqn = 0},
    ManifestFP = "test" ++ "/" ++ ?MANIFEST_FP ++ "/",
    ok = filelib:ensure_dir(ManifestFP),
    ok = file:write_file(ManifestFP ++ "nonzero_1.pnd",
                            term_to_binary("dummy data")),

    L1_0 = [{1, [#manifest_entry{filename="1.sft"}]}],
    Resp_WI0 = Resp_WI#penciller_work{new_manifest=L1_0,
                                        unreferenced_files=[]},
    {ok, State0} = commit_manifest_change(Resp_WI0, State),
    ?assertMatch(1, State0#state.manifest_sqn),
    ?assertMatch([], get_item(0, State0#state.manifest, [])),

    L0Entry = [#manifest_entry{filename="0.sft"}],
    ManifestPlus = [{0, L0Entry}|State0#state.manifest],

    NxtSent_WI = #penciller_work{next_sqn=2,
                                    src_level=1,
                                    start_time=os:timestamp()},
    NxtResp_WI = #penciller_work{next_sqn=2,
                                    src_level=1},
    State1 = State0#state{ongoing_work=[NxtSent_WI],
                            manifest = ManifestPlus},

    ok = file:write_file(ManifestFP ++ "nonzero_2.pnd",
                            term_to_binary("dummy data")),

    L2_0 = [#manifest_entry{filename="2.sft"}],
    NxtResp_WI0 = NxtResp_WI#penciller_work{new_manifest=[{2, L2_0}],
                                            unreferenced_files=[]},
    {ok, State2} = commit_manifest_change(NxtResp_WI0, State1),

    ?assertMatch(1, State1#state.manifest_sqn),
    ?assertMatch(2, State2#state.manifest_sqn),
    ?assertMatch(L0Entry, get_item(0, State2#state.manifest, [])),
    ?assertMatch(L2_0, get_item(2, State2#state.manifest, [])),

    clean_testdir(State#state.root_path).

badmanifest_test() ->
    RootPath = "../test/ledger",
    clean_testdir(RootPath),
    {ok, PCL} = pcl_start(#penciller_options{root_path=RootPath,
                                                max_inmemory_tablesize=1000}),
    Key1 = {{o,"Bucket0001", "Key0001", null},
                {1001, {active, infinity}, null}},
    KL1 = leveled_sft:generate_randomkeys({1000, 1}),

    ok = maybe_pause_push(PCL, KL1 ++ [Key1]),
    %% Added together, as split apart there will be a race between the close
    %% call to the penciller and the second fetch of the cache entry
    ?assertMatch(Key1, pcl_fetch(PCL, {o,"Bucket0001", "Key0001", null})),

    timer:sleep(100), % Avoids confusion if L0 file not written before close
    ok = pcl_close(PCL),

    ManifestFP = filepath(RootPath, manifest),
    ok = file:write_file(filename:join(ManifestFP, "yeszero_123.man"),
                            term_to_binary("hello")),
    {ok, PCLr} = pcl_start(#penciller_options{root_path=RootPath,
                                                max_inmemory_tablesize=1000}),
    ?assertMatch(Key1, pcl_fetch(PCLr, {o,"Bucket0001", "Key0001", null})),
    ok = pcl_close(PCLr),
    clean_testdir(RootPath).
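
%% Test helper: wrap the SFT readiness check so that a gen_server call
%% timeout surfaces as the atom timeout rather than crashing the test.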
checkready(Pid) ->
    try
        leveled_sft:sft_checkready(Pid)
    catch
        exit:{timeout, _} ->
            timeout
    end.

coverage_cheat_test() ->
    {noreply, _State0} = handle_info(timeout, #state{}),
    {ok, _State1} = code_change(null, #state{}, null).

-endif.