%% -------- Inker's Clerk ---------
%%
%% The Inker's clerk runs compaction jobs on behalf of the Inker, informing the
%% Inker of any manifest changes when complete.
%%
%% -------- Value Compaction ---------
%%
%% Compaction requires the Inker to have three different types of keys
%% * stnd - A standard key of the form {SQN, stnd, LedgerKey} which maps to a
%% value of {Object, KeyDeltas}
%% * tomb - A tombstone for a LedgerKey {SQN, tomb, LedgerKey}
%% * keyd - An object containing key deltas only of the form
%% {SQN, keyd, LedgerKey} which maps to a value of {KeyDeltas}
%%
%% Each LedgerKey has a Tag, and for each Tag there should be a compaction
%% strategy, which will be set to one of the following:
%% * retain - KeyDeltas must be retained permanently, only values can be
%% compacted (if replaced or not_present in the ledger)
%% * recalc - The full object can be removed through compaction (if replaced or
%% not_present in the ledger), as each object with that tag can have the Key
%% Deltas recreated by passing into an assigned recalc function {LedgerKey,
%% SQN, Object, KeyDeltas, PencillerSnapshot}
%% * recovr - At compaction time this is equivalent to recalc, except that
%% KeyDeltas are lost when reloading the Ledger from the Journal, and it is
%% assumed that those deltas will be resolved through external anti-entropy
%% (e.g. read repair or AAE) - or alternatively the risk of loss of persisted
%% data from the ledger is accepted for this data type
%%
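%% As an illustrative sketch (not runtime code in this module), a per-Tag
%% strategy is expressed as a list of {Tag, Strategy} pairs, and can be built
%% with the helper behind ?DEFAULT_RELOAD_STRATEGY below; the choice of recovr
%% for ?STD_TAG here is only an example (the same pair is used in the unit
%% tests at the end of this module):
%%
%%    ReloadStrategy = leveled_codec:inker_reload_strategy([{?STD_TAG, recovr}]),
%%    ClerkOpts = #iclerk_options{reload_strategy = ReloadStrategy},
%%    {ok, Clerk} = clerk_new(ClerkOpts).
%%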
%% During the compaction process for the Journal, the file chosen for
%% compaction is scanned in SQN order, and a FilterFun is passed (which will
%% normally perform a check against a snapshot of the persisted part of the
%% Ledger). If the given key is of type stnd, and this object is no longer the
%% active object under the LedgerKey, then the object can be compacted out of
%% the journal. This will lead to either its removal (if the strategy for the
%% Tag is recovr or recalc), or its replacement with a KeyDelta object.
%%
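%% A minimal sketch of the FilterFun shape (the unit tests below use a fun of
%% this form against a simple list in place of a Penciller snapshot); it is
%% called as FilterFun(FilterServer, LedgerKey, SQN) and should return true
%% only when that SQN is still the active sequence number for the key:
%%
%%    FilterFun =
%%        fun(LedgerSnapshot, LedgerKey, SQN) ->
%%            case lists:keyfind(SQN, 1, LedgerSnapshot) of
%%                {SQN, LedgerKey} -> true;
%%                _ -> false
%%            end
%%        end.
%%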
%% Tombstones cannot be reaped through this compaction process.
%%
%% Currently, KeyDeltas are also reaped if the LedgerKey has been updated and
%% the Tag has a recovr strategy. This may be the case when KeyDeltas are used
%% as a way of directly representing a change, and where anti-entropy can
%% recover from a loss.
%%
%% -------- Removing Compacted Files ---------
%%
%% Once a compaction job is complete, and the manifest change has been
%% committed, the individual journal files will get a deletion prompt. The
%% Journal processes should copy the file to the waste folder, before erasing
%% themselves.
%%
%% The Inker will have a waste duration setting, and before running compaction
%% should delete all over-age items (using the file modified date) from the
%% waste.
%%
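%% A minimal sketch of the over-age check applied to each file found in the
%% waste folder (the same calendar arithmetic is used by clear_waste/1 within
%% this module; FileName and WasteRetentionPeriod are illustrative parameter
%% names only):
%%
%%    OverAge =
%%        fun(FileName, WasteRetentionPeriod) ->
%%            Now = calendar:datetime_to_gregorian_seconds(calendar:local_time()),
%%            LMD = filelib:last_modified(FileName),
%%            Now - calendar:datetime_to_gregorian_seconds(LMD)
%%                >= WasteRetentionPeriod
%%        end.
%%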
%% -------- Tombstone Reaping ---------
%%
%% Value compaction does not remove tombstones from the database, and so a
%% separate compaction job is required for this.
%%
%% Tombstones can only be reaped for Tags set to recovr or recalc.
%%
%% The tombstone reaping process should select a file to compact, and then
%% take that file and discover the LedgerKeys of all reapable tombstones.
%% The ledger should then be scanned from SQN 0 looking for unreaped objects
%% before the tombstone. If no such objects exist for that tombstone, it can
%% now be reaped as part of the compaction job.
%%
%% Other tombstones cannot be reaped, as otherwise on loading a ledger an old
%% version of the object may re-emerge.

-module(leveled_iclerk).

-behaviour(gen_server).

-include("include/leveled.hrl").

-export([init/1,
        handle_call/3,
        handle_cast/2,
        handle_info/2,
        terminate/2,
        clerk_new/1,
        clerk_compact/7,
        clerk_hashtablecalc/3,
        clerk_trim/3,
        clerk_stop/1,
        code_change/3]).

-export([schedule_compaction/3]).

-include_lib("eunit/include/eunit.hrl").

-define(JOURNAL_FILEX, "cdb").
-define(PENDING_FILEX, "pnd").
-define(SAMPLE_SIZE, 100).
-define(BATCH_SIZE, 32).
-define(BATCHES_TO_CHECK, 8).
-define(CRC_SIZE, 4).
-define(DEFAULT_RELOAD_STRATEGY, leveled_codec:inker_reload_strategy([])).
-define(INTERVALS_PER_HOUR, 4).
-define(MAX_COMPACTION_RUN, 8).
-define(SINGLEFILE_COMPACTION_TARGET, 50.0).
-define(MAXRUNLENGTH_COMPACTION_TARGET, 75.0).

-record(state, {inker :: pid() | undefined,
                max_run_length :: integer() | undefined,
                cdb_options,
                waste_retention_period :: integer() | undefined,
                waste_path :: string() | undefined,
                reload_strategy = ?DEFAULT_RELOAD_STRATEGY :: list(),
                singlefile_compactionperc = ?SINGLEFILE_COMPACTION_TARGET :: float(),
                maxrunlength_compactionperc = ?MAXRUNLENGTH_COMPACTION_TARGET :: float(),
                compression_method = native :: lz4|native}).

-record(candidate, {low_sqn :: integer() | undefined,
                    filename :: string() | undefined,
                    journal :: pid() | undefined,
                    compaction_perc :: float() | undefined}).

-type iclerk_options() :: #iclerk_options{}.

%%%============================================================================
|
|
|
|
%%% API
|
|
|
|
%%%============================================================================
|
|
|
|
|
2017-11-09 12:42:49 +00:00
|
|
|
-spec clerk_new(iclerk_options()) -> {ok, pid()}.
|
|
|
|
%% @doc
|
|
|
|
%% Generate a new clerk
|
2016-09-27 14:58:26 +01:00
|
|
|
clerk_new(InkerClerkOpts) ->
|
2018-06-28 12:16:43 +01:00
|
|
|
gen_server:start_link(?MODULE, [InkerClerkOpts], []).
|
2017-11-09 12:42:49 +00:00
|
|
|
|
|
|
|
-spec clerk_compact(pid(), pid(),
|
|
|
|
fun(), fun(), fun(),
|
|
|
|
pid(), integer()) -> ok.
|
|
|
|
%% @doc
|
|
|
|
%% Trigger a compaction for this clerk if the threshold of data recovery has
|
|
|
|
%% been met
|
2017-05-26 10:51:30 +01:00
|
|
|
clerk_compact(Pid, Checker, InitiateFun, CloseFun, FilterFun, Inker, TimeO) ->
|
2016-09-28 18:26:52 +01:00
|
|
|
gen_server:cast(Pid,
|
|
|
|
{compact,
|
|
|
|
Checker,
|
|
|
|
InitiateFun,
|
2017-05-26 10:51:30 +01:00
|
|
|
CloseFun,
|
2016-09-28 18:26:52 +01:00
|
|
|
FilterFun,
|
|
|
|
Inker,
|
2017-05-26 10:51:30 +01:00
|
|
|
TimeO}).
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2018-02-15 16:14:46 +00:00
|
|
|
-spec clerk_trim(pid(), pid(), integer()) -> ok.
|
|
|
|
%% @doc
|
|
|
|
%% Trim the Inker back to the persisted SQN
|
|
|
|
clerk_trim(Pid, Inker, PersistedSQN) ->
|
|
|
|
gen_server:cast(Pid, {trim, Inker, PersistedSQN}).
|
|
|
|
|
2017-11-09 12:42:49 +00:00
|
|
|
-spec clerk_hashtablecalc(ets:tid(), integer(), pid()) -> ok.
%% @doc
%% Spawn a dedicated clerk for the process of calculating the binary view
%% of the hashtable in the CDB file - so that the file is not blocked during
%% this calculation
|
2016-10-14 13:36:12 +01:00
|
|
|
clerk_hashtablecalc(HashTree, StartPos, CDBpid) ->
|
|
|
|
{ok, Clerk} = gen_server:start(?MODULE, [#iclerk_options{}], []),
|
|
|
|
gen_server:cast(Clerk, {hashtable_calc, HashTree, StartPos, CDBpid}).
|
|
|
|
|
2017-11-09 12:42:49 +00:00
|
|
|
-spec clerk_stop(pid()) -> ok.
|
|
|
|
%% @doc
|
|
|
|
%% Stop the clerk
|
2016-09-20 16:13:36 +01:00
|
|
|
clerk_stop(Pid) ->
|
|
|
|
gen_server:cast(Pid, stop).
|
|
|
|
|
|
|
|
%%%============================================================================
|
|
|
|
%%% gen_server callbacks
|
|
|
|
%%%============================================================================
|
|
|
|
|
2016-09-27 14:58:26 +01:00
|
|
|
init([IClerkOpts]) ->
|
2016-10-25 23:13:14 +01:00
|
|
|
ReloadStrategy = IClerkOpts#iclerk_options.reload_strategy,
|
2016-11-14 11:17:14 +00:00
|
|
|
CDBopts = IClerkOpts#iclerk_options.cdb_options,
|
|
|
|
WP = CDBopts#cdb_options.waste_path,
|
2017-11-08 12:58:09 +00:00
|
|
|
WRP = IClerkOpts#iclerk_options.waste_retention_period,
|
|
|
|
|
|
|
|
MRL =
|
|
|
|
case IClerkOpts#iclerk_options.max_run_length of
|
|
|
|
undefined ->
|
|
|
|
?MAX_COMPACTION_RUN;
|
|
|
|
MRL0 ->
|
|
|
|
MRL0
|
|
|
|
end,
|
2018-07-23 12:46:42 +01:00
|
|
|
|
|
|
|
SFL_CompPerc =
|
|
|
|
case IClerkOpts#iclerk_options.singlefile_compactionperc of
|
|
|
|
undefined ->
|
|
|
|
?SINGLEFILE_COMPACTION_TARGET;
|
|
|
|
SFLCP when is_float(SFLCP) ->
|
|
|
|
SFLCP
|
|
|
|
end,
|
|
|
|
MRL_CompPerc =
|
|
|
|
case IClerkOpts#iclerk_options.maxrunlength_compactionperc of
|
|
|
|
undefined ->
|
|
|
|
?MAXRUNLENGTH_COMPACTION_TARGET;
|
|
|
|
MRLCP when is_float(MRLCP) ->
|
|
|
|
MRLCP
|
|
|
|
end,
|
|
|
|
|
2016-11-14 11:17:14 +00:00
|
|
|
{ok, #state{max_run_length = MRL,
|
2016-09-27 14:58:26 +01:00
|
|
|
inker = IClerkOpts#iclerk_options.inker,
|
2016-11-14 11:17:14 +00:00
|
|
|
cdb_options = CDBopts,
|
|
|
|
reload_strategy = ReloadStrategy,
|
|
|
|
waste_path = WP,
|
2017-11-06 15:54:58 +00:00
|
|
|
waste_retention_period = WRP,
|
2018-07-23 12:46:42 +01:00
|
|
|
singlefile_compactionperc = SFL_CompPerc,
|
|
|
|
maxrunlength_compactionperc = MRL_CompPerc,
|
2017-11-06 15:54:58 +00:00
|
|
|
compression_method =
|
|
|
|
IClerkOpts#iclerk_options.compression_method}}.
|
2016-09-20 16:13:36 +01:00
|
|
|
|
2016-09-27 14:58:26 +01:00
|
|
|
handle_call(_Msg, _From, State) ->
|
2016-10-03 23:34:28 +01:00
|
|
|
{reply, not_supported, State}.
|
2016-09-20 16:13:36 +01:00
|
|
|
|
2017-05-26 10:51:30 +01:00
|
|
|
handle_cast({compact, Checker, InitiateFun, CloseFun, FilterFun, Inker, _TO},
|
2016-09-28 18:26:52 +01:00
|
|
|
State) ->
|
2016-11-14 11:17:14 +00:00
|
|
|
% Empty the waste folder
|
|
|
|
clear_waste(State),
|
2016-09-27 14:58:26 +01:00
|
|
|
% Need to fetch manifest at start rather than have it be passed in
|
|
|
|
% Don't want to process a queued call waiting on an old manifest
|
2016-11-01 00:46:14 +00:00
|
|
|
[_Active|Manifest] = leveled_inker:ink_getmanifest(Inker),
|
2016-09-27 14:58:26 +01:00
|
|
|
MaxRunLength = State#state.max_run_length,
|
2016-10-05 18:28:31 +01:00
|
|
|
{FilterServer, MaxSQN} = InitiateFun(Checker),
|
2016-09-28 11:41:56 +01:00
|
|
|
CDBopts = State#state.cdb_options,
|
|
|
|
|
2016-10-05 18:28:31 +01:00
|
|
|
Candidates = scan_all_files(Manifest, FilterFun, FilterServer, MaxSQN),
|
2018-07-23 12:46:42 +01:00
|
|
|
ScoreParams =
|
|
|
|
{MaxRunLength,
|
|
|
|
State#state.maxrunlength_compactionperc,
|
|
|
|
State#state.singlefile_compactionperc},
|
|
|
|
BestRun0 = assess_candidates(Candidates, ScoreParams),
|
|
|
|
case score_run(BestRun0, ScoreParams) of
|
2016-10-30 22:06:44 +00:00
|
|
|
Score when Score > 0.0 ->
|
2016-10-27 00:57:19 +01:00
|
|
|
BestRun1 = sort_run(BestRun0),
|
2018-07-23 12:46:42 +01:00
|
|
|
print_compaction_run(BestRun1, ScoreParams),
|
2016-11-14 11:40:02 +00:00
|
|
|
ManifestSlice = compact_files(BestRun1,
|
|
|
|
CDBopts,
|
|
|
|
FilterFun,
|
|
|
|
FilterServer,
|
|
|
|
MaxSQN,
|
2017-11-06 21:16:46 +00:00
|
|
|
State#state.reload_strategy,
|
|
|
|
State#state.compression_method),
|
2016-09-27 14:58:26 +01:00
|
|
|
FilesToDelete = lists:map(fun(C) ->
|
|
|
|
{C#candidate.low_sqn,
|
|
|
|
C#candidate.filename,
|
2017-01-18 15:23:06 +00:00
|
|
|
C#candidate.journal,
|
|
|
|
undefined}
|
2016-09-27 14:58:26 +01:00
|
|
|
end,
|
2016-10-27 00:57:19 +01:00
|
|
|
BestRun1),
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC002", [length(FilesToDelete)]),
|
|
|
|
case is_process_alive(Inker) of
|
2016-10-03 23:34:28 +01:00
|
|
|
true ->
|
2016-11-03 16:05:43 +00:00
|
|
|
update_inker(Inker,
|
|
|
|
ManifestSlice,
|
2016-11-14 11:40:02 +00:00
|
|
|
FilesToDelete),
|
2017-05-26 10:51:30 +01:00
|
|
|
ok = CloseFun(FilterServer),
|
2016-11-14 19:34:11 +00:00
|
|
|
{noreply, State}
|
2016-10-03 23:34:28 +01:00
|
|
|
end;
|
2016-09-27 14:58:26 +01:00
|
|
|
Score ->
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC003", [Score]),
|
2016-10-03 23:34:28 +01:00
|
|
|
ok = leveled_inker:ink_compactioncomplete(Inker),
|
2017-05-26 10:51:30 +01:00
|
|
|
ok = CloseFun(FilterServer),
|
2016-09-27 14:58:26 +01:00
|
|
|
{noreply, State}
|
|
|
|
end;
|
2018-02-15 16:14:46 +00:00
|
|
|
handle_cast({trim, Inker, PersistedSQN}, State) ->
|
2018-02-16 14:16:28 +00:00
|
|
|
ManifestAsList = leveled_inker:ink_getmanifest(Inker),
|
2018-02-15 16:14:46 +00:00
|
|
|
FilesToDelete =
|
2018-02-16 14:16:28 +00:00
|
|
|
leveled_imanifest:find_persistedentries(PersistedSQN, ManifestAsList),
|
2018-02-15 16:14:46 +00:00
|
|
|
ok = update_inker(Inker, [], FilesToDelete),
|
|
|
|
{noreply, State};
|
2016-10-14 13:36:12 +01:00
|
|
|
handle_cast({hashtable_calc, HashTree, StartPos, CDBpid}, State) ->
|
|
|
|
{IndexList, HashTreeBin} = leveled_cdb:hashtable_calc(HashTree, StartPos),
|
|
|
|
ok = leveled_cdb:cdb_returnhashtable(CDBpid, IndexList, HashTreeBin),
|
|
|
|
{stop, normal, State};
|
2016-09-20 16:13:36 +01:00
|
|
|
handle_cast(stop, State) ->
|
|
|
|
{stop, normal, State}.
|
|
|
|
|
|
|
|
handle_info(_Info, State) ->
|
|
|
|
{noreply, State}.
|
|
|
|
|
2016-11-14 19:34:11 +00:00
|
|
|
terminate(normal, _State) ->
|
|
|
|
ok;
|
|
|
|
terminate(Reason, _State) ->
|
2016-11-18 21:35:45 +00:00
|
|
|
leveled_log:log("IC001", [Reason]).
|
2016-09-20 16:13:36 +01:00
|
|
|
|
|
|
|
code_change(_OldVsn, State, _Extra) ->
|
|
|
|
{ok, State}.
|
|
|
|
|
|
|
|
|
2017-03-30 15:46:37 +01:00
|
|
|
%%%============================================================================
|
|
|
|
%%% External functions
|
|
|
|
%%%============================================================================
|
|
|
|
|
2017-11-09 12:42:49 +00:00
|
|
|
-spec schedule_compaction(list(integer()),
                            integer(),
                            {integer(), integer(), integer()}) -> integer().
%% @doc
%% Schedule the next compaction event for this store. Chooses a random
%% interval, and then a random start time within the first third
%% of the interval.
%%
%% The number of Compaction runs per day can be set. This doesn't guarantee
%% those runs, but uses the assumption there will be n runs when scheduling
%% the next one
%%
%% Compaction Hours should be the list of hours during the day (based on
%% local time) when compaction can be scheduled to run,
%% e.g. [0, 1, 2, 3, 4, 21, 22, 23]
%% Runs per day is the number of compaction runs per day that should be
%% scheduled - expected to be a small integer, probably 1
%%
%% Current TS should be the outcome of os:timestamp()
%%
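%% An illustrative call (a sketch only): the caller waits the returned number
%% of seconds before prompting the next compaction attempt, with compaction
%% allowed in the hours around midnight and one run expected per day:
%%
%%    SecondsToWait = schedule_compaction([0, 1, 2, 3, 22, 23], 1, os:timestamp()).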
schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
|
2017-05-18 14:09:45 +01:00
|
|
|
    % We schedule the next interval by acting as if we were scheduling all
    % n intervals at random, but then only choose the next one. After each
    % event has occurred the random process is repeated to determine the next
    % event to schedule i.e. the unused schedule is discarded.
|
|
|
|
|
2017-03-30 15:46:37 +01:00
|
|
|
IntervalLength = 60 div ?INTERVALS_PER_HOUR,
|
|
|
|
TotalHours = length(CompactionHours),
|
|
|
|
|
|
|
|
LocalTime = calendar:now_to_local_time(CurrentTS),
|
|
|
|
{{NowY, NowMon, NowD},
|
|
|
|
{NowH, NowMin, _NowS}} = LocalTime,
|
|
|
|
CurrentInterval = {NowH, NowMin div IntervalLength + 1},
|
|
|
|
|
2017-05-18 14:09:45 +01:00
|
|
|
% Randomly select an hour and an interval for each of the runs expected
|
|
|
|
% today.
|
2017-03-30 15:46:37 +01:00
|
|
|
RandSelect =
|
|
|
|
fun(_X) ->
|
2017-07-31 20:20:39 +02:00
|
|
|
{lists:nth(leveled_rand:uniform(TotalHours), CompactionHours),
|
|
|
|
leveled_rand:uniform(?INTERVALS_PER_HOUR)}
|
2017-03-30 15:46:37 +01:00
|
|
|
end,
|
|
|
|
RandIntervals = lists:sort(lists:map(RandSelect,
|
|
|
|
lists:seq(1, RunsPerDay))),
|
|
|
|
|
2017-05-18 14:09:45 +01:00
|
|
|
% Pick the next interval from the list. The intervals before current time
|
|
|
|
% are considered as intervals tomorrow, so will only be next if there are
|
|
|
|
    % no others today
|
2017-03-30 15:46:37 +01:00
|
|
|
CheckNotBefore = fun(A) -> A =< CurrentInterval end,
|
|
|
|
{TooEarly, MaybeOK} = lists:splitwith(CheckNotBefore, RandIntervals),
|
|
|
|
{NextDate, {NextH, NextI}} =
|
|
|
|
case MaybeOK of
|
|
|
|
[] ->
|
2017-05-18 14:09:45 +01:00
|
|
|
                % Use first interval picked tomorrow if none of the selected run times
|
2017-03-30 15:46:37 +01:00
|
|
|
% are today
|
|
|
|
Tmrw = calendar:date_to_gregorian_days(NowY, NowMon, NowD) + 1,
|
|
|
|
{calendar:gregorian_days_to_date(Tmrw),
|
|
|
|
lists:nth(1, TooEarly)};
|
|
|
|
_ ->
|
|
|
|
{{NowY, NowMon, NowD}, lists:nth(1, MaybeOK)}
|
|
|
|
end,
|
2017-05-18 14:09:45 +01:00
|
|
|
|
|
|
|
% Calculate the offset in seconds to this next interval
|
2017-03-30 15:46:37 +01:00
|
|
|
NextS0 = NextI * (IntervalLength * 60)
|
2017-07-31 20:20:39 +02:00
|
|
|
- leveled_rand:uniform(IntervalLength * 60),
|
2017-03-30 15:46:37 +01:00
|
|
|
NextM = NextS0 div 60,
|
|
|
|
NextS = NextS0 rem 60,
|
|
|
|
TimeDiff = calendar:time_difference(LocalTime,
|
|
|
|
{NextDate, {NextH, NextM, NextS}}),
|
|
|
|
{Days, {Hours, Mins, Secs}} = TimeDiff,
|
|
|
|
Days * 86400 + Hours * 3600 + Mins * 60 + Secs.
|
|
|
|
|
|
|
|
|
2016-09-20 16:13:36 +01:00
|
|
|
%%%============================================================================
|
|
|
|
%%% Internal functions
|
|
|
|
%%%============================================================================
|
|
|
|
|
|
|
|
|
%% @doc
%% Get a score for a single CDB file in the journal. This will pull out a
%% sample of keys and sizes at random in an efficient way (by scanning the
%% hashtable then just picking the key and size information off disk).
%%
%% The score should represent a percentage which is the size of the file by
%% comparison to the original file if compaction was to be run. So if a file
%% can be reduced in size by 30% the score will be 70%.
%%
%% The score is based on a random sample - so will not be consistent between
%% calls.
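%%
%% As a worked example of the calculation in size_comparison_score/4 below
%% (illustrative numbers only): if the sampled keys give an ActiveSize of 700
%% bytes and a ReplacedSize of 300 bytes, the score is
%% 100 * 700 / (700 + 300) = 70.0 - and the file is only worth compacting when
%% that score falls below the relevant compaction target percentage.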
check_single_file(CDB, FilterFun, FilterServer, MaxSQN, SampleSize, BatchSize) ->
|
|
|
|
FN = leveled_cdb:cdb_filename(CDB),
|
2016-09-21 18:31:42 +01:00
|
|
|
PositionList = leveled_cdb:cdb_getpositions(CDB, SampleSize),
|
|
|
|
KeySizeList = fetch_inbatches(PositionList, BatchSize, CDB, []),
|
2017-11-09 12:42:49 +00:00
|
|
|
Score =
|
|
|
|
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN),
|
|
|
|
leveled_log:log("IC004", [FN, Score]),
|
|
|
|
Score.
|
|
|
|
|
|
|
|
size_comparison_score(KeySizeList, FilterFun, FilterServer, MaxSQN) ->
|
2016-12-11 06:53:25 +00:00
|
|
|
FoldFunForSizeCompare =
|
|
|
|
fun(KS, {ActSize, RplSize}) ->
|
|
|
|
case KS of
|
|
|
|
{{SQN, _Type, PK}, Size} ->
|
|
|
|
Check = FilterFun(FilterServer, PK, SQN),
|
|
|
|
case {Check, SQN > MaxSQN} of
|
|
|
|
{true, _} ->
|
|
|
|
{ActSize + Size - ?CRC_SIZE, RplSize};
|
|
|
|
{false, true} ->
|
|
|
|
{ActSize + Size - ?CRC_SIZE, RplSize};
|
|
|
|
_ ->
|
|
|
|
{ActSize, RplSize + Size - ?CRC_SIZE}
|
|
|
|
end;
|
|
|
|
_ ->
|
2017-11-09 12:42:49 +00:00
|
|
|
                    % There is a key which is not in the expected format.
                    % Note that the key-size list has been filtered for
                    % errors by leveled_cdb - but this doesn't know the
                    % expected format of the key
|
2016-12-11 06:53:25 +00:00
|
|
|
{ActSize, RplSize}
|
|
|
|
end
|
|
|
|
end,
|
|
|
|
|
|
|
|
R0 = lists:foldl(FoldFunForSizeCompare, {0, 0}, KeySizeList),
|
2016-09-26 10:55:08 +01:00
|
|
|
{ActiveSize, ReplacedSize} = R0,
|
2017-11-09 12:42:49 +00:00
|
|
|
case ActiveSize + ReplacedSize of
|
|
|
|
0 ->
|
|
|
|
100.0;
|
|
|
|
_ ->
|
|
|
|
100 * ActiveSize / (ActiveSize + ReplacedSize)
|
|
|
|
end.
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2016-10-05 18:28:31 +01:00
|
|
|
scan_all_files(Manifest, FilterFun, FilterServer, MaxSQN) ->
|
|
|
|
scan_all_files(Manifest, FilterFun, FilterServer, MaxSQN, []).
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2016-10-05 18:28:31 +01:00
|
|
|
scan_all_files([], _FilterFun, _FilterServer, _MaxSQN, CandidateList) ->
|
2016-09-26 10:55:08 +01:00
|
|
|
CandidateList;
|
2016-10-05 18:28:31 +01:00
|
|
|
scan_all_files([Entry|Tail], FilterFun, FilterServer, MaxSQN, CandidateList) ->
|
2017-01-18 15:23:06 +00:00
|
|
|
{LowSQN, FN, JournalP, _LK} = Entry,
|
2016-09-27 14:58:26 +01:00
|
|
|
CpctPerc = check_single_file(JournalP,
|
|
|
|
FilterFun,
|
|
|
|
FilterServer,
|
2016-10-05 18:28:31 +01:00
|
|
|
MaxSQN,
|
2016-09-27 14:58:26 +01:00
|
|
|
?SAMPLE_SIZE,
|
|
|
|
?BATCH_SIZE),
|
|
|
|
scan_all_files(Tail,
|
|
|
|
FilterFun,
|
|
|
|
FilterServer,
|
2016-10-05 18:28:31 +01:00
|
|
|
MaxSQN,
|
2016-09-27 14:58:26 +01:00
|
|
|
CandidateList ++
|
|
|
|
[#candidate{low_sqn = LowSQN,
|
|
|
|
filename = FN,
|
|
|
|
journal = JournalP,
|
|
|
|
compaction_perc = CpctPerc}]).
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2016-09-21 18:31:42 +01:00
|
|
|
fetch_inbatches([], _BatchSize, _CDB, CheckedList) ->
|
|
|
|
CheckedList;
|
|
|
|
fetch_inbatches(PositionList, BatchSize, CDB, CheckedList) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
{Batch, Tail} = if
|
|
|
|
length(PositionList) >= BatchSize ->
|
|
|
|
lists:split(BatchSize, PositionList);
|
|
|
|
true ->
|
|
|
|
{PositionList, []}
|
|
|
|
end,
|
|
|
|
KL_List = leveled_cdb:cdb_directfetch(CDB, Batch, key_size),
|
2016-09-21 18:31:42 +01:00
|
|
|
fetch_inbatches(Tail, BatchSize, CDB, CheckedList ++ KL_List).
|
|
|
|
|
2017-06-02 08:37:57 +01:00
|
|
|
|
|
|
|
assess_candidates(AllCandidates, Params) ->
|
|
|
|
NaiveBestRun = assess_candidates(AllCandidates, Params, [], []),
|
|
|
|
MaxRunLength = element(1, Params),
|
2016-09-27 14:58:26 +01:00
|
|
|
case length(AllCandidates) of
|
|
|
|
L when L > MaxRunLength, MaxRunLength > 1 ->
|
|
|
|
%% Assess with different offsets from the start
|
2017-06-02 08:37:57 +01:00
|
|
|
AssessFold =
|
|
|
|
fun(Counter, BestRun) ->
|
|
|
|
SubList = lists:nthtail(Counter, AllCandidates),
|
|
|
|
assess_candidates(SubList, Params, [], BestRun)
|
|
|
|
end,
|
|
|
|
|
|
|
|
lists:foldl(AssessFold,
|
|
|
|
NaiveBestRun,
|
|
|
|
lists:seq(1, MaxRunLength - 1));
|
2016-09-27 14:58:26 +01:00
|
|
|
_ ->
|
|
|
|
NaiveBestRun
|
|
|
|
end.
|
|
|
|
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates([], _Params, _CurrentRun0, BestAssessment) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
BestAssessment;
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates([HeadC|Tail], Params, CurrentRun0, BestAssessment) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
CurrentRun1 = choose_best_assessment(CurrentRun0 ++ [HeadC],
|
|
|
|
[HeadC],
|
2017-06-02 08:37:57 +01:00
|
|
|
Params),
|
2016-09-27 14:58:26 +01:00
|
|
|
assess_candidates(Tail,
|
2017-06-02 08:37:57 +01:00
|
|
|
Params,
|
2016-09-27 14:58:26 +01:00
|
|
|
CurrentRun1,
|
|
|
|
choose_best_assessment(CurrentRun1,
|
|
|
|
BestAssessment,
|
2017-06-02 08:37:57 +01:00
|
|
|
Params)).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
|
|
|
|
2017-06-02 08:37:57 +01:00
|
|
|
choose_best_assessment(RunToAssess, BestRun, Params) ->
|
|
|
|
{MaxRunLength, _MR_CT, _SF_CT} = Params,
|
2016-09-27 14:58:26 +01:00
|
|
|
case length(RunToAssess) of
|
|
|
|
LR1 when LR1 > MaxRunLength ->
|
|
|
|
BestRun;
|
|
|
|
_ ->
|
2017-06-02 08:37:57 +01:00
|
|
|
AssessScore = score_run(RunToAssess, Params),
|
|
|
|
BestScore = score_run(BestRun, Params),
|
2016-09-27 14:58:26 +01:00
|
|
|
if
|
|
|
|
AssessScore > BestScore ->
|
|
|
|
RunToAssess;
|
|
|
|
true ->
|
|
|
|
BestRun
|
|
|
|
end
|
|
|
|
end.
|
2017-06-02 08:37:57 +01:00
|
|
|
|
2018-07-23 12:46:42 +01:00
|
|
|
|
2017-06-02 08:37:57 +01:00
|
|
|
score_run([], _Params) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
0.0;
|
2017-06-02 08:37:57 +01:00
|
|
|
score_run(Run, {MaxRunLength, MR_CT, SF_CT}) ->
|
|
|
|
TargetIncr =
|
|
|
|
case MaxRunLength of
|
|
|
|
1 ->
|
|
|
|
0.0;
|
|
|
|
MaxRunSize ->
|
|
|
|
(MR_CT - SF_CT) / (MaxRunSize - 1)
|
|
|
|
end,
|
|
|
|
Target = SF_CT + TargetIncr * (length(Run) - 1),
|
2016-09-27 14:58:26 +01:00
|
|
|
RunTotal = lists:foldl(fun(Cand, Acc) ->
|
|
|
|
Acc + Cand#candidate.compaction_perc end,
|
|
|
|
0.0,
|
|
|
|
Run),
|
|
|
|
Target - RunTotal / length(Run).
|
|
|
|
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2018-07-23 12:46:42 +01:00
|
|
|
print_compaction_run(BestRun, ScoreParams) ->
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC005", [length(BestRun),
|
2018-07-23 12:46:42 +01:00
|
|
|
score_run(BestRun, ScoreParams)]),
|
2016-09-27 14:58:26 +01:00
|
|
|
lists:foreach(fun(File) ->
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC006", [File#candidate.filename])
|
2016-09-27 14:58:26 +01:00
|
|
|
end,
|
|
|
|
BestRun).
|
2016-09-26 10:55:08 +01:00
|
|
|
|
2016-10-27 00:57:19 +01:00
|
|
|
sort_run(RunOfFiles) ->
|
|
|
|
CompareFun = fun(Cand1, Cand2) ->
|
|
|
|
Cand1#candidate.low_sqn =< Cand2#candidate.low_sqn end,
|
|
|
|
lists:sort(CompareFun, RunOfFiles).
|
|
|
|
|
2016-11-14 11:40:02 +00:00
|
|
|
update_inker(Inker, ManifestSlice, FilesToDelete) ->
|
2016-11-03 16:05:43 +00:00
|
|
|
{ok, ManSQN} = leveled_inker:ink_updatemanifest(Inker,
|
|
|
|
ManifestSlice,
|
|
|
|
FilesToDelete),
|
|
|
|
ok = leveled_inker:ink_compactioncomplete(Inker),
|
|
|
|
leveled_log:log("IC007", []),
|
2017-01-18 15:23:06 +00:00
|
|
|
lists:foreach(fun({_SQN, _FN, J2D, _LK}) ->
|
2016-11-14 11:40:02 +00:00
|
|
|
leveled_cdb:cdb_deletepending(J2D,
|
|
|
|
ManSQN,
|
|
|
|
Inker)
|
|
|
|
end,
|
|
|
|
FilesToDelete),
|
2018-02-15 16:14:46 +00:00
|
|
|
ok.
|
2016-10-27 00:57:19 +01:00
|
|
|
|
2017-11-06 21:16:46 +00:00
|
|
|
compact_files(BestRun, CDBopts, FilterFun, FilterServer,
|
|
|
|
MaxSQN, RStrategy, PressMethod) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
BatchesOfPositions = get_all_positions(BestRun, []),
|
|
|
|
compact_files(BatchesOfPositions,
|
|
|
|
CDBopts,
|
|
|
|
null,
|
|
|
|
FilterFun,
|
|
|
|
FilterServer,
|
2016-10-05 18:28:31 +01:00
|
|
|
MaxSQN,
|
2016-10-25 23:13:14 +01:00
|
|
|
RStrategy,
|
2017-11-06 21:16:46 +00:00
|
|
|
PressMethod,
|
2016-11-14 11:40:02 +00:00
|
|
|
[]).
|
2016-09-20 16:13:36 +01:00
|
|
|
|
2016-09-28 18:26:52 +01:00
|
|
|
|
2016-10-05 18:28:31 +01:00
|
|
|
compact_files([], _CDBopts, null, _FilterFun, _FilterServer, _MaxSQN,
|
2017-11-06 21:16:46 +00:00
|
|
|
_RStrategy, _PressMethod, ManSlice0) ->
|
2016-11-14 11:40:02 +00:00
|
|
|
ManSlice0;
|
2016-10-05 18:28:31 +01:00
|
|
|
compact_files([], _CDBopts, ActiveJournal0, _FilterFun, _FilterServer, _MaxSQN,
|
2017-11-06 21:16:46 +00:00
|
|
|
_RStrategy, _PressMethod, ManSlice0) ->
|
2017-01-17 16:30:04 +00:00
|
|
|
ManSlice1 = ManSlice0 ++ leveled_imanifest:generate_entry(ActiveJournal0),
|
2016-11-14 11:40:02 +00:00
|
|
|
ManSlice1;
|
2016-10-05 18:28:31 +01:00
|
|
|
compact_files([Batch|T], CDBopts, ActiveJournal0,
|
|
|
|
FilterFun, FilterServer, MaxSQN,
|
2017-11-06 21:16:46 +00:00
|
|
|
RStrategy, PressMethod, ManSlice0) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
{SrcJournal, PositionList} = Batch,
|
|
|
|
KVCs0 = leveled_cdb:cdb_directfetch(SrcJournal,
|
|
|
|
PositionList,
|
|
|
|
key_value_check),
|
2016-11-14 11:40:02 +00:00
|
|
|
KVCs1 = filter_output(KVCs0,
|
|
|
|
FilterFun,
|
|
|
|
FilterServer,
|
|
|
|
MaxSQN,
|
|
|
|
RStrategy),
|
2016-09-27 14:58:26 +01:00
|
|
|
{ActiveJournal1, ManSlice1} = write_values(KVCs1,
|
|
|
|
CDBopts,
|
|
|
|
ActiveJournal0,
|
2017-11-06 21:16:46 +00:00
|
|
|
ManSlice0,
|
|
|
|
PressMethod),
|
2016-10-05 18:28:31 +01:00
|
|
|
compact_files(T, CDBopts, ActiveJournal1, FilterFun, FilterServer, MaxSQN,
|
2017-11-06 21:16:46 +00:00
|
|
|
RStrategy, PressMethod, ManSlice1).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
|
|
|
get_all_positions([], PositionBatches) ->
|
|
|
|
PositionBatches;
|
|
|
|
get_all_positions([HeadRef|RestOfBest], PositionBatches) ->
|
|
|
|
SrcJournal = HeadRef#candidate.journal,
|
|
|
|
Positions = leveled_cdb:cdb_getpositions(SrcJournal, all),
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC008", [HeadRef#candidate.filename, length(Positions)]),
|
2016-09-28 11:41:56 +01:00
|
|
|
Batches = split_positions_into_batches(lists:sort(Positions),
|
|
|
|
SrcJournal,
|
|
|
|
[]),
|
2016-09-27 14:58:26 +01:00
|
|
|
get_all_positions(RestOfBest, PositionBatches ++ Batches).
|
|
|
|
|
|
|
|
split_positions_into_batches([], _Journal, Batches) ->
|
|
|
|
Batches;
|
|
|
|
split_positions_into_batches(Positions, Journal, Batches) ->
|
2016-09-28 11:41:56 +01:00
|
|
|
{ThisBatch, Tail} = if
|
|
|
|
length(Positions) > ?BATCH_SIZE ->
|
|
|
|
lists:split(?BATCH_SIZE, Positions);
|
|
|
|
true ->
|
|
|
|
{Positions, []}
|
|
|
|
end,
|
2016-09-27 14:58:26 +01:00
|
|
|
split_positions_into_batches(Tail,
|
|
|
|
Journal,
|
|
|
|
Batches ++ [{Journal, ThisBatch}]).
|
|
|
|
|
|
|
|
|
2016-10-25 23:13:14 +01:00
|
|
|
filter_output(KVCs, FilterFun, FilterServer, MaxSQN, ReloadStrategy) ->
|
2016-11-14 11:40:02 +00:00
|
|
|
lists:foldl(fun(KVC0, Acc) ->
|
2016-10-25 23:13:14 +01:00
|
|
|
R = leveled_codec:compact_inkerkvc(KVC0, ReloadStrategy),
|
|
|
|
case R of
|
|
|
|
skip ->
|
2016-11-14 11:40:02 +00:00
|
|
|
Acc;
|
2016-10-25 23:13:14 +01:00
|
|
|
{TStrat, KVC1} ->
|
|
|
|
{K, _V, CrcCheck} = KVC0,
|
|
|
|
{SQN, LedgerKey} = leveled_codec:from_journalkey(K),
|
|
|
|
KeyValid = FilterFun(FilterServer, LedgerKey, SQN),
|
|
|
|
case {KeyValid, CrcCheck, SQN > MaxSQN, TStrat} of
|
|
|
|
{false, true, false, retain} ->
|
2016-11-14 11:40:02 +00:00
|
|
|
Acc ++ [KVC1];
|
2016-10-25 23:13:14 +01:00
|
|
|
{false, true, false, _} ->
|
2016-11-14 11:40:02 +00:00
|
|
|
Acc;
|
|
|
|
_ ->
|
|
|
|
Acc ++ [KVC0]
|
2016-10-25 23:13:14 +01:00
|
|
|
end
|
|
|
|
end
|
|
|
|
end,
|
2016-11-14 11:40:02 +00:00
|
|
|
[],
|
2016-10-25 23:13:14 +01:00
|
|
|
KVCs).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
|
|
|
|
2017-11-06 21:16:46 +00:00
|
|
|
write_values([], _CDBopts, Journal0, ManSlice0, _PressMethod) ->
|
2016-09-28 11:41:56 +01:00
|
|
|
{Journal0, ManSlice0};
|
2017-11-06 21:16:46 +00:00
|
|
|
write_values(KVCList, CDBopts, Journal0, ManSlice0, PressMethod) ->
|
2016-10-26 11:39:27 +01:00
|
|
|
KVList = lists:map(fun({K, V, _C}) ->
|
2017-03-20 15:43:54 +00:00
|
|
|
% Compress the value as part of compaction
|
2017-11-06 21:16:46 +00:00
|
|
|
{K, leveled_codec:maybe_compress(V, PressMethod)}
|
2016-10-26 11:39:27 +01:00
|
|
|
end,
|
|
|
|
KVCList),
|
2016-09-28 11:41:56 +01:00
|
|
|
{ok, Journal1} = case Journal0 of
|
|
|
|
null ->
|
2016-10-26 11:39:27 +01:00
|
|
|
{TK, _TV} = lists:nth(1, KVList),
|
|
|
|
{SQN, _LK} = leveled_codec:from_journalkey(TK),
|
2016-09-28 11:41:56 +01:00
|
|
|
FP = CDBopts#cdb_options.file_path,
|
|
|
|
FN = leveled_inker:filepath(FP,
|
|
|
|
SQN,
|
|
|
|
compact_journal),
|
2016-11-03 16:05:43 +00:00
|
|
|
leveled_log:log("IC009", [FN]),
|
2016-09-28 11:41:56 +01:00
|
|
|
leveled_cdb:cdb_open_writer(FN,
|
|
|
|
CDBopts);
|
|
|
|
_ ->
|
|
|
|
{ok, Journal0}
|
|
|
|
end,
|
2016-10-26 11:39:27 +01:00
|
|
|
R = leveled_cdb:cdb_mput(Journal1, KVList),
|
2016-09-27 14:58:26 +01:00
|
|
|
case R of
|
|
|
|
ok ->
|
2016-10-26 11:39:27 +01:00
|
|
|
{Journal1, ManSlice0};
|
2016-09-27 14:58:26 +01:00
|
|
|
roll ->
|
2017-01-17 16:30:04 +00:00
|
|
|
ManSlice1 = ManSlice0 ++ leveled_imanifest:generate_entry(Journal1),
|
2017-11-06 21:16:46 +00:00
|
|
|
write_values(KVCList, CDBopts, null, ManSlice1, PressMethod)
|
2016-09-27 14:58:26 +01:00
|
|
|
end.
|
|
|
|
|
2016-11-14 11:17:14 +00:00
|
|
|
clear_waste(State) ->
|
2017-11-08 12:58:09 +00:00
|
|
|
case State#state.waste_path of
|
|
|
|
undefined ->
|
|
|
|
ok;
|
|
|
|
WP ->
|
|
|
|
WRP = State#state.waste_retention_period,
|
|
|
|
{ok, ClearedJournals} = file:list_dir(WP),
|
|
|
|
N = calendar:datetime_to_gregorian_seconds(calendar:local_time()),
|
|
|
|
DeleteJournalFun =
|
|
|
|
fun(DelJ) ->
|
|
|
|
LMD = filelib:last_modified(WP ++ DelJ),
|
|
|
|
case N - calendar:datetime_to_gregorian_seconds(LMD) of
|
|
|
|
LMD_Delta when LMD_Delta >= WRP ->
|
|
|
|
ok = file:delete(WP ++ DelJ),
|
|
|
|
leveled_log:log("IC010", [WP ++ DelJ]);
|
|
|
|
LMD_Delta ->
|
|
|
|
leveled_log:log("IC011", [WP ++ DelJ, LMD_Delta]),
|
|
|
|
ok
|
|
|
|
end
|
|
|
|
end,
|
|
|
|
lists:foreach(DeleteJournalFun, ClearedJournals)
|
|
|
|
end.
|
2016-09-27 14:58:26 +01:00
|
|
|
|
2016-09-20 16:13:36 +01:00
|
|
|
|
|
|
|
%%%============================================================================
|
|
|
|
%%% Test
|
|
|
|
%%%============================================================================
|
2016-09-27 14:58:26 +01:00
|
|
|
|
|
|
|
|
|
|
|
-ifdef(TEST).
|
|
|
|
|
2017-03-30 15:46:37 +01:00
|
|
|
schedule_test() ->
|
|
|
|
schedule_test_bycount(1),
|
|
|
|
schedule_test_bycount(2),
|
|
|
|
schedule_test_bycount(4).
|
|
|
|
|
|
|
|
schedule_test_bycount(N) ->
|
2018-07-24 12:09:59 +01:00
|
|
|
CurrentTS = {1490,884020,0}, % Actually 30th March 2017 15:27
|
2017-03-30 15:46:37 +01:00
|
|
|
SecondsToCompaction0 = schedule_compaction([16], N, CurrentTS),
|
|
|
|
io:format("Seconds to compaction ~w~n", [SecondsToCompaction0]),
|
|
|
|
?assertMatch(true, SecondsToCompaction0 > 1800),
|
|
|
|
?assertMatch(true, SecondsToCompaction0 < 5700),
|
2017-05-18 14:09:45 +01:00
|
|
|
SecondsToCompaction1 = schedule_compaction([14], N, CurrentTS), % tomorrow!
|
2018-07-24 12:09:59 +01:00
|
|
|
io:format("Seconds to compaction ~w for count ~w~n",
|
|
|
|
[SecondsToCompaction1, N]),
|
2017-10-24 14:32:04 +01:00
|
|
|
?assertMatch(true, SecondsToCompaction1 >= 81180),
|
|
|
|
?assertMatch(true, SecondsToCompaction1 =< 84780).
|
2017-03-30 15:46:37 +01:00
|
|
|
|
|
|
|
|
2016-09-27 14:58:26 +01:00
|
|
|
simple_score_test() ->
|
|
|
|
Run1 = [#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 76.0},
|
|
|
|
#candidate{compaction_perc = 70.0}],
|
2018-07-23 12:46:42 +01:00
|
|
|
?assertMatch(-4.0, score_run(Run1, {4, 70.0, 40.0})),
|
2016-09-27 14:58:26 +01:00
|
|
|
Run2 = [#candidate{compaction_perc = 75.0}],
|
2018-07-23 12:46:42 +01:00
|
|
|
?assertMatch(-35.0, score_run(Run2, {4, 70.0, 40.0})),
|
|
|
|
?assertMatch(0.0, score_run([], {4, 40.0, 70.0})),
|
2016-10-05 18:28:31 +01:00
|
|
|
Run3 = [#candidate{compaction_perc = 100.0}],
|
2018-07-23 12:46:42 +01:00
|
|
|
?assertMatch(-60.0, score_run(Run3, {4, 70.0, 40.0})).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
|
|
|
score_compare_test() ->
|
2017-03-30 12:15:36 +01:00
|
|
|
Run1 = [#candidate{compaction_perc = 55.0},
|
|
|
|
#candidate{compaction_perc = 55.0},
|
|
|
|
#candidate{compaction_perc = 56.0},
|
|
|
|
#candidate{compaction_perc = 50.0}],
|
2018-07-23 12:46:42 +01:00
|
|
|
?assertMatch(16.0, score_run(Run1, {4, 70.0, 40.0})),
|
2017-03-30 12:15:36 +01:00
|
|
|
Run2 = [#candidate{compaction_perc = 55.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
?assertMatch(Run1,
|
|
|
|
choose_best_assessment(Run1,
|
|
|
|
Run2,
|
|
|
|
{4, 60.0, 40.0})),
|
|
|
|
?assertMatch(Run2,
|
|
|
|
choose_best_assessment(Run1 ++ Run2,
|
|
|
|
Run2,
|
|
|
|
{4, 60.0, 40.0})).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
2016-11-14 11:17:14 +00:00
|
|
|
file_gc_test() ->
|
|
|
|
State = #state{waste_path="test/waste/",
|
|
|
|
waste_retention_period=1},
|
|
|
|
ok = filelib:ensure_dir(State#state.waste_path),
|
|
|
|
file:write_file(State#state.waste_path ++ "1.cdb", term_to_binary("Hello")),
|
|
|
|
timer:sleep(1100),
|
|
|
|
file:write_file(State#state.waste_path ++ "2.cdb", term_to_binary("Hello")),
|
|
|
|
clear_waste(State),
|
|
|
|
{ok, ClearedJournals} = file:list_dir(State#state.waste_path),
|
|
|
|
?assertMatch(["2.cdb"], ClearedJournals),
|
|
|
|
timer:sleep(1100),
|
|
|
|
clear_waste(State),
|
|
|
|
{ok, ClearedJournals2} = file:list_dir(State#state.waste_path),
|
|
|
|
?assertMatch([], ClearedJournals2).
|
|
|
|
|
2016-09-27 14:58:26 +01:00
|
|
|
find_bestrun_test() ->
|
|
|
|
    %% Tests dependent on these parameters (passed explicitly as Params below,
    %% equivalent to setting the following defines):
    %% -define(MAX_COMPACTION_RUN, 4).
    %% -define(SINGLEFILE_COMPACTION_TARGET, 40.0).
    %% -define(MAXRUNLENGTH_COMPACTION_TARGET, 60.0).
    %% Tested first with blocks significant as no back-tracking
|
2017-06-02 08:37:57 +01:00
|
|
|
Params = {4, 60.0, 40.0},
|
2017-03-30 12:15:36 +01:00
|
|
|
Block1 = [#candidate{compaction_perc = 55.0},
|
|
|
|
#candidate{compaction_perc = 65.0},
|
|
|
|
#candidate{compaction_perc = 42.0},
|
|
|
|
#candidate{compaction_perc = 50.0}],
|
|
|
|
Block2 = [#candidate{compaction_perc = 38.0},
|
|
|
|
#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 45.0}],
|
|
|
|
Block3 = [#candidate{compaction_perc = 70.0},
|
2016-09-27 14:58:26 +01:00
|
|
|
#candidate{compaction_perc = 100.0},
|
|
|
|
#candidate{compaction_perc = 100.0},
|
|
|
|
#candidate{compaction_perc = 100.0}],
|
2017-03-30 12:15:36 +01:00
|
|
|
Block4 = [#candidate{compaction_perc = 55.0},
|
|
|
|
#candidate{compaction_perc = 56.0},
|
2017-06-02 08:37:57 +01:00
|
|
|
#candidate{compaction_perc = 57.0},
|
2017-03-30 12:15:36 +01:00
|
|
|
#candidate{compaction_perc = 40.0}],
|
|
|
|
Block5 = [#candidate{compaction_perc = 60.0},
|
2016-09-27 14:58:26 +01:00
|
|
|
#candidate{compaction_perc = 60.0}],
|
|
|
|
CList0 = Block1 ++ Block2 ++ Block3 ++ Block4 ++ Block5,
|
2017-06-02 08:37:57 +01:00
|
|
|
?assertMatch(Block4, assess_candidates(CList0, Params, [], [])),
|
2016-09-27 14:58:26 +01:00
|
|
|
CList1 = CList0 ++ [#candidate{compaction_perc = 20.0}],
|
|
|
|
?assertMatch([#candidate{compaction_perc = 20.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates(CList1, Params, [], [])),
|
2016-09-27 14:58:26 +01:00
|
|
|
CList2 = Block4 ++ Block3 ++ Block2 ++ Block1 ++ Block5,
|
2017-06-02 08:37:57 +01:00
|
|
|
?assertMatch(Block4, assess_candidates(CList2, Params, [], [])),
|
2016-09-27 14:58:26 +01:00
|
|
|
CList3 = Block5 ++ Block1 ++ Block2 ++ Block3 ++ Block4,
|
2017-03-30 12:15:36 +01:00
|
|
|
?assertMatch([#candidate{compaction_perc = 42.0},
|
|
|
|
#candidate{compaction_perc = 50.0},
|
|
|
|
#candidate{compaction_perc = 38.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates(CList3, Params)),
|
2016-09-27 14:58:26 +01:00
|
|
|
%% Now do some back-tracking to get a genuinely optimal solution without
|
|
|
|
%% needing to re-order
|
2017-03-30 12:15:36 +01:00
|
|
|
?assertMatch([#candidate{compaction_perc = 42.0},
|
|
|
|
#candidate{compaction_perc = 50.0},
|
|
|
|
#candidate{compaction_perc = 38.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates(CList0, Params)),
|
2017-03-30 12:15:36 +01:00
|
|
|
?assertMatch([#candidate{compaction_perc = 42.0},
|
|
|
|
#candidate{compaction_perc = 50.0},
|
|
|
|
#candidate{compaction_perc = 38.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates(CList0, setelement(1, Params, 5))),
|
2017-03-30 12:15:36 +01:00
|
|
|
?assertMatch([#candidate{compaction_perc = 42.0},
|
|
|
|
#candidate{compaction_perc = 50.0},
|
|
|
|
#candidate{compaction_perc = 38.0},
|
|
|
|
#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 75.0},
|
|
|
|
#candidate{compaction_perc = 45.0}],
|
2017-06-02 08:37:57 +01:00
|
|
|
assess_candidates(CList0, setelement(1, Params, 6))).
|
2016-09-27 14:58:26 +01:00
|
|
|
|
2016-10-25 23:13:14 +01:00
|
|
|
test_ledgerkey(Key) ->
|
|
|
|
{o, "Bucket", Key, null}.
|
|
|
|
|
|
|
|
test_inkerkv(SQN, Key, V, IdxSpecs) ->
|
2017-11-06 18:44:08 +00:00
|
|
|
leveled_codec:to_inkerkv(test_ledgerkey(Key), SQN, V, IdxSpecs,
|
|
|
|
native, false).
|
2016-10-25 23:13:14 +01:00
|
|
|
|
2016-09-28 11:41:56 +01:00
|
|
|
fetch_testcdb(RP) ->
|
2016-09-27 14:58:26 +01:00
|
|
|
FN1 = leveled_inker:filepath(RP, 1, new_journal),
|
2017-03-20 15:43:54 +00:00
|
|
|
{ok,
|
|
|
|
CDB1} = leveled_cdb:cdb_open_writer(FN1,
|
|
|
|
#cdb_options{binary_mode=true}),
|
2018-05-04 11:19:37 +01:00
|
|
|
{K1, V1} = test_inkerkv(1, "Key1", "Value1", {[], infinity}),
|
|
|
|
{K2, V2} = test_inkerkv(2, "Key2", "Value2", {[], infinity}),
|
|
|
|
{K3, V3} = test_inkerkv(3, "Key3", "Value3", {[], infinity}),
|
|
|
|
{K4, V4} = test_inkerkv(4, "Key1", "Value4", {[], infinity}),
|
|
|
|
{K5, V5} = test_inkerkv(5, "Key1", "Value5", {[], infinity}),
|
|
|
|
{K6, V6} = test_inkerkv(6, "Key1", "Value6", {[], infinity}),
|
|
|
|
{K7, V7} = test_inkerkv(7, "Key1", "Value7", {[], infinity}),
|
|
|
|
{K8, V8} = test_inkerkv(8, "Key1", "Value8", {[], infinity}),
|
2016-09-27 14:58:26 +01:00
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K1, V1),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K2, V2),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K3, V3),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K4, V4),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K5, V5),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K6, V6),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K7, V7),
|
|
|
|
ok = leveled_cdb:cdb_put(CDB1, K8, V8),
|
|
|
|
{ok, FN2} = leveled_cdb:cdb_complete(CDB1),
|
2017-03-20 15:43:54 +00:00
|
|
|
leveled_cdb:cdb_open_reader(FN2, #cdb_options{binary_mode=true}).
|
2016-09-28 11:41:56 +01:00
|
|
|
|
|
|
|
check_single_file_test() ->
|
|
|
|
RP = "../test/journal",
|
|
|
|
{ok, CDB} = fetch_testcdb(RP),
|
2016-10-25 23:13:14 +01:00
|
|
|
LedgerSrv1 = [{8, {o, "Bucket", "Key1", null}},
|
|
|
|
{2, {o, "Bucket", "Key2", null}},
|
|
|
|
{3, {o, "Bucket", "Key3", null}}],
|
2016-09-27 14:58:26 +01:00
|
|
|
LedgerFun1 = fun(Srv, Key, ObjSQN) ->
|
|
|
|
case lists:keyfind(ObjSQN, 1, Srv) of
|
|
|
|
{ObjSQN, Key} ->
|
|
|
|
true;
|
|
|
|
_ ->
|
|
|
|
false
|
|
|
|
end end,
|
2016-10-05 18:28:31 +01:00
|
|
|
Score1 = check_single_file(CDB, LedgerFun1, LedgerSrv1, 9, 8, 4),
|
2016-09-27 14:58:26 +01:00
|
|
|
?assertMatch(37.5, Score1),
|
|
|
|
LedgerFun2 = fun(_Srv, _Key, _ObjSQN) -> true end,
|
2016-10-05 18:28:31 +01:00
|
|
|
Score2 = check_single_file(CDB, LedgerFun2, LedgerSrv1, 9, 8, 4),
|
2016-09-27 14:58:26 +01:00
|
|
|
?assertMatch(100.0, Score2),
|
2016-10-05 18:28:31 +01:00
|
|
|
Score3 = check_single_file(CDB, LedgerFun1, LedgerSrv1, 9, 8, 3),
|
2016-09-27 14:58:26 +01:00
|
|
|
?assertMatch(37.5, Score3),
|
2016-10-05 18:28:31 +01:00
|
|
|
Score4 = check_single_file(CDB, LedgerFun1, LedgerSrv1, 4, 8, 4),
|
|
|
|
?assertMatch(75.0, Score4),
|
2016-10-26 20:39:16 +01:00
|
|
|
ok = leveled_cdb:cdb_deletepending(CDB),
|
2016-09-28 11:41:56 +01:00
|
|
|
ok = leveled_cdb:cdb_destroy(CDB).
|
2016-10-05 18:28:31 +01:00
|
|
|
|
|
|
|
|
2016-10-25 23:13:14 +01:00
|
|
|
compact_single_file_setup() ->
|
2016-09-28 11:41:56 +01:00
|
|
|
RP = "../test/journal",
|
|
|
|
{ok, CDB} = fetch_testcdb(RP),
|
|
|
|
Candidate = #candidate{journal = CDB,
|
|
|
|
low_sqn = 1,
|
|
|
|
filename = "test",
|
|
|
|
compaction_perc = 37.5},
|
2016-10-25 23:13:14 +01:00
|
|
|
LedgerSrv1 = [{8, {o, "Bucket", "Key1", null}},
|
|
|
|
{2, {o, "Bucket", "Key2", null}},
|
|
|
|
{3, {o, "Bucket", "Key3", null}}],
|
2016-09-28 11:41:56 +01:00
|
|
|
LedgerFun1 = fun(Srv, Key, ObjSQN) ->
|
|
|
|
case lists:keyfind(ObjSQN, 1, Srv) of
|
|
|
|
{ObjSQN, Key} ->
|
|
|
|
true;
|
|
|
|
_ ->
|
|
|
|
false
|
|
|
|
end end,
|
|
|
|
CompactFP = leveled_inker:filepath(RP, journal_compact_dir),
|
|
|
|
ok = filelib:ensure_dir(CompactFP),
|
2016-10-25 23:13:14 +01:00
|
|
|
{Candidate, LedgerSrv1, LedgerFun1, CompactFP, CDB}.
|
|
|
|
|
|
|
|
compact_single_file_recovr_test() ->
|
|
|
|
{Candidate,
|
|
|
|
LedgerSrv1,
|
|
|
|
LedgerFun1,
|
|
|
|
CompactFP,
|
|
|
|
CDB} = compact_single_file_setup(),
|
2017-01-17 16:30:04 +00:00
|
|
|
[{LowSQN, FN, PidR, _LastKey}] =
|
|
|
|
compact_files([Candidate],
|
2017-03-20 15:43:54 +00:00
|
|
|
#cdb_options{file_path=CompactFP, binary_mode=true},
|
2017-01-17 16:30:04 +00:00
|
|
|
LedgerFun1,
|
|
|
|
LedgerSrv1,
|
|
|
|
9,
|
2017-11-06 21:16:46 +00:00
|
|
|
[{?STD_TAG, recovr}],
|
|
|
|
native),
|
2016-09-28 11:41:56 +01:00
|
|
|
io:format("FN of ~s~n", [FN]),
|
|
|
|
?assertMatch(2, LowSQN),
|
2016-10-25 23:13:14 +01:00
|
|
|
?assertMatch(probably,
|
|
|
|
leveled_cdb:cdb_keycheck(PidR,
|
|
|
|
{8,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
|
|
|
?assertMatch(missing, leveled_cdb:cdb_get(PidR,
|
|
|
|
{7,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
|
|
|
?assertMatch(missing, leveled_cdb:cdb_get(PidR,
|
|
|
|
{1,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
2017-03-20 15:43:54 +00:00
|
|
|
RKV1 = leveled_cdb:cdb_get(PidR,
|
|
|
|
{2,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key2")}),
|
2018-05-04 11:19:37 +01:00
|
|
|
?assertMatch({{_, _}, {"Value2", {[], infinity}}},
|
|
|
|
leveled_codec:from_inkerkv(RKV1)),
|
2016-10-26 20:39:16 +01:00
|
|
|
ok = leveled_cdb:cdb_deletepending(CDB),
|
2016-09-28 18:26:52 +01:00
|
|
|
ok = leveled_cdb:cdb_destroy(CDB).
|
2016-09-28 11:41:56 +01:00
|
|
|
|
2016-09-27 14:58:26 +01:00
|
|
|
|
2016-10-25 23:13:14 +01:00
|
|
|
compact_single_file_retain_test() ->
|
|
|
|
{Candidate,
|
|
|
|
LedgerSrv1,
|
|
|
|
LedgerFun1,
|
|
|
|
CompactFP,
|
|
|
|
CDB} = compact_single_file_setup(),
|
2017-01-17 16:30:04 +00:00
|
|
|
[{LowSQN, FN, PidR, _LK}] =
|
|
|
|
compact_files([Candidate],
|
2017-03-20 15:43:54 +00:00
|
|
|
#cdb_options{file_path=CompactFP, binary_mode=true},
|
2017-01-17 16:30:04 +00:00
|
|
|
LedgerFun1,
|
|
|
|
LedgerSrv1,
|
|
|
|
9,
|
2017-11-06 21:16:46 +00:00
|
|
|
[{?STD_TAG, retain}],
|
|
|
|
native),
|
2016-10-25 23:13:14 +01:00
|
|
|
io:format("FN of ~s~n", [FN]),
|
|
|
|
?assertMatch(1, LowSQN),
|
|
|
|
?assertMatch(probably,
|
|
|
|
leveled_cdb:cdb_keycheck(PidR,
|
|
|
|
{8,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
|
|
|
?assertMatch(missing, leveled_cdb:cdb_get(PidR,
|
|
|
|
{7,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
|
|
|
?assertMatch(missing, leveled_cdb:cdb_get(PidR,
|
|
|
|
{1,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key1")})),
|
2017-03-20 15:43:54 +00:00
|
|
|
RKV1 = leveled_cdb:cdb_get(PidR,
|
2016-10-25 23:13:14 +01:00
|
|
|
{2,
|
|
|
|
stnd,
|
|
|
|
test_ledgerkey("Key2")}),
|
2018-05-04 11:19:37 +01:00
|
|
|
?assertMatch({{_, _}, {"Value2", {[], infinity}}},
|
|
|
|
leveled_codec:from_inkerkv(RKV1)),
|
2016-10-26 20:39:16 +01:00
|
|
|
ok = leveled_cdb:cdb_deletepending(CDB),
|
2016-10-25 23:13:14 +01:00
|
|
|
ok = leveled_cdb:cdb_destroy(CDB).
|
|
|
|
|
2016-10-08 22:15:48 +01:00
|
|
|
compact_empty_file_test() ->
|
|
|
|
RP = "../test/journal",
|
|
|
|
FN1 = leveled_inker:filepath(RP, 1, new_journal),
|
|
|
|
CDBopts = #cdb_options{binary_mode=true},
|
|
|
|
{ok, CDB1} = leveled_cdb:cdb_open_writer(FN1, CDBopts),
|
2016-10-25 23:13:14 +01:00
|
|
|
ok = leveled_cdb:cdb_put(CDB1, {1, stnd, test_ledgerkey("Key1")}, <<>>),
|
2016-10-08 22:15:48 +01:00
|
|
|
{ok, FN2} = leveled_cdb:cdb_complete(CDB1),
|
|
|
|
{ok, CDB2} = leveled_cdb:cdb_open_reader(FN2),
|
2016-10-25 23:13:14 +01:00
|
|
|
LedgerSrv1 = [{8, {o, "Bucket", "Key1", null}},
|
|
|
|
{2, {o, "Bucket", "Key2", null}},
|
|
|
|
{3, {o, "Bucket", "Key3", null}}],
|
2016-11-04 19:33:11 +00:00
|
|
|
LedgerFun1 = fun(_Srv, _Key, _ObjSQN) -> false end,
|
2016-10-08 22:15:48 +01:00
|
|
|
Score1 = check_single_file(CDB2, LedgerFun1, LedgerSrv1, 9, 8, 4),
|
2017-11-08 11:20:22 +00:00
|
|
|
?assertMatch(100.0, Score1),
|
|
|
|
ok = leveled_cdb:cdb_deletepending(CDB2),
|
|
|
|
ok = leveled_cdb:cdb_destroy(CDB2).
|
2016-10-08 22:15:48 +01:00
|
|
|
|
2016-10-27 00:57:19 +01:00
|
|
|
compare_candidate_test() ->
|
|
|
|
Candidate1 = #candidate{low_sqn=1},
|
|
|
|
Candidate2 = #candidate{low_sqn=2},
|
|
|
|
Candidate3 = #candidate{low_sqn=3},
|
|
|
|
Candidate4 = #candidate{low_sqn=4},
|
|
|
|
?assertMatch([Candidate1, Candidate2, Candidate3, Candidate4],
|
|
|
|
sort_run([Candidate3, Candidate2, Candidate4, Candidate1])).
|
|
|
|
|
2017-09-15 15:10:04 +01:00
|
|
|
compact_singlefile_totwosmallfiles_test_() ->
|
|
|
|
{timeout, 60, fun compact_singlefile_totwosmallfiles_testto/0}.
|
|
|
|
|
|
|
|
compact_singlefile_totwosmallfiles_testto() ->
|
2016-11-04 12:22:15 +00:00
|
|
|
RP = "../test/journal",
|
|
|
|
CP = "../test/journal/journal_file/post_compact/",
|
|
|
|
ok = filelib:ensure_dir(CP),
|
|
|
|
FN1 = leveled_inker:filepath(RP, 1, new_journal),
|
|
|
|
CDBoptsLarge = #cdb_options{binary_mode=true, max_size=30000000},
|
|
|
|
{ok, CDB1} = leveled_cdb:cdb_open_writer(FN1, CDBoptsLarge),
|
|
|
|
lists:foreach(fun(X) ->
|
|
|
|
LK = test_ledgerkey("Key" ++ integer_to_list(X)),
|
2017-07-31 20:20:39 +02:00
|
|
|
Value = leveled_rand:rand_bytes(1024),
|
2017-11-06 15:54:58 +00:00
|
|
|
{IK, IV} =
|
2018-05-04 11:19:37 +01:00
|
|
|
leveled_codec:to_inkerkv(LK, X, Value,
|
|
|
|
{[], infinity},
|
2017-11-06 18:44:08 +00:00
|
|
|
native, true),
|
2017-03-20 15:43:54 +00:00
|
|
|
ok = leveled_cdb:cdb_put(CDB1, IK, IV)
|
2016-11-04 12:22:15 +00:00
|
|
|
end,
|
|
|
|
lists:seq(1, 1000)),
|
|
|
|
{ok, NewName} = leveled_cdb:cdb_complete(CDB1),
|
|
|
|
{ok, CDBr} = leveled_cdb:cdb_open_reader(NewName),
|
2017-11-06 15:54:58 +00:00
|
|
|
CDBoptsSmall =
|
|
|
|
#cdb_options{binary_mode=true, max_size=400000, file_path=CP},
|
2016-11-04 12:22:15 +00:00
|
|
|
BestRun1 = [#candidate{low_sqn=1,
|
|
|
|
filename=leveled_cdb:cdb_filename(CDBr),
|
|
|
|
journal=CDBr,
|
|
|
|
compaction_perc=50.0}],
|
|
|
|
FakeFilterFun = fun(_FS, _LK, SQN) -> SQN rem 2 == 0 end,
|
|
|
|
|
2016-11-14 11:40:02 +00:00
|
|
|
ManifestSlice = compact_files(BestRun1,
|
|
|
|
CDBoptsSmall,
|
|
|
|
FakeFilterFun,
|
|
|
|
null,
|
|
|
|
900,
|
2017-11-06 21:16:46 +00:00
|
|
|
[{?STD_TAG, recovr}],
|
|
|
|
native),
|
2016-11-04 12:22:15 +00:00
|
|
|
?assertMatch(2, length(ManifestSlice)),
|
2017-01-17 16:30:04 +00:00
|
|
|
lists:foreach(fun({_SQN, _FN, CDB, _LK}) ->
|
2016-11-04 12:22:15 +00:00
|
|
|
ok = leveled_cdb:cdb_deletepending(CDB),
|
|
|
|
ok = leveled_cdb:cdb_destroy(CDB)
|
|
|
|
end,
|
|
|
|
ManifestSlice),
|
|
|
|
ok = leveled_cdb:cdb_deletepending(CDBr),
|
|
|
|
ok = leveled_cdb:cdb_destroy(CDBr).
|
2016-11-14 20:43:38 +00:00
|
|
|
|
2017-11-09 12:42:49 +00:00
|
|
|
size_score_test() ->
|
|
|
|
KeySizeList =
|
|
|
|
[{{1, "INK", "Key1"}, 104},
|
|
|
|
{{2, "INK", "Key2"}, 124},
|
|
|
|
{{3, "INK", "Key3"}, 144},
|
|
|
|
{{4, "INK", "Key4"}, 154},
|
|
|
|
{{5, "INK", "Key5", "Subk1"}, 164},
|
|
|
|
{{6, "INK", "Key6"}, 174},
|
|
|
|
{{7, "INK", "Key7"}, 184}],
|
|
|
|
MaxSQN = 6,
|
|
|
|
CurrentList = ["Key1", "Key4", "Key5", "Key6"],
|
|
|
|
FilterFun = fun(L, K, _SQN) -> lists:member(K, L) end,
|
|
|
|
Score = size_comparison_score(KeySizeList, FilterFun, CurrentList, MaxSQN),
|
|
|
|
?assertMatch(true, Score > 69.0),
|
|
|
|
?assertMatch(true, Score < 70.0).
|
|
|
|
|
2016-11-14 20:43:38 +00:00
|
|
|
coverage_cheat_test() ->
|
|
|
|
{noreply, _State0} = handle_info(timeout, #state{}),
|
2016-11-14 20:56:59 +00:00
|
|
|
{ok, _State1} = code_change(null, #state{}, null),
|
2016-11-18 21:35:45 +00:00
|
|
|
{reply, not_supported, _State2} = handle_call(null, null, #state{}),
|
|
|
|
terminate(error, #state{}).
|
2016-11-04 12:22:15 +00:00
|
|
|
|
2017-07-31 19:53:01 +02:00
|
|
|
-endif.
|