Merge pull request #276 from martinsumner/mas-i275-logging

Mas i275 logging
commit 984a2aba5a
Martin Sumner authored 2019-05-11 16:00:33 +01:00; committed by GitHub
8 changed files with 59 additions and 44 deletions

src/leveled.app.src

@@ -1,7 +1,7 @@
 {application, leveled,
  [
   {description, "Key Value store based on LSM-Tree and designed for larger values"},
-  {vsn, "0.9.13"},
+  {vsn, "0.9.14"},
   {registered, []},
   {applications, [
    kernel,

src/leveled_bookie.erl

@@ -109,13 +109,13 @@
 -define(CACHE_SIZE_JITTER, 25).
 -define(JOURNAL_SIZE_JITTER, 20).
 -define(ABSOLUTEMAX_JOURNALSIZE, 4000000000).
--define(LONG_RUNNING, 200000).
-    % An individual task taking > 200ms gets a specific log
+-define(LONG_RUNNING, 1000000).
+    % An individual task taking > 1s gets a specific log
 -define(COMPRESSION_METHOD, lz4).
 -define(COMPRESSION_POINT, on_receipt).
 -define(LOG_LEVEL, info).
 -define(TIMING_SAMPLESIZE, 100).
--define(TIMING_SAMPLECOUNTDOWN, 10000).
+-define(TIMING_SAMPLECOUNTDOWN, 50000).
 -define(DUMMY, dummy). % Dummy key used for mput operations
 -define(MAX_KEYCHECK_FREQUENCY, 100).
 -define(MIN_KEYCHECK_FREQUENCY, 1).
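Note: ?LONG_RUNNING is a microsecond threshold. A minimal sketch of how such a threshold feeds the B0013 log, illustrative only - the actual helper in leveled_bookie is not part of this diff, so treat the function name and shape as assumptions:

```erlang
%% Sketch only - approximates the check behind the B0013 log.
%% SW is a start timestamp captured earlier with os:timestamp().
maybe_longrunning(SW, Aspect) ->
    case timer:now_diff(os:timestamp(), SW) of
        N when N > 1000000 ->   % ?LONG_RUNNING, now > 1s
            leveled_log:log("B0013", [N, Aspect]);
        _ ->
            ok
    end.
```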

src/leveled_cdb.erl

@@ -128,7 +128,7 @@
 -define(WRITE_OPS, [binary, raw, read, write]).
 -define(PENDING_ROLL_WAIT, 30).
 -define(DELETE_TIMEOUT, 10000).
--define(TIMING_SAMPLECOUNTDOWN, 1000).
+-define(TIMING_SAMPLECOUNTDOWN, 5000).
 -define(TIMING_SAMPLESIZE, 100).
 -define(MAX_OBJECT_SIZE, 1000000000).
     % 1GB but really should be much smaller than this

src/leveled_inker.erl

@@ -733,10 +733,14 @@ handle_cast({clerk_complete, ManifestSnippet, FilesToDelete}, State) ->
                         pending_removals=FilesToDelete,
                         compaction_pending=false}};
 handle_cast({release_snapshot, Snapshot}, State) ->
-    Rs = lists:keydelete(Snapshot, 1, State#state.registered_snapshots),
     leveled_log:log("I0003", [Snapshot]),
-    leveled_log:log("I0004", [length(Rs)]),
-    {noreply, State#state{registered_snapshots=Rs}};
+    case lists:keydelete(Snapshot, 1, State#state.registered_snapshots) of
+        [] ->
+            {noreply, State#state{registered_snapshots=[]}};
+        Rs ->
+            leveled_log:log("I0004", [length(Rs)]),
+            {noreply, State#state{registered_snapshots=Rs}}
+    end;
 handle_cast({log_level, LogLevel}, State) ->
     INC = State#state.clerk,
     ok = leveled_iclerk:clerk_loglevel(INC, LogLevel),
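With this rework, releasing the final snapshot emits only the (now debug-level) I0003 log, and no I0004 "remaining snapshots" count of zero. A minimal sketch of the branch logic, with hypothetical names:

```erlang
%% Sketch with hypothetical names - mirrors the case clause above.
remaining_after_release(Snapshot, RegisteredSnapshots) ->
    case lists:keydelete(Snapshot, 1, RegisteredSnapshots) of
        [] ->
            none_remaining;          % I0004 is skipped on this branch
        Rs ->
            {remaining, length(Rs)}  % I0004 logs this count
    end.
```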

src/leveled_log.erl

@@ -56,7 +56,7 @@
     {"B0008",
         {info, "Bucket list finds no more results"}},
     {"B0009",
-        {info, "Bucket list finds Bucket ~w"}},
+        {debug, "Bucket list finds Bucket ~w"}},
     {"B0011",
         {warn, "Call to destroy the store and so all files to be removed"}},
     {"B0013",
@@ -191,7 +191,7 @@
     {"PC006",
         {debug, "Work prompted but none needed"}},
     {"PC007",
-        {info, "Clerk prompting Penciller regarding manifest change"}},
+        {debug, "Clerk prompting Penciller regarding manifest change"}},
     {"PC008",
         {info, "Merge from level ~w to merge into ~w files below"}},
     {"PC009",
@@ -252,7 +252,7 @@
             ++ "pmem_fetch=~w merge_lists=~w build_slots=~w "
             ++ "build_summary=~w read_switch=~w"}},
     {"SST12",
-        {info, "SST Timings for sample_count=~w"
+        {info, "SST Timings at level=~w for sample_count=~w"
             ++ " at timing points index_query_time=~w"
             ++ " lookup_cache_time=~w slot_index_time=~w "
             ++ " fetch_cache_time=~w slot_fetch_time=~w"
@@ -269,9 +269,9 @@
         {info, "Unexpected failure to fetch value for Key=~w SQN=~w "
             ++ "with reason ~w"}},
     {"I0002",
-        {info, "Journal snapshot ~w registered at SQN ~w"}},
+        {debug, "Journal snapshot ~w registered at SQN ~w"}},
     {"I0003",
-        {info, "Journal snapshot ~w released"}},
+        {debug, "Journal snapshot ~w released"}},
     {"I0004",
         {info, "Remaining number of journal snapshots is ~w"}},
     {"I0005",

src/leveled_penciller.erl

@@ -234,7 +234,7 @@
 -define(PROMPT_WAIT_ONL0, 5).
 -define(WORKQUEUE_BACKLOG_TOLERANCE, 4).
 -define(COIN_SIDECOUNT, 5).
--define(SLOW_FETCH, 100000).
+-define(SLOW_FETCH, 500000). % Log a very slow fetch - longer than 500ms
 -define(ITERATOR_SCANWIDTH, 4).
 -define(TIMING_SAMPLECOUNTDOWN, 10000).
 -define(TIMING_SAMPLESIZE, 100).
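Like ?LONG_RUNNING above, ?SLOW_FETCH is in microseconds. A hedged sketch of the shape of check it drives - the penciller's real slow-fetch logging is not shown in this diff, so the function and its arguments are illustrative:

```erlang
%% Illustrative only. TimerUS is a fetch duration in microseconds;
%% LogSlow is a fun emitting whichever slow-fetch log the module uses.
maybe_log_slowfetch(TimerUS, Result, LogSlow) when TimerUS > 500000 ->
    LogSlow(TimerUS),   % fires only beyond ?SLOW_FETCH (500ms)
    Result;
maybe_log_slowfetch(_TimerUS, Result, _LogSlow) ->
    Result.
```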
@@ -1142,9 +1142,10 @@ start_from_file(PCLopts) ->
     %% Open manifest
     Manifest0 = leveled_pmanifest:open_manifest(RootPath),
     OpenFun =
-        fun(FN) ->
+        fun(FN, Level) ->
             {ok, Pid, {_FK, _LK}, Bloom} =
-                leveled_sst:sst_open(sst_rootpath(RootPath), FN, OptsSST),
+                leveled_sst:sst_open(sst_rootpath(RootPath),
+                                        FN, OptsSST, Level),
             {Pid, Bloom}
         end,
     SQNFun = fun leveled_sst:sst_getmaxsequencenumber/1,
@@ -1161,7 +1162,8 @@ start_from_file(PCLopts) ->
             leveled_log:log("P0015", [L0FN]),
             L0Open = leveled_sst:sst_open(sst_rootpath(RootPath),
                                             L0FN,
-                                            OptsSST),
+                                            OptsSST,
+                                            0),
             {ok, L0Pid, {L0StartKey, L0EndKey}, Bloom} = L0Open,
             L0SQN = leveled_sst:sst_getmaxsequencenumber(L0Pid),
             L0Entry = #manifest_entry{start_key = L0StartKey,

src/leveled_pmanifest.erl

@@ -557,7 +557,7 @@ load_level(LevelIdx, Level, LoadFun, SQNFun) ->
     HigherLevelLoadFun =
         fun(ME, {L_Out, L_MaxSQN, FileList, BloomL}) ->
             FN = ME#manifest_entry.filename,
-            {P, Bloom} = LoadFun(FN),
+            {P, Bloom} = LoadFun(FN, LevelIdx),
             SQN = SQNFun(P),
             {[ME#manifest_entry{owner=P}|L_Out],
                 max(SQN, L_MaxSQN),
@@ -567,7 +567,7 @@ load_level(LevelIdx, Level, LoadFun, SQNFun) ->
     LowerLevelLoadFun =
         fun({EK, ME}, {L_Out, L_MaxSQN, FileList, BloomL}) ->
             FN = ME#manifest_entry.filename,
-            {P, Bloom} = LoadFun(FN),
+            {P, Bloom} = LoadFun(FN, LevelIdx),
             SQN = SQNFun(P),
             {[{EK, ME#manifest_entry{owner=P}}|L_Out],
                 max(SQN, L_MaxSQN),
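Together with the penciller change above, the load callback is now arity two, receiving the manifest level index alongside the filename. The contract, expressed as hypothetical type specs - the code passes plain funs, and these names are for illustration only:

```erlang
%% Hypothetical type names for the revised callback shapes; the
%% second load_fun() argument is the manifest level index.
-type load_fun() :: fun((string(), non_neg_integer()) -> {pid(), binary()}).
-type sqn_fun() :: fun((pid()) -> non_neg_integer()).
```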

src/leveled_sst.erl

@@ -84,7 +84,7 @@
 -define(DELETE_TIMEOUT, 10000).
 -define(TREE_TYPE, idxt).
 -define(TREE_SIZE, 4).
--define(TIMING_SAMPLECOUNTDOWN, 10000).
+-define(TIMING_SAMPLECOUNTDOWN, 20000).
 -define(TIMING_SAMPLESIZE, 100).
 -define(CACHE_SIZE, 32).
 -define(BLOCK_LENGTHS_LENGTH, 20).
@@ -113,7 +113,7 @@
 -export([sst_new/6,
             sst_new/8,
             sst_newlevelzero/7,
-            sst_open/3,
+            sst_open/4,
             sst_get/2,
             sst_get/3,
             sst_expandpointer/5,
@@ -192,7 +192,8 @@
         starting_pid :: pid()|undefined,
         fetch_cache = array:new([{size, ?CACHE_SIZE}]),
         new_slots :: list()|undefined,
-        deferred_startup_tuple :: tuple()|undefined}).
+        deferred_startup_tuple :: tuple()|undefined,
+        level :: non_neg_integer()|undefined}).

 -record(sst_timings,
         {sample_count = 0 :: integer(),
@@ -224,7 +225,7 @@
 %%% API
 %%%============================================================================

--spec sst_open(string(), string(), sst_options())
+-spec sst_open(string(), string(), sst_options(), non_neg_integer())
             -> {ok, pid(),
                 {leveled_codec:ledger_key(), leveled_codec:ledger_key()},
                 binary()}.
@@ -236,10 +237,11 @@
 %% term order.
 %%
 %% The filename should include the file extension.
-sst_open(RootPath, Filename, OptsSST) ->
+sst_open(RootPath, Filename, OptsSST, Level) ->
     {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
     case gen_fsm:sync_send_event(Pid,
-                                    {sst_open, RootPath, Filename, OptsSST},
+                                    {sst_open,
+                                        RootPath, Filename, OptsSST, Level},
                                     infinity) of
         {ok, {SK, EK}, Bloom} ->
             {ok, Pid, {SK, EK}, Bloom}
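A hypothetical call under the widened API, opening a file that will serve level 3 - the path and filename are invented for illustration, and OptsSST is assumed to be an sst_options() record as built by the penciller:

```erlang
%% Hypothetical example - paths invented, OptsSST assumed in scope.
{ok, SSTPid, {FirstKey, LastKey}, Bloom} =
    leveled_sst:sst_open("/tmp/ledger/ledger_files",
                            "example_file.sst",
                            OptsSST,
                            3).
```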
@@ -456,8 +458,8 @@ sst_checkready(Pid) ->
 %% This simply prompts a GC on the PID now (as this may now be a long-lived
 %% file, so don't want all the startup state to be held on memory - want to
 %% proactively drop it
-sst_switchlevels(Pid, _NewLevel) ->
-    gen_fsm:send_event(Pid, switch_levels).
+sst_switchlevels(Pid, NewLevel) ->
+    gen_fsm:send_event(Pid, {switch_levels, NewLevel}).

 -spec sst_close(pid()) -> ok.
 %% @doc
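The previously ignored level argument is now carried in the event, so callers pass the destination level through, e.g. when a file is promoted during a merge. A sketch, with SSTPid assumed bound to an open file:

```erlang
%% The file both runs a GC and records the level it now serves,
%% which its timing logs will report.
ok = leveled_sst:sst_switchlevels(SSTPid, 2).
```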
@@ -481,7 +483,7 @@ sst_printtimings(Pid) ->
 init([]) ->
     {ok, starting, #state{}}.

-starting({sst_open, RootPath, Filename, OptsSST}, _From, State) ->
+starting({sst_open, RootPath, Filename, OptsSST, Level}, _From, State) ->
     leveled_log:save(OptsSST#sst_options.log_options),
     {UpdState, Bloom} =
         read_file(Filename, State#state{root_path=RootPath}),
@@ -489,7 +491,7 @@ starting({sst_open, RootPath, Filename, OptsSST}, _From, State) ->
     {reply,
         {ok, {Summary#summary.first_key, Summary#summary.last_key}, Bloom},
         reader,
-        UpdState};
+        UpdState#state{level = Level}};
 starting({sst_new,
             RootPath, Filename, Level,
             {SlotList, FirstKey}, MaxSQN,
@@ -519,14 +521,15 @@ starting({sst_new,
         {ok, {Summary#summary.first_key, Summary#summary.last_key}, Bloom},
         reader,
         UpdState#state{blockindex_cache = BlockIndex,
-                        starting_pid = StartingPID}};
+                        starting_pid = StartingPID,
+                        level = Level}};
 starting({sst_newlevelzero, RootPath, Filename,
             Penciller, MaxSQN,
             OptsSST, IdxModDate}, _From, State) ->
     DeferredStartupTuple =
         {RootPath, Filename, Penciller, MaxSQN, OptsSST, IdxModDate},
     {reply, ok, starting,
-        State#state{deferred_startup_tuple = DeferredStartupTuple}};
+        State#state{deferred_startup_tuple = DeferredStartupTuple, level = 0}};
 starting(close, _From, State) ->
     % No file should have been created, so nothing to close.
     {stop, normal, ok, State}.
@@ -630,7 +633,9 @@ reader({get_kv, LedgerKey, Hash}, _From, State) ->
         fetch(LedgerKey, Hash, State, State#state.timings),
     {UpdTimings0, CountDown} =
-        update_statetimings(UpdTimings, State#state.timings_countdown),
+        update_statetimings(UpdTimings,
+                            State#state.timings_countdown,
+                            State#state.level),

     {reply, Result, reader, UpdState#state{timings = UpdTimings0,
                                             timings_countdown = CountDown}};
@@ -689,7 +694,7 @@ reader(get_maxsequencenumber, _From, State) ->
     Summary = State#state.summary,
     {reply, Summary#summary.max_sqn, reader, State};
 reader(print_timings, _From, State) ->
-    log_timings(State#state.timings),
+    log_timings(State#state.timings, State#state.level),
     {reply, ok, reader, State};
 reader({set_for_delete, Penciller}, _From, State) ->
     leveled_log:log("SST06", [State#state.filename]),
@@ -711,9 +716,9 @@
 reader(close, _From, State) ->
     ok = file:close(State#state.handle),
     {stop, normal, ok, State}.

-reader(switch_levels, State) ->
+reader({switch_levels, NewLevel}, State) ->
     erlang:garbage_collect(self()),
-    {next_state, reader, State}.
+    {next_state, reader, State#state{level = NewLevel}}.

 delete_pending({get_kv, LedgerKey, Hash}, _From, State) ->
@@ -2479,7 +2484,7 @@ log_buildtimings(Timings, LI) ->
                     element(2, LI)]).

--spec update_statetimings(sst_timings(), integer())
+-spec update_statetimings(sst_timings(), integer(), non_neg_integer())
         -> {sst_timings(), integer()}.
 %% @doc
 %%
@@ -2490,23 +2495,27 @@ log_buildtimings(Timings, LI) ->
 %%
 %% Outside of sample windows the timings object should be set to the atom
 %% no_timing. no_timing is a valid state for the cdb_timings type.
-update_statetimings(no_timing, 0) ->
+update_statetimings(no_timing, 0, _Level) ->
     {#sst_timings{}, 0};
-update_statetimings(Timings, 0) ->
+update_statetimings(Timings, 0, Level) ->
     case Timings#sst_timings.sample_count of
         SC when SC >= ?TIMING_SAMPLESIZE ->
-            log_timings(Timings),
-            {no_timing, leveled_rand:uniform(2 * ?TIMING_SAMPLECOUNTDOWN)};
+            log_timings(Timings, Level),
+            % If file at lower level wait longer before taking another
+            % sample
+            {no_timing,
+                leveled_rand:uniform(2 * ?TIMING_SAMPLECOUNTDOWN)};
         _SC ->
             {Timings, 0}
     end;
-update_statetimings(no_timing, N) ->
+update_statetimings(no_timing, N, _Level) ->
     {no_timing, N - 1}.

-log_timings(no_timing) ->
+log_timings(no_timing, _Level) ->
     ok;
-log_timings(Timings) ->
-    leveled_log:log("SST12", [Timings#sst_timings.sample_count,
+log_timings(Timings, Level) ->
+    leveled_log:log("SST12", [Level,
+                                Timings#sst_timings.sample_count,
                                 Timings#sst_timings.index_query_time,
                                 Timings#sst_timings.lookup_cache_time,
                                 Timings#sst_timings.slot_index_time,