Add logging of cache ratio

Two reasons for logging this:

- to assist in sizing the ledger cache;
- to resolve the mystery when there appear to be no fetches from the penciller (as the penciller does not report fetches served from the ledger cache).
This commit is contained in:
Martin Sumner 2019-08-29 11:26:29 +01:00
parent e3913a6c07
commit ecda13872a
2 changed files with 42 additions and 19 deletions

View file

@ -129,6 +129,7 @@
-define(SNAPTIMEOUT_LONG, 43200). % 12 hours -define(SNAPTIMEOUT_LONG, 43200). % 12 hours
-define(SST_PAGECACHELEVEL_NOLOOKUP, 1). -define(SST_PAGECACHELEVEL_NOLOOKUP, 1).
-define(SST_PAGECACHELEVEL_LOOKUP, 4). -define(SST_PAGECACHELEVEL_LOOKUP, 4).
-define(CACHE_LOGPOINT, 100000).
-define(OPTION_DEFAULTS, -define(OPTION_DEFAULTS,
[{root_path, undefined}, [{root_path, undefined},
{snapshot_bookie, undefined}, {snapshot_bookie, undefined},
@ -178,6 +179,7 @@
get_countdown = 0 :: integer(), get_countdown = 0 :: integer(),
fold_countdown = 0 :: integer(), fold_countdown = 0 :: integer(),
head_countdown = 0 :: integer(), head_countdown = 0 :: integer(),
cache_ratio = {0, 0} :: cache_ratio(),
get_timings = no_timing :: get_timings(), get_timings = no_timing :: get_timings(),
put_timings = no_timing :: put_timings(), put_timings = no_timing :: put_timings(),
fold_timings = no_timing :: fold_timings(), fold_timings = no_timing :: fold_timings(),
@ -210,6 +212,7 @@
-type fold_timings() :: no_timing|#fold_timings{}. -type fold_timings() :: no_timing|#fold_timings{}.
-type head_timings() :: no_timing|#head_timings{}. -type head_timings() :: no_timing|#head_timings{}.
-type timing_types() :: head|get|put|fold. -type timing_types() :: head|get|put|fold.
-type cache_ratio() :: {non_neg_integer(), non_neg_integer()}.
-type open_options() :: -type open_options() ::
@ -1307,10 +1310,13 @@ handle_call({get, Bucket, Key, Tag}, _From, State)
when State#state.head_only == false -> when State#state.head_only == false ->
LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag), LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
SWh = os:timestamp(), SWh = os:timestamp(),
{H0, UpdCR} =
fetch_head(LedgerKey,
State#state.penciller,
State#state.ledger_cache,
State#state.cache_ratio),
HeadResult = HeadResult =
case fetch_head(LedgerKey, case H0 of
State#state.penciller,
State#state.ledger_cache) of
not_present -> not_present ->
not_found; not_found;
Head -> Head ->
@ -1348,15 +1354,18 @@ handle_call({get, Bucket, Key, Tag}, _From, State)
{Timings, CountDown} = {Timings, CountDown} =
update_statetimings(get, Timings2, State#state.get_countdown), update_statetimings(get, Timings2, State#state.get_countdown),
{reply, Reply, State#state{get_timings = Timings, {reply, Reply, State#state{get_timings = Timings,
get_countdown = CountDown}}; get_countdown = CountDown,
cache_ratio = maybelog_cacheratio(UpdCR)}};
handle_call({head, Bucket, Key, Tag, SQNOnly}, _From, State) handle_call({head, Bucket, Key, Tag, SQNOnly}, _From, State)
when State#state.head_lookup == true -> when State#state.head_lookup == true ->
SWp = os:timestamp(), SWp = os:timestamp(),
LK = leveled_codec:to_ledgerkey(Bucket, Key, Tag), LK = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
Head = fetch_head(LK, {Head, UpdCR} =
State#state.penciller, fetch_head(LK,
State#state.ledger_cache, State#state.penciller,
State#state.head_only), State#state.ledger_cache,
State#state.cache_ratio,
State#state.head_only),
{SWr, UpdTimingsP} = {SWr, UpdTimingsP} =
update_timings(SWp, {head, pcl}, State#state.head_timings), update_timings(SWp, {head, pcl}, State#state.head_timings),
{LedgerMD, SQN, JournalCheckFrequency} = {LedgerMD, SQN, JournalCheckFrequency} =
@ -1411,7 +1420,8 @@ handle_call({head, Bucket, Key, Tag, SQNOnly}, _From, State)
Reply, Reply,
State#state{head_timings = UpdTimings, State#state{head_timings = UpdTimings,
head_countdown = CountDown, head_countdown = CountDown,
ink_checking = JournalCheckFrequency}}; ink_checking = JournalCheckFrequency,
cache_ratio = maybelog_cacheratio(UpdCR)}};
handle_call({snapshot, SnapType, Query, LongRunning}, _From, State) -> handle_call({snapshot, SnapType, Query, LongRunning}, _From, State) ->
% Snapshot the store, specifying if the snapshot should be long running % Snapshot the store, specifying if the snapshot should be long running
% (i.e. will the snapshot be queued or be required for an extended period % (i.e. will the snapshot be queued or be required for an extended period
@ -2069,20 +2079,24 @@ scan_table(Table, StartKey, EndKey, Acc, MinSQN, MaxSQN) ->
end. end.
-spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache()) -spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache(),
-> not_present|leveled_codec:ledger_value(). cache_ratio()) ->
{not_present|leveled_codec:ledger_value(),
cache_ratio()}.
%% @doc %% @doc
%% Fetch only the head of the object from the Ledger (or the bookie's recent %% Fetch only the head of the object from the Ledger (or the bookie's recent
%% ledger cache if it has just been updated). not_present is returned if the %% ledger cache if it has just been updated). not_present is returned if the
%% Key is not found %% Key is not found
fetch_head(Key, Penciller, LedgerCache) -> fetch_head(Key, Penciller, LedgerCache, CacheRatio) ->
fetch_head(Key, Penciller, LedgerCache, false). fetch_head(Key, Penciller, LedgerCache, CacheRatio, false).
-spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache(), boolean()) -spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache(),
-> not_present|leveled_codec:ledger_value(). cache_ratio(), boolean())
-> {not_present|leveled_codec:ledger_value(),
cache_ratio()}.
%% doc %% doc
%% The L0Index needs to be bypassed when running head_only %% The L0Index needs to be bypassed when running head_only
fetch_head(Key, Penciller, LedgerCache, HeadOnly) -> fetch_head(Key, Penciller, LedgerCache, {RC, CC}, HeadOnly) ->
SW = os:timestamp(), SW = os:timestamp(),
CacheResult = CacheResult =
case LedgerCache#ledger_cache.mem of case LedgerCache#ledger_cache.mem of
@ -2093,7 +2107,7 @@ fetch_head(Key, Penciller, LedgerCache, HeadOnly) ->
end, end,
case CacheResult of case CacheResult of
[{Key, Head}] -> [{Key, Head}] ->
Head; {Head, {RC + 1, CC + 1}};
[] -> [] ->
Hash = leveled_codec:segment_hash(Key), Hash = leveled_codec:segment_hash(Key),
UseL0Idx = not HeadOnly, UseL0Idx = not HeadOnly,
@ -2102,10 +2116,10 @@ fetch_head(Key, Penciller, LedgerCache, HeadOnly) ->
case leveled_penciller:pcl_fetch(Penciller, Key, Hash, UseL0Idx) of case leveled_penciller:pcl_fetch(Penciller, Key, Hash, UseL0Idx) of
{Key, Head} -> {Key, Head} ->
maybe_longrunning(SW, pcl_head), maybe_longrunning(SW, pcl_head),
Head; {Head, {RC + 1, CC}};
not_present -> not_present ->
maybe_longrunning(SW, pcl_head), maybe_longrunning(SW, pcl_head),
not_present {not_present, {RC + 1, CC}}
end end
end. end.
@ -2436,6 +2450,13 @@ update_timings(SW, {fold, setup}, Timings) ->
Timings0 = Timings#fold_timings{setup_time = FST, sample_count = CNT}, Timings0 = Timings#fold_timings{setup_time = FST, sample_count = CNT},
{no_timing, Timings0}. {no_timing, Timings0}.
-spec maybelog_cacheratio(cache_ratio()) -> cache_ratio().
%% @doc
%% Once the request count reaches the configured log point, emit the
%% B0021 log with the request and cache-hit counters and reset both to
%% zero; otherwise return the ratio unchanged.
maybelog_cacheratio({RC, CC}) when RC == ?CACHE_LOGPOINT ->
    leveled_log:log("B0021", [RC, CC]),
    {0, 0};
maybelog_cacheratio(CacheRatio) ->
    CacheRatio.
%%%============================================================================ %%%============================================================================
%%% Test %%% Test
%%%============================================================================ %%%============================================================================

View file

@ -80,6 +80,8 @@
{"B0020", {"B0020",
{warn, "Ratio of penciller cache size ~w to bookie's memory " {warn, "Ratio of penciller cache size ~w to bookie's memory "
++ "cache size ~w is larger than expected"}}, ++ "cache size ~w is larger than expected"}},
{"B0021",
{info, "Bookie fetch RequestCount=~w and CacheCount=~w"}},
{"R0001", {"R0001",
{debug, "Object fold to process batch of ~w objects"}}, {debug, "Object fold to process batch of ~w objects"}},