diff --git a/priv/leveled.schema b/priv/leveled.schema index 7e34253..4498573 100644 --- a/priv/leveled.schema +++ b/priv/leveled.schema @@ -41,6 +41,7 @@ {datatype, atom} ]}. + %% @doc Compression point %% The point at which compression is applied to the Journal (the Ledger is %% always compressed). Use on_receipt or on_compact. on_compact is suitable @@ -51,6 +52,15 @@ {datatype, atom} ]}. +%% @doc Log level +%% Can be debug, info, warn, error or critical +%% Set the minimum log level to be used within leveled. Leveled will log many +%% lines to allow for stats to be extracted by those using log indexers such as +%% Splunk +{mapping, "leveled.log_level", "leveled.log_level", [ + {default, info}, + {datatype, atom} +]}. %% @doc The approximate size (in bytes) when a Journal file should be rolled. %% Normally keep this as around the size of o(100K) objects. Default is 500MB diff --git a/src/leveled_bookie.erl b/src/leveled_bookie.erl index 11a5312..16be8ef 100644 --- a/src/leveled_bookie.erl +++ b/src/leveled_bookie.erl @@ -107,7 +107,8 @@ -define(CACHE_SIZE_JITTER, 25). -define(JOURNAL_SIZE_JITTER, 20). -define(ABSOLUTEMAX_JOURNALSIZE, 4000000000). --define(LONG_RUNNING, 80000). +-define(LONG_RUNNING, 200000). + % An individual task taking > 200ms gets a specific log -define(COMPRESSION_METHOD, lz4). -define(COMPRESSION_POINT, on_receipt). -define(LOG_LEVEL, info). 
diff --git a/src/leveled_cdb.erl b/src/leveled_cdb.erl index 8d8dcb0..54817a7 100644 --- a/src/leveled_cdb.erl +++ b/src/leveled_cdb.erl @@ -894,7 +894,12 @@ open_active_file(FileName) when is_list(FileName) -> {ok, LastPosition} -> ok = file:close(Handle); {ok, EndPosition} -> - leveled_log:log("CDB06", [LastPosition, EndPosition]), + case {LastPosition, EndPosition} of + {?BASE_POSITION, 0} -> + ok; + _ -> + leveled_log:log("CDB06", [LastPosition, EndPosition]) + end, {ok, _LastPosition} = file:position(Handle, LastPosition), ok = file:truncate(Handle), ok = file:close(Handle) @@ -1277,7 +1282,13 @@ startup_filter(Key, _ValueAsBin, Position, {Hashtree, _LastKey}, _ExtractFun) -> scan_over_file(Handle, Position, FilterFun, Output, LastKey) -> case saferead_keyvalue(Handle) of false -> - leveled_log:log("CDB09", [Position]), + case {LastKey, Position} of + {empty, ?BASE_POSITION} -> + % Not interesting that we've nothing to read at base + ok; + _ -> + leveled_log:log("CDB09", [Position]) + end, % Bring file back to that position {ok, Position} = file:position(Handle, {bof, Position}), {eof, Output}; @@ -1896,7 +1907,7 @@ dump(FileName) -> end, NumberOfPairs = lists:foldl(Fn, 0, lists:seq(0,255)) bsr 1, io:format("Count of keys in db is ~w~n", [NumberOfPairs]), - {ok, _} = file:position(Handle, {bof, 2048}), + {ok, _} = file:position(Handle, {bof, ?BASE_POSITION}), Fn1 = fun(_I, Acc) -> {KL, VL} = read_next_2_integers(Handle), {Key, KB} = safe_read_next_keybin(Handle, KL), diff --git a/src/leveled_head.erl b/src/leveled_head.erl index d6ffd4f..b096844 100644 --- a/src/leveled_head.erl +++ b/src/leveled_head.erl @@ -85,6 +85,7 @@ -export_type([object_tag/0, + headonly_tag/0, head/0, object_metadata/0, appdefinable_function_tuple/0]). 
diff --git a/src/leveled_log.erl b/src/leveled_log.erl index 8c0837a..33dafe4 100644 --- a/src/leveled_log.erl +++ b/src/leveled_log.erl @@ -60,7 +60,7 @@ {"B0011", {warn, "Call to destroy the store and so all files to be removed"}}, {"B0013", - {warn, "Long running task took ~w microseconds with task of type ~w"}}, + {warn, "Long running task took ~w microseconds with task_type=~w"}}, {"B0015", {info, "Put timing with sample_count=~w and mem_time=~w ink_time=~w" ++ " with total_object_size=~w"}}, @@ -142,7 +142,8 @@ {warn, "We're doomed - intention recorded to destroy all files"}}, {"P0031", {info, "Completion of update to levelzero" - ++ " with cache_size=~w status=~w and update_success=~w"}}, + ++ " with cache_size=~w level0_due=~w" + ++ " and change_pending=~w"}}, {"P0032", {info, "Fetch head timing with sample_count=~w and level timings of" ++ " foundmem_time=~w found0_time=~w found1_time=~w" @@ -207,7 +208,7 @@ {"PC015", {info, "File created"}}, {"PC016", - {info, "Slow fetch from SFT ~w of ~w microseconds at level ~w " + {info, "Slow fetch from SFT ~w of ~w us at level ~w " ++ "with result ~w"}}, {"PC017", {info, "Notified clerk of manifest change"}}, @@ -222,18 +223,18 @@ {"PC022", {info, "Storing reference to deletions at ManifestSQN=~w"}}, {"PM002", - {info, "Completed dump of L0 cache to list of size ~w"}}, + {info, "Completed dump of L0 cache to list of l0cache_size=~w"}}, {"SST01", - {info, "SST timing for result ~w is sample ~w total ~w and max ~w"}}, + {info, "SST timing for result=~w is sample=~w total=~w and max=~w"}}, {"SST02", {error, "False result returned from SST with filename ~s as " ++ "slot ~w has failed crc check"}}, {"SST03", - {info, "Opening SST file with filename ~s slots ~w and" + {info, "Opening SST file with filename ~s slot_count=~w and" ++ " max sqn ~w"}}, {"SST04", - {info, "Exit called for reason ~w on filename ~s"}}, + {debug, "Exit called for reason ~w on filename ~s"}}, {"SST05", {warn, "Rename rogue filename ~s to ~s"}}, 
{"SST06", @@ -359,14 +360,15 @@ {"CDB05", {info, "Closing of filename ~s from state ~w for reason ~w"}}, {"CDB06", - {info, "File to be truncated at last position of ~w with end of " + {warn, "File to be truncated at last position of ~w with end of " ++ "file at ~w"}}, {"CDB07", {info, "Hashtree computed"}}, {"CDB08", {info, "Renaming file from ~s to ~s for which existence is ~w"}}, {"CDB09", - {info, "Failure to read Key/Value at Position ~w in scan"}}, + {info, "Failure to read Key/Value at Position ~w in scan " ++ + "this may be the end of the file"}}, {"CDB10", {info, "CRC check failed due to mismatch"}}, {"CDB11", @@ -374,17 +376,17 @@ {"CDB12", {info, "HashTree written"}}, {"CDB13", - {info, "Write options of ~w"}}, + {debug, "Write options of ~w"}}, {"CDB14", {info, "Microsecond timings for hashtree build of " - ++ "to_list ~w sort ~w build ~w"}}, + ++ "to_list=~w sort=~w build=~w"}}, {"CDB15", {info, "Collision in search for hash ~w"}}, {"CDB16", {info, "CDB scan from start ~w in file with end ~w and last_key ~w"}}, {"CDB17", - {info, "After ~w PUTs total write time is ~w total sync time is ~w " - ++ "and max write time is ~w and max sync time is ~w"}}, + {info, "After ~w PUTs total_write_time=~w total_sync_time=~w " + ++ "and max_write_time=~w and max_sync_time=~w"}}, {"CDB18", {info, "Handled return and write of hashtable"}}, {"CDB19", @@ -471,9 +473,10 @@ log(LogRef, Subs, SupportedLogLevels) -> {LogRef, {LogLevel, LogText}} -> case should_i_log(LogLevel, SupportedLogLevels, LogRef) of true -> - io:format(format_time() - ++ " " ++ LogRef ++ " ~w " - ++ LogText ++ "~n", + io:format(format_time() ++ " " + ++ atom_to_list(LogLevel) ++ " " + ++ LogRef ++ " ~w " + ++ LogText ++ "~n", [self()|Subs]); false -> ok @@ -507,18 +510,21 @@ log_timer(LogRef, Subs, StartTime, SupportedLogLevels) -> {LogRef, {LogLevel, LogText}} -> case should_i_log(LogLevel, SupportedLogLevels, LogRef) of true -> - MicroS = timer:now_diff(os:timestamp(), StartTime), - {Unit, Time} = 
case MicroS of - MicroS when MicroS < 1000 -> - {"microsec", MicroS}; - MicroS -> - {"ms", MicroS div 1000} - end, - io:format(format_time() - ++ " " ++ LogRef ++ " ~w " - ++ LogText - ++ " with time taken ~w " ++ Unit ++ "~n", - [self()|Subs] ++ [Time]); + DurationText = + case timer:now_diff(os:timestamp(), StartTime) of + US when US > 1000 -> + " with us_duration=" ++ integer_to_list(US) ++ + " or ms_duration=" + ++ integer_to_list(US div 1000); + US -> + " with us_duration=" ++ integer_to_list(US) + end, + io:format(format_time() ++ " " + ++ atom_to_list(LogLevel) ++ " " + ++ LogRef ++ " ~w " + ++ LogText + ++ DurationText ++ "~n", + [self()|Subs]); false -> ok end; diff --git a/src/leveled_penciller.erl b/src/leveled_penciller.erl index 584974d..463387d 100644 --- a/src/leveled_penciller.erl +++ b/src/leveled_penciller.erl @@ -234,7 +234,7 @@ -define(PROMPT_WAIT_ONL0, 5). -define(WORKQUEUE_BACKLOG_TOLERANCE, 4). -define(COIN_SIDECOUNT, 5). --define(SLOW_FETCH, 20000). +-define(SLOW_FETCH, 100000). -define(ITERATOR_SCANWIDTH, 4). -define(TIMING_SAMPLECOUNTDOWN, 10000). -define(TIMING_SAMPLESIZE, 100). @@ -693,7 +693,7 @@ handle_call({push_mem, {LedgerTable, PushedIdx, MinSQN, MaxSQN}}, State#state{levelzero_pending=L0Pend, levelzero_constructor=L0Constructor}}; {false, false} -> - leveled_log:log("P0018", [ok, false, false]), + % leveled_log:log("P0018", [ok, false, false]), PushedTree = case is_tuple(LedgerTable) of true -> @@ -1254,7 +1254,7 @@ archive_files(RootPath, UsedFileList) -> -> pcl_state(). %% @doc %% Update the in-memory cache of recent changes for the penciller. This is -%% the level zer at the top of the tree. +%% the level zero at the top of the tree. %% Once the update is made, there needs to be a decision to potentially roll %% the level-zero memory to an on-disk level zero sst file. 
This can only %% happen when the cache has exeeded the size threshold (with some jitter @@ -1295,17 +1295,18 @@ update_levelzero(L0Size, {PushedTree, PushedIdx, MinSQN, MaxSQN}, end, NoPendingManifestChange = not State#state.work_ongoing, JitterCheck = RandomFactor or CacheMuchTooBig, - case {CacheTooBig, JitterCheck, NoPendingManifestChange} of - {true, true, true} -> + Due = CacheTooBig and JitterCheck, + case {Due, NoPendingManifestChange} of + {true, true} -> {L0Pend, L0Constructor, none} = maybe_roll_memory(UpdState, false), - leveled_log:log_timer("P0031", [true, true, L0Pend], SW), + LogSubs = [NewL0Size, true, true], + leveled_log:log_timer("P0031", LogSubs, SW), UpdState#state{levelzero_pending=L0Pend, levelzero_constructor=L0Constructor}; _ -> - leveled_log:log_timer("P0031", - [CacheTooBig, JitterCheck, false], - SW), + LogSubs = [NewL0Size, Due, NoPendingManifestChange], + leveled_log:log_timer("P0031", LogSubs, SW), UpdState end end.