diff --git a/src/leveled_bookie.erl b/src/leveled_bookie.erl index 109dfaf..b14dfde 100644 --- a/src/leveled_bookie.erl +++ b/src/leveled_bookie.erl @@ -1,6 +1,6 @@ %% -------- Overview --------- %% -%% The eleveleddb is based on the LSM-tree similar to leveldb, except that: +%% Leveled is based on the LSM-tree similar to leveldb, except that: %% - Keys, Metadata and Values are not persisted together - the Keys and %% Metadata are kept in a tree-based ledger, whereas the values are stored %% only in a sequential Journal. @@ -11,7 +11,7 @@ %% and frequent use of iterators) %% - The Journal is an extended nursery log in leveldb terms. It is keyed %% on the sequence number of the write -%% - The ledger is a merge tree, where the key is the actaul object key, and +%% - The ledger is a merge tree, where the key is the actual object key, and %% the value is the metadata of the object including the sequence number %% %% @@ -228,14 +228,14 @@ % The size of the Bookie's memory, the cache of the recent % additions to the ledger. Defaults to ?CACHE_SIZE, plus some % randomised jitter (randomised jitter will still be added to - % configured values + % configured values) % The minimum value is 100 - any lower value will be ignored {max_journalsize, pos_integer()} | % The maximum size of a journal file in bytes. The absolute % maximum must be 4GB due to 4 byte file pointers being used {max_journalobjectcount, pos_integer()} | % The maximum size of the journal by count of the objects. The - % journal must remian within the limit set by both this figures and + % journal must remain within the limit set by both these figures and % the max_journalsize {max_sstslots, pos_integer()} | % The maximum number of slots in a SST file. All testing is done @@ -248,7 +248,7 @@ % partially in hardware (e.g through use of FBWC). 
% riak_sync is used for backwards compatability with OTP16 - and % will manually call sync() after each write (rather than use the - % O_SYNC option on startup + % O_SYNC option on startup) {head_only, false|with_lookup|no_lookup} | % When set to true, there are three fundamental changes as to how % leveled will work: @@ -455,7 +455,7 @@ book_tempput(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL) %% - A Primary Key and a Value %% - IndexSpecs - a set of secondary key changes associated with the %% transaction -%% - A tag indictaing the type of object. Behaviour for metadata extraction, +%% - A tag indicating the type of object. Behaviour for metadata extraction, %% and ledger compaction will vary by type. There are three currently %% implemented types i (Index), o (Standard), o_rkv (Riak). Keys added with %% Index tags are not fetchable (as they will not be hashed), but are @@ -466,7 +466,7 @@ book_tempput(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL) %% %% The inker will pass the PK/Value/IndexSpecs to the current (append only) %% CDB journal file to persist the change. The call should return either 'ok' -%% or 'roll'. -'roll' indicates that the CDB file has insufficient capacity for +%% or 'roll'. 'roll' indicates that the CDB file has insufficient capacity for %% this write, and a new journal file should be created (with appropriate %% manifest changes to be made). %% @@ -1753,7 +1753,7 @@ return_snapfun(State, SnapType, Query, LongRunning, SnapPreFold) -> -spec snaptype_by_presence(boolean()) -> store|ledger. %% @doc %% Folds that traverse over object heads, may also either require to return -%% the object,or at least confirm th eobject is present in the Ledger. This +%% the object, or at least confirm the object is present in the Ledger. This %% is achieved by enabling presence - and this will change the type of %% snapshot to one that covers the whole store (i.e. both ledger and journal), %% rather than just the ledger. 
@@ -1764,7 +1764,7 @@ snaptype_by_presence(false) -> -spec get_runner(book_state(), tuple()) -> {async, fun()}. %% @doc -%% Getan {async, Runner} for a given fold type. Fold types have different +%% Get an {async, Runner} for a given fold type. Fold types have different %% tuple inputs get_runner(State, {index_query, Constraint, FoldAccT, Range, TermHandling}) -> {IdxFld, StartT, EndT} = Range, @@ -1933,7 +1933,7 @@ get_deprecatedrunner(State, {tuple(), tuple(), tuple()|no_lookup}. %% @doc %% Convert a range of binary keys into a ledger key range, returning -%% {StartLK, EndLK, Query} where Query is to indicate whether the query +%% {StartLK, EndLK, Query} where Query is to indicate whether the query %% range is worth using to minimise the cost of the snapshot return_ledger_keyrange(Tag, Bucket, KeyRange) -> {StartKey, EndKey, Snap} = @@ -1977,7 +1977,7 @@ maybe_longrunning(SW, Aspect) -> -spec readycache_forsnapshot(ledger_cache(), tuple()|no_lookup|undefined) -> ledger_cache(). %% @doc -%% Strip the ledger cach back to only the relevant informaiton needed in +%% Strip the ledger cache back to only the relevant information needed in %% the query, and to make the cache a snapshot (and so not subject to changes %% such as additions to the ets table) readycache_forsnapshot(LedgerCache, {StartKey, EndKey}) -> @@ -2190,7 +2190,7 @@ addto_ledgercache({H, SQN, KeyChanges}, Cache) -> loader) -> ledger_cache(). %% @doc -%% Add a set of changes associated witha single sequence number (journal +%% Add a set of changes associated with a single sequence number (journal %% update) to the ledger cache. This is used explicitly when loading the %% ledger from the Journal (i.e. 
at startup) - and in this case the ETS insert %% can be bypassed, as all changes will be flushed to the Penciller before the diff --git a/src/leveled_log.erl b/src/leveled_log.erl index fd59366..1a78d36 100644 --- a/src/leveled_log.erl +++ b/src/leveled_log.erl @@ -305,7 +305,7 @@ {"I0016", {info, "Writing new version of manifest for manifestSQN=~w"}}, {"I0017", - {info, "At SQN=~w journal has filename ~s"}}, + {debug, "At SQN=~w journal has filename ~s"}}, {"I0018", {warn, "We're doomed - intention recorded to destroy all files"}}, {"I0019",