Adjust setting
If the cache size is too small then we're more likely not to be ready to evict an L0 file
parent 6b5b51412e
commit 2607792d1f
1 changed file with 1 addition and 12 deletions
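A rough sketch of the reasoning in the commit message, using invented names rather than the real leveled functions: the bookie buffers writes in an in-memory ledger cache and, once the cache reaches ?CACHE_SIZE, pushes it to the penciller as a new L0 file; if the previous L0 file has not yet been persisted the push is refused, and a smaller cache makes that refusal more likely simply because pushes happen more often.

%% Hypothetical sketch only - not the actual leveled code.
-module(cache_push_sketch).
-export([maybe_push_cache/2]).

-define(CACHE_SIZE, 2000).

maybe_push_cache(Cache, PushFun) when is_function(PushFun, 1) ->
    case length(Cache) < ?CACHE_SIZE of
        true ->
            %% Still room in memory - keep buffering.
            {ok, Cache};
        false ->
            case PushFun(Cache) of
                ok ->
                    %% The L0 slot was free; the cache becomes a new L0 file.
                    {ok, []};
                returned ->
                    %% The previous L0 file is still being persisted; with a
                    %% small cache this branch is hit more often, and the put
                    %% path has to hold on to the cache and retry later.
                    {returned, Cache}
            end
    end.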
@@ -153,7 +153,7 @@
 -include_lib("eunit/include/eunit.hrl").
 
--define(CACHE_SIZE, 1600).
+-define(CACHE_SIZE, 2000).
 -define(JOURNAL_FP, "journal").
 -define(LEDGER_FP, "ledger").
 -define(SHUTDOWN_WAITS, 60).
@@ -194,17 +194,6 @@ book_put(Pid, Bucket, Key, Object, IndexSpecs) ->
 
 book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag) ->
     book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, infinity).
 
-
-
-%% TODO:
-%% It is not enough simply to change the value to delete, as the journal
-%% needs to know the key is a tombstone at compaction time, and currently at
-%% compaction time the clerk only knows the Key and not the Value.
-%%
-%% The tombstone cannot be removed from the Journal on compaction, as the
-%% journal entry the tombstone deletes may not have been reaped - and so if the
-%% ledger got erased, the value would be resurrected.
-
 book_riakdelete(Pid, Bucket, Key, IndexSpecs) ->
     book_put(Pid, Bucket, Key, delete, IndexSpecs, ?RIAK_TAG).
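The removed TODO above explains the tombstone handling that book_riakdelete relies on: a delete is written as a put of the atom delete, and the journal entry cannot simply be dropped at compaction because the entry it shadows may not have been reaped yet. A hedged sketch of the consequence for the read path (module and function names are illustrative, not the leveled API):

%% Illustrative only - a stored 'delete' value acts as a tombstone: the key
%% still exists in the journal, but reads must treat it as gone.
-module(tombstone_sketch).
-export([interpret_fetch/1]).

interpret_fetch(not_present) ->
    not_found;
interpret_fetch({value, delete}) ->
    %% Tombstone written by a delete-as-put; the key is logically deleted.
    not_found;
interpret_fetch({value, Object}) ->
    {ok, Object}.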