Pass through sync_strategy
Allow a switch so that Riak can use o_sync as the sync flag rather than sync
Parent: 03d025d581
Commit: 196c807b5e
7 changed files with 105 additions and 29 deletions
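For orientation, a minimal usage sketch (not part of the diff below; the root path and journal size are illustrative) of how a caller picks the journal sync behaviour once this change is applied:

    {ok, Bookie} = leveled_bookie:book_start([{root_path, "/tmp/leveled_demo"},
                                              {max_journalsize, 10000000},
                                              {sync_strategy, riak_sync}]),
    %% riak_sync opens CDB journal files with the o_sync flag (Basho OTP 16 style);
    %% sync (the default) uses the sync flag; none opens the file with no sync flag.
    ok = leveled_bookie:book_close(Bookie).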
@@ -47,7 +47,8 @@
         {max_size :: integer(),
             file_path :: string(),
             waste_path :: string(),
-            binary_mode = false :: boolean()}).
+            binary_mode = false :: boolean(),
+            sync_strategy = sync}).

 -record(inker_options,
         {cdb_max_size :: integer(),
@@ -118,7 +118,7 @@
         terminate/2,
         code_change/3,
         book_start/1,
-        book_start/3,
+        book_start/4,
         book_put/5,
         book_put/6,
         book_tempput/7,
@@ -159,10 +159,11 @@
 %%% API
 %%%============================================================================

-book_start(RootPath, LedgerCacheSize, JournalSize) ->
+book_start(RootPath, LedgerCacheSize, JournalSize, SyncStrategy) ->
     book_start([{root_path, RootPath},
                 {cache_size, LedgerCacheSize},
-                {max_journalsize, JournalSize}]).
+                {max_journalsize, JournalSize},
+                {sync_strategy, SyncStrategy}]).

 book_start(Opts) ->
     gen_server:start(?MODULE, [Opts], []).
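Illustratively (placeholder values), the new book_start/4 above is a thin wrapper over book_start/1, so these two calls are equivalent after this change:

    leveled_bookie:book_start("/tmp/ldb", 2000, 50000000, riak_sync)
    %% ...expands to...
    leveled_bookie:book_start([{root_path, "/tmp/ldb"},
                               {cache_size, 2000},
                               {max_journalsize, 50000000},
                               {sync_strategy, riak_sync}])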
@@ -661,7 +662,7 @@ snapshot_store(State, SnapType) ->

 set_options(Opts) ->
     MaxJournalSize = get_opt(max_journalsize, Opts, 10000000000),
+    SyncStrat = get_opt(sync_strategy, Opts, sync),
     WRP = get_opt(waste_retention_period, Opts),

     AltStrategy = get_opt(reload_strategy, Opts, []),
@@ -680,7 +681,8 @@ set_options(Opts) ->
                max_run_length = get_opt(max_run_length, Opts),
                waste_retention_period = WRP,
                cdb_options = #cdb_options{max_size=MaxJournalSize,
-                                          binary_mode=true}},
+                                          binary_mode=true,
+                                          sync_strategy=SyncStrat}},
     #penciller_options{root_path = LedgerFP,
                        max_inmemory_tablesize = PCLL0CacheSize}}.

@@ -107,7 +107,8 @@
         delete_point = 0 :: integer(),
         inker :: pid(),
         deferred_delete = false :: boolean(),
-        waste_path :: string()}).
+        waste_path :: string(),
+        sync_strategy = none}).


 %%%============================================================================
@@ -222,12 +223,14 @@ init([Opts]) ->
            starting,
            #state{max_size=MaxSize,
                   binary_mode=Opts#cdb_options.binary_mode,
-                  waste_path=Opts#cdb_options.waste_path}}.
+                  waste_path=Opts#cdb_options.waste_path,
+                  sync_strategy=Opts#cdb_options.sync_strategy}}.

 starting({open_writer, Filename}, _From, State) ->
     leveled_log:log("CDB01", [Filename]),
     {LastPosition, HashTree, LastKey} = open_active_file(Filename),
-    {ok, Handle} = file:open(Filename, [sync | ?WRITE_OPS]),
+    WriteOps = set_writeops(State#state.sync_strategy),
+    {ok, Handle} = file:open(Filename, WriteOps),
     {reply, ok, writer, State#state{handle=Handle,
                                     last_position=LastPosition,
                                     last_key=LastKey,
@@ -520,6 +523,23 @@ code_change(_OldVsn, StateName, State, _Extra) ->
 %%% Internal functions
 %%%============================================================================

+%% Assumption is that sync should be used - it is a transaction log.
+%%
+%% When running the Riak-specific version on Erlang 16, it sets the sync flag
+%% using the o_sync keyword (as it is in posix). If using a non-Basho OTP 16
+%% sync is not possible so none will need to be passed. This is not
+%% recommended, but is allowed here to make it simpler to test against
+%% off-the-shelf OTP 16
+set_writeops(SyncStrategy) ->
+    case SyncStrategy of
+        sync ->
+            [sync | ?WRITE_OPS];
+        riak_sync ->
+            [o_sync | ?WRITE_OPS];
+        none ->
+            ?WRITE_OPS
+    end.
+

 %% from_dict(FileName,ListOfKeyValueTuples)
 %% Given a filename and a dictionary, create a cdb
@@ -1867,6 +1887,9 @@ crc_corrupt_writer_test() ->
     ?assertMatch({"Key100", "Value100"}, cdb_get(P2, "Key100")),
     ok = cdb_close(P2).

+riak_writeops_test() ->
+    ?assertMatch([o_sync, binary, raw, read, write], set_writeops(riak_sync)).
+
 nonsense_coverage_test() ->
     {ok, Pid} = gen_fsm:start(?MODULE, [#cdb_options{}], []),
     ok = gen_fsm:send_all_state_event(Pid, nonsense),
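For reference, assuming ?WRITE_OPS is [binary, raw, read, write] (which is what riak_writeops_test above implies), set_writeops/1 expands as follows:

    set_writeops(sync)      %% -> [sync, binary, raw, read, write]   (default; each write is synced)
    set_writeops(riak_sync) %% -> [o_sync, binary, raw, read, write] (posix-style flag on Basho OTP 16)
    set_writeops(none)      %% -> [binary, raw, read, write]         (no sync flag; test use only)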
@@ -24,7 +24,8 @@ all() -> [

 simple_put_fetch_head_delete(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    StartOpts1 = [{root_path, RootPath}],
+    StartOpts1 = [{root_path, RootPath},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
     ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
@@ -32,7 +33,8 @@ simple_put_fetch_head_delete(_Config) ->
     testutil:check_formissingobject(Bookie1, "Bucket1", "Key2"),
     ok = leveled_bookie:book_close(Bookie1),
     StartOpts2 = [{root_path, RootPath},
-                  {max_journalsize, 3000000}],
+                  {max_journalsize, 3000000},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
     testutil:check_forobject(Bookie2, TestObject),
     ObjList1 = testutil:generate_objects(5000, 2),
@@ -66,7 +68,9 @@ simple_put_fetch_head_delete(_Config) ->

 many_put_fetch_head(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    StartOpts1 = [{root_path, RootPath}, {max_pencillercachesize, 16000}],
+    StartOpts1 = [{root_path, RootPath},
+                  {max_pencillercachesize, 16000},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
     ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
@ -74,7 +78,8 @@ many_put_fetch_head(_Config) ->
|
||||||
ok = leveled_bookie:book_close(Bookie1),
|
ok = leveled_bookie:book_close(Bookie1),
|
||||||
StartOpts2 = [{root_path, RootPath},
|
StartOpts2 = [{root_path, RootPath},
|
||||||
{max_journalsize, 1000000000},
|
{max_journalsize, 1000000000},
|
||||||
{max_pencillercachesize, 32000}],
|
{max_pencillercachesize, 32000},
|
||||||
|
{sync_strategy, testutil:sync_strategy()}],
|
||||||
{ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
|
||||||
testutil:check_forobject(Bookie2, TestObject),
|
testutil:check_forobject(Bookie2, TestObject),
|
||||||
GenList = [2, 20002, 40002, 60002, 80002,
|
GenList = [2, 20002, 40002, 60002, 80002,
|
||||||
|
@@ -103,7 +108,8 @@ journal_compaction(_Config) ->
     RootPath = testutil:reset_filestructure(),
     StartOpts1 = [{root_path, RootPath},
                   {max_journalsize, 10000000},
-                  {max_run_length, 1}],
+                  {max_run_length, 1},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     ok = leveled_bookie:book_compactjournal(Bookie1, 30000),
     {TestObject, TestSpec} = testutil:generate_testobject(),
@@ -193,7 +199,8 @@ journal_compaction(_Config) ->
     StartOpts2 = [{root_path, RootPath},
                   {max_journalsize, 10000000},
                   {max_run_length, 1},
-                  {waste_retention_period, 1}],
+                  {waste_retention_period, 1},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie3} = leveled_bookie:book_start(StartOpts2),
     ok = leveled_bookie:book_compactjournal(Bookie3, 30000),
     testutil:wait_for_compaction(Bookie3),
@@ -208,7 +215,9 @@ journal_compaction(_Config) ->

 fetchput_snapshot(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    StartOpts1 = [{root_path, RootPath}, {max_journalsize, 30000000}],
+    StartOpts1 = [{root_path, RootPath},
+                  {max_journalsize, 30000000},
+                  {sync_strategy, none}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
     ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
@ -309,7 +318,9 @@ load_and_count(_Config) ->
|
||||||
% Use artificially small files, and the load keys, counting they're all
|
% Use artificially small files, and the load keys, counting they're all
|
||||||
% present
|
% present
|
||||||
RootPath = testutil:reset_filestructure(),
|
RootPath = testutil:reset_filestructure(),
|
||||||
StartOpts1 = [{root_path, RootPath}, {max_journalsize, 50000000}],
|
StartOpts1 = [{root_path, RootPath},
|
||||||
|
{max_journalsize, 50000000},
|
||||||
|
{sync_strategy, testutil:sync_strategy()}],
|
||||||
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
||||||
{TestObject, TestSpec} = testutil:generate_testobject(),
|
{TestObject, TestSpec} = testutil:generate_testobject(),
|
||||||
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
||||||
|
@@ -392,7 +403,9 @@ load_and_count(_Config) ->

 load_and_count_withdelete(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    StartOpts1 = [{root_path, RootPath}, {max_journalsize, 50000000}],
+    StartOpts1 = [{root_path, RootPath},
+                  {max_journalsize, 50000000},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
     ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
@@ -448,7 +461,9 @@ load_and_count_withdelete(_Config) ->

 space_clear_ondelete(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    StartOpts1 = [{root_path, RootPath}, {max_journalsize, 20000000}],
+    StartOpts1 = [{root_path, RootPath},
+                  {max_journalsize, 20000000},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Book1} = leveled_bookie:book_start(StartOpts1),
     G2 = fun testutil:generate_compressibleobjects/2,
     testutil:load_objects(20000,
@@ -20,7 +20,8 @@ all() -> [
 small_load_with2i(_Config) ->
     RootPath = testutil:reset_filestructure(),
     StartOpts1 = [{root_path, RootPath},
-                  {max_journalsize, 5000000}],
+                  {max_journalsize, 5000000},
+                  {sync_strategy, testutil:sync_strategy()}],
     % low journal size to make sure > 1 created
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
@@ -128,7 +129,10 @@ small_load_with2i(_Config) ->

 query_count(_Config) ->
     RootPath = testutil:reset_filestructure(),
-    {ok, Book1} = leveled_bookie:book_start(RootPath, 2000, 50000000),
+    {ok, Book1} = leveled_bookie:book_start(RootPath,
+                                            2000,
+                                            50000000,
+                                            testutil:sync_strategy()),
     BucketBin = list_to_binary("Bucket"),
     {TestObject, TestSpec} = testutil:generate_testobject(BucketBin,
                                                           "Key1",
@@ -177,7 +181,10 @@ query_count(_Config) ->
                                      Book1,
                                      ?KEY_ONLY),
     ok = leveled_bookie:book_close(Book1),
-    {ok, Book2} = leveled_bookie:book_start(RootPath, 1000, 50000000),
+    {ok, Book2} = leveled_bookie:book_start(RootPath,
+                                            1000,
+                                            50000000,
+                                            testutil:sync_strategy()),
     Index1Count = count_termsonindex(BucketBin,
                                      "idx1_bin",
                                      Book2,
@@ -288,7 +295,10 @@ query_count(_Config) ->
                   end,
                   R9),
     ok = leveled_bookie:book_close(Book2),
-    {ok, Book3} = leveled_bookie:book_start(RootPath, 2000, 50000000),
+    {ok, Book3} = leveled_bookie:book_start(RootPath,
+                                            2000,
+                                            50000000,
+                                            testutil:sync_strategy()),
     lists:foreach(fun({IdxF, IdxT, X}) ->
                       Q = {index_query,
                            BucketBin,
@@ -305,7 +315,10 @@ query_count(_Config) ->
                   R9),
     ok = testutil:book_riakput(Book3, Obj9, Spc9),
     ok = leveled_bookie:book_close(Book3),
-    {ok, Book4} = leveled_bookie:book_start(RootPath, 2000, 50000000),
+    {ok, Book4} = leveled_bookie:book_start(RootPath,
+                                            2000,
+                                            50000000,
+                                            testutil:sync_strategy()),
     lists:foreach(fun({IdxF, IdxT, X}) ->
                       Q = {index_query,
                            BucketBin,
@@ -365,7 +378,10 @@ query_count(_Config) ->

     ok = leveled_bookie:book_close(Book4),

-    {ok, Book5} = leveled_bookie:book_start(RootPath, 2000, 50000000),
+    {ok, Book5} = leveled_bookie:book_start(RootPath,
+                                            2000,
+                                            50000000,
+                                            testutil:sync_strategy()),
     {async, BLF3} = leveled_bookie:book_returnfolder(Book5, BucketListQuery),
     SW_QC = os:timestamp(),
     BucketSet3 = BLF3(),
@@ -20,10 +20,12 @@ retain_strategy(_Config) ->
     BookOpts = [{root_path, RootPath},
                 {cache_size, 1000},
                 {max_journalsize, 5000000},
+                {sync_strategy, testutil:sync_strategy()},
                 {reload_strategy, [{?RIAK_TAG, retain}]}],
     BookOptsAlt = [{root_path, RootPath},
                    {cache_size, 1000},
                    {max_journalsize, 100000},
+                   {sync_strategy, testutil:sync_strategy()},
                    {reload_strategy, [{?RIAK_TAG, retain}]},
                    {max_run_length, 8}],
     {ok, Spcl3, LastV3} = rotating_object_check(BookOpts, "Bucket3", 800),
@@ -47,6 +49,7 @@ recovr_strategy(_Config) ->
     BookOpts = [{root_path, RootPath},
                 {cache_size, 1000},
                 {max_journalsize, 5000000},
+                {sync_strategy, testutil:sync_strategy()},
                 {reload_strategy, [{?RIAK_TAG, recovr}]}],

     R6 = rotating_object_check(BookOpts, "Bucket6", 6400),
|
||||||
aae_bustedjournal(_Config) ->
|
aae_bustedjournal(_Config) ->
|
||||||
RootPath = testutil:reset_filestructure(),
|
RootPath = testutil:reset_filestructure(),
|
||||||
StartOpts = [{root_path, RootPath},
|
StartOpts = [{root_path, RootPath},
|
||||||
{max_journalsize, 20000000}],
|
{max_journalsize, 20000000},
|
||||||
|
{sync_strategy, testutil:sync_strategy()}],
|
||||||
{ok, Bookie1} = leveled_bookie:book_start(StartOpts),
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts),
|
||||||
{TestObject, TestSpec} = testutil:generate_testobject(),
|
{TestObject, TestSpec} = testutil:generate_testobject(),
|
||||||
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
||||||
|
@@ -243,7 +247,8 @@ journal_compaction_bustedjournal(_Config) ->
     RootPath = testutil:reset_filestructure(),
     StartOpts1 = [{root_path, RootPath},
                   {max_journalsize, 10000000},
-                  {max_run_length, 10}],
+                  {max_run_length, 10},
+                  {sync_strategy, testutil:sync_strategy()}],
     {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
     {TestObject, TestSpec} = testutil:generate_testobject(),
     ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
@@ -41,13 +41,26 @@
         find_journals/1,
         riak_hash/1,
         wait_for_compaction/1,
-        foldkeysfun/3]).
+        foldkeysfun/3,
+        sync_strategy/0]).

 -define(RETURN_TERMS, {true, undefined}).
 -define(SLOWOFFER_DELAY, 5).



+sync_strategy() ->
+    case erlang:system_info(otp_release) of
+        "17" ->
+            sync;
+        "18" ->
+            sync;
+        "19" ->
+            sync;
+        "16" ->
+            none
+    end.
+
 book_riakput(Pid, RiakObject, IndexSpecs) ->
     {Bucket, Key} = leveled_codec:riakto_keydetails(RiakObject),
     leveled_bookie:book_put(Pid, Bucket, Key, RiakObject, IndexSpecs, ?RIAK_TAG).
@@ -431,7 +444,8 @@ put_altered_indexed_objects(Book, Bucket, KSpecL, RemoveOld2i) ->
 rotating_object_check(RootPath, B, NumberOfObjects) ->
     BookOpts = [{root_path, RootPath},
                 {cache_size, 1000},
-                {max_journalsize, 5000000}],
+                {max_journalsize, 5000000},
+                {sync_strategy, sync_strategy()}],
     {ok, Book1} = leveled_bookie:book_start(BookOpts),
     {KSpcL1, V1} = testutil:put_indexed_objects(Book1, B, NumberOfObjects),
     ok = testutil:check_indexed_objects(Book1, B, KSpcL1, V1),