Make tests compatible with OTP 16
This required a switch to change the sync strategy based on a rebar parameter. However, the tests could be slow on a MacBook with OTP 16 and sync enabled, so timeouts were added to the unit tests, and the ct tests' sync_strategy was changed to not sync on OTP 16.
parent 605a8b1e73
commit eba21f49fa
8 changed files with 63 additions and 13 deletions
.rebar/erlcinfo (new binary file, not shown)
rebar.config

@@ -1,7 +1,8 @@
 {erl_opts, [warnings_as_errors,
             {platform_define, "18", old_rand},
             {platform_define, "17", old_rand},
-            {platform_define, "^R", old_rand}]}.
+            {platform_define, "^R", old_rand},
+            {platform_define, "^R", no_sync}]}.
 
 {profiles,
   [{eqc, [{deps, [meck, fqc]},
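For context: rebar matches each platform_define regex against a platform string that begins with the OTP release, so "^R" only matches pre-17 releases such as "R16B03", and the no_sync macro is therefore only defined when building on OTP 16 or earlier. A minimal sketch of the conditional-compilation pattern this enables (the module name and the ?WRITE_OPS value below are illustrative, not taken from the commit):

-module(sync_opts_example).
-export([write_ops/0]).

%% Illustrative base options for opening an append-only file for writing.
-define(WRITE_OPS, [binary, raw, read, write]).

-ifdef(no_sync).
%% OTP 16 or earlier: the 'sync' open option is not supported, so never add it.
write_ops() -> ?WRITE_OPS.
-else.
%% OTP 17+: request sync-on-write, as suits a transaction log.
write_ops() -> [sync | ?WRITE_OPS].
-endif.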
src/leveled_bookie.erl

@@ -1671,7 +1671,10 @@ ttl_test() ->
     ok = book_close(Bookie2),
     reset_filestructure().
 
-hashlist_query_test() ->
+hashlist_query_test_() ->
+    {timeout, 60, fun hashlist_query_testto/0}.
+
+hashlist_query_testto() ->
     RootPath = reset_filestructure(),
     {ok, Bookie1} = book_start([{root_path, RootPath},
                                 {max_journalsize, 1000000},

@@ -1719,7 +1722,11 @@ hashlist_query_test() ->
     ok = book_close(Bookie2),
     reset_filestructure().
 
-hashlist_query_withjournalcheck_test() ->
+
+hashlist_query_withjournalcheck_test_() ->
+    {timeout, 60, fun hashlist_query_withjournalcheck_testto/0}.
+
+hashlist_query_withjournalcheck_testto() ->
     RootPath = reset_filestructure(),
     {ok, Bookie1} = book_start([{root_path, RootPath},
                                 {max_journalsize, 1000000},

@@ -1745,7 +1752,10 @@ hashlist_query_withjournalcheck_test() ->
     ok = book_close(Bookie1),
     reset_filestructure().
 
-foldobjects_vs_hashtree_test() ->
+foldobjects_vs_hashtree_test_() ->
+    {timeout, 60, fun foldobjects_vs_hashtree_testto/0}.
+
+foldobjects_vs_hashtree_testto() ->
     RootPath = reset_filestructure(),
     {ok, Bookie1} = book_start([{root_path, RootPath},
                                 {max_journalsize, 1000000},

@@ -1815,8 +1825,10 @@ foldobjects_vs_hashtree_test() ->
     ok = book_close(Bookie1),
     reset_filestructure().
 
-foldobjects_vs_foldheads_bybucket_test() ->
+foldobjects_vs_foldheads_bybucket_test_() ->
+    {timeout, 60, fun foldobjects_vs_foldheads_bybucket_testto/0}.
+
+foldobjects_vs_foldheads_bybucket_testto() ->
     RootPath = reset_filestructure(),
     {ok, Bookie1} = book_start([{root_path, RootPath},
                                 {max_journalsize, 1000000},
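The renames above follow EUnit's test-generator convention: a function ending in _test_ (trailing underscore) returns a test representation instead of running assertions directly, which is how a per-test timeout longer than EUnit's 5-second default can be attached to these slow tests. A minimal illustration of the pattern (names are illustrative):

-module(timeout_example).
-include_lib("eunit/include/eunit.hrl").

%% Generator form (trailing underscore): returns a test representation,
%% here wrapping the real test fun with a 60 second timeout instead of
%% EUnit's default 5 seconds.
slow_thing_test_() ->
    {timeout, 60, fun slow_thing_testto/0}.

%% The body of the test; no longer picked up directly by EUnit because
%% its name does not end in _test or _test_.
slow_thing_testto() ->
    timer:sleep(6000),
    ?assertMatch(ok, ok).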
src/leveled_cdb.erl

@@ -370,10 +370,11 @@ init([Opts]) ->
 starting({open_writer, Filename}, _From, State) ->
     leveled_log:log("CDB01", [Filename]),
     {LastPosition, HashTree, LastKey} = open_active_file(Filename),
-    WriteOps = set_writeops(State#state.sync_strategy),
+    {WriteOps, UpdStrategy} = set_writeops(State#state.sync_strategy),
     leveled_log:log("CDB13", [WriteOps]),
     {ok, Handle} = file:open(Filename, WriteOps),
     {reply, ok, writer, State#state{handle=Handle,
+                                    sync_strategy = UpdStrategy,
                                     last_position=LastPosition,
                                     last_key=LastKey,
                                     filename=Filename,
@@ -714,6 +715,8 @@ code_change(_OldVsn, StateName, State, _Extra) ->
 %%% Internal functions
 %%%============================================================================
 
+
+-spec set_writeops(sync|riak_sync|none) -> {list(), sync|riak_sync|none}.
 %% Assumption is that sync should be used - it is a transaction log.
 %%
 %% However this flag is not supported in OTP 16. Bitcask appears to pass an
@@ -721,16 +724,31 @@ code_change(_OldVsn, StateName, State, _Extra) ->
 %% bitcask nif fileops).
 %%
 %% To get round this will try and datasync on each PUT with riak_sync
+-ifdef(no_sync).
+
 set_writeops(SyncStrategy) ->
     case SyncStrategy of
         sync ->
-            [sync | ?WRITE_OPS];
+            {?WRITE_OPS, riak_sync};
         riak_sync ->
-            ?WRITE_OPS;
+            {?WRITE_OPS, riak_sync};
         none ->
-            ?WRITE_OPS
+            {?WRITE_OPS, none}
     end.
+
+-else.
+
+set_writeops(SyncStrategy) ->
+    case SyncStrategy of
+        sync ->
+            {[sync | ?WRITE_OPS], sync};
+        riak_sync ->
+            {?WRITE_OPS, riak_sync};
+        none ->
+            {?WRITE_OPS, none}
+    end.
+
+-endif.
 
 %% from_dict(FileName,ListOfKeyValueTuples)
 %% Given a filename and a dictionary, create a cdb
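Because the sync open mode does not exist on OTP 16, the no_sync variant above never returns it: a requested sync strategy is downgraded to riak_sync, which (as the comment block notes) is handled by a datasync on each PUT instead. A rough sketch of how the returned {WriteOps, UpdStrategy} pair is meant to be consumed, under that assumption (the helper below is illustrative, not code from the commit):

%% Sketch only: open the file with the computed modes, write, then issue an
%% explicit datasync when the (possibly downgraded) strategy is riak_sync.
%% set_writeops/1 is the function changed in the hunk above.
open_and_put(Filename, SyncStrategy, Bytes) ->
    {WriteOps, UpdStrategy} = set_writeops(SyncStrategy),
    {ok, Handle} = file:open(Filename, WriteOps),
    ok = file:write(Handle, Bytes),
    case UpdStrategy of
        riak_sync -> ok = file:datasync(Handle);
        _ -> ok
    end,
    file:close(Handle).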
@@ -1956,7 +1974,10 @@ generate_sequentialkeys(Count, KVList) ->
     KV = {"Key" ++ integer_to_list(Count), "Value" ++ integer_to_list(Count)},
     generate_sequentialkeys(Count - 1, KVList ++ [KV]).
 
-get_keys_byposition_manykeys_test() ->
+get_keys_byposition_manykeys_test_() ->
+    {timeout, 60, fun get_keys_byposition_manykeys_test_to/0}.
+
+get_keys_byposition_manykeys_test_to() ->
     KeyCount = 1024,
     {ok, P1} = cdb_open_writer("../test/poskeymany.pnd",
                                 #cdb_options{binary_mode=false}),
src/leveled_iclerk.erl

@@ -923,7 +923,10 @@ compare_candidate_test() ->
     ?assertMatch([Candidate1, Candidate2, Candidate3, Candidate4],
                  sort_run([Candidate3, Candidate2, Candidate4, Candidate1])).
 
-compact_singlefile_totwosmallfiles_test() ->
+compact_singlefile_totwosmallfiles_test_() ->
+    {timeout, 60, fun compact_singlefile_totwosmallfiles_testto/0}.
+
+compact_singlefile_totwosmallfiles_testto() ->
     RP = "../test/journal",
     CP = "../test/journal/journal_file/post_compact/",
     ok = filelib:ensure_dir(CP),
src/leveled_penciller.erl

@@ -1335,6 +1335,7 @@ add_missing_hash({K, {SQN, ST, MD}}) ->
 clean_dir_test() ->
     % Pointless gesture to test coverage
     RootPath = "../test/ledger",
+    ok = filelib:ensure_dir(RootPath),
     ?assertMatch(ok, file:write_file(RootPath ++ "/test.bob", "hello")),
     ok = clean_subdir(RootPath ++ "/test.bob"),
     ok = file:delete(RootPath ++ "/test.bob").
src/leveled_rand.erl

@@ -11,6 +11,9 @@
           rand_bytes/1
          ]).
 
+
+-include_lib("eunit/include/eunit.hrl").
+
 %%%===================================================================
 %%% New (r19+) rand style functions
 %%%===================================================================

@@ -45,3 +48,11 @@ rand_bytes(Size) ->
     crypto:rand_bytes(Size).
 
 -endif.
+
+
+-ifdef(TEST).
+
+rand_test() ->
+    ?assertMatch(true, uniform() < 1).
+
+-endif.
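The old_rand defines in rebar.config work the same way as no_sync: they select the pre-OTP-19 code path in this wrapper module (its own heading calls the default path "New (r19+) rand style functions"), under which it falls back to the older random/crypto APIs such as crypto:rand_bytes/1. A minimal sketch of that shim pattern, assuming the usual random-vs-rand split (not the project's exact code):

-module(rand_shim_example).
-export([uniform/0]).

-ifdef(old_rand).
%% Older releases: use the legacy 'random' module API.
uniform() ->
    random:uniform().
-else.
%% OTP 19+ style: use the modern 'rand' module.
uniform() ->
    rand:uniform().
-endif.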
test/end_to_end/testutil.erl

@@ -137,11 +137,12 @@ sync_strategy() ->
             sync;
         "19" ->
             sync;
-        "16" ->
+        _ ->
+            % running the sync strategy with OTP16 on macbook is
+            % super slow. So revert to no sync
            none
     end.
 
 
 book_riakput(Pid, RiakObject, IndexSpecs) ->
     leveled_bookie:book_put(Pid,
                             RiakObject#r_object.bucket,
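The change above makes anything other than a recognised modern release fall through to none, so the ct tests stop forcing sync on OTP 16. The expression being matched is not visible in the hunk; a plausible reconstruction of the helper after this commit, assuming it switches on erlang:system_info(otp_release) (which returns strings such as "19", or "R16B03"-style strings on older releases), would be:

%% Sketch only - the scrutinee and the earlier clauses are assumptions.
sync_strategy() ->
    case erlang:system_info(otp_release) of
        "17" -> sync;
        "18" -> sync;
        "19" -> sync;
        _ ->
            % running the sync strategy with OTP16 on macbook is
            % super slow. So revert to no sync
            none
    end.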
|
Loading…
Add table
Add a link
Reference in a new issue