Add unit test to hit safe read edge cases

... and they can't be hit. So remove branches and let them fail if something inexplicable happens
This commit is contained in:
Martin Sumner 2017-11-09 17:12:47 +00:00
parent 69f7e4c12a
commit ae70af5eec
2 changed files with 50 additions and 17 deletions
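The commit message above argues that once `safe_read_next_key` has already normalised `eof` and read errors to `false`, the remaining defensive branches can never match, so the commit removes them and lets an unexpected result crash. As a minimal standalone sketch of that assertive-match style (the module and function names here are illustrative, not part of leveled):

```erlang
-module(assertive_match_sketch).
-export([read_exact/2]).

%% Illustrative only: read exactly Len bytes from an open file handle.
%% When the caller has already established that Len readable bytes must
%% exist, an assertive match is used; anything other than {ok, Bin}
%% raises a badmatch error rather than being quietly mapped to false.
read_exact(Handle, Len) ->
    {ok, Bin} = file:read(Handle, Len),
    Bin.
```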


@@ -1193,23 +1193,15 @@ saferead_keyvalue(Handle) ->
             false;
         {KeyL, ValueL} ->
             case safe_read_next_key(Handle, KeyL) of
-                {error, _} ->
-                    false;
-                eof ->
-                    false;
                 false ->
                     false;
                 Key ->
-                    case file:read(Handle, ValueL) of
-                        eof ->
-                            false;
-                        {ok, Value} ->
-                            case crccheck_value(Value) of
-                                true ->
-                                    {Key, Value, KeyL, ValueL};
-                                false ->
-                                    false
-                            end
+                    {ok, Value} = file:read(Handle, ValueL),
+                    case crccheck_value(Value) of
+                        true ->
+                            {Key, Value, KeyL, ValueL};
+                        false ->
+                            false
                     end
             end
     end.
@@ -2015,10 +2007,10 @@ generate_sequentialkeys(Count, KVList) ->
     generate_sequentialkeys(Count - 1, KVList ++ [KV]).
 
 get_keys_byposition_manykeys_test_() ->
-    {timeout, 60, fun get_keys_byposition_manykeys_test_to/0}.
+    {timeout, 120, fun get_keys_byposition_manykeys_test_to/0}.
 
 get_keys_byposition_manykeys_test_to() ->
-    KeyCount = 1024,
+    KeyCount = 2048,
     {ok, P1} = cdb_open_writer("../test/poskeymany.pnd",
                                 #cdb_options{binary_mode=false}),
     KVList = generate_sequentialkeys(KeyCount, []),
@@ -2210,6 +2202,47 @@ crc_corrupt_writer_test() ->
     ?assertMatch({"Key100", "Value100"}, cdb_get(P2, "Key100")),
     ok = cdb_close(P2).
 
+safe_read_test() ->
+    Key = <<"Key">>,
+    Value = <<"Value">>,
+    CRC = calc_crc(Value),
+    ValToWrite = <<Value/binary, CRC:32/integer>>,
+    KeyL = byte_size(Key),
+    ValueL= byte_size(ValToWrite),
+    TestFN = "../test/saferead.pnd",
+    BinToWrite =
+        <<KeyL:32/integer, ValueL:32/integer, Key/binary, ValToWrite/binary>>,
+    TestCorruptedWriteFun =
+        fun(BitNumber, ok) ->
+            <<PreBin:BitNumber/bitstring,
+                Bit:1/integer,
+                PostBin/bitstring>> = BinToWrite,
+            BadBit = Bit bxor 1,
+            AltBin = <<PreBin:BitNumber/bitstring,
+                        BadBit:1/integer,
+                        PostBin/bitstring>>,
+            file:delete(TestFN),
+            {ok, Handle} = file:open(TestFN, ?WRITE_OPS),
+            ok = file:pwrite(Handle, 0, AltBin),
+            {ok, _} = file:position(Handle, bof),
+            case saferead_keyvalue(Handle) of
+                false ->
+                    ok;
+                {Key, Value, KeyL, ValueL} ->
+                    ok
+            end
+        end,
+    Check = lists:foldl(TestCorruptedWriteFun,
+                        ok,
+                        lists:seq(1, -1 + 8 * (KeyL + ValueL + 8))),
+    ?assertMatch(ok, Check),
+    file:delete(TestFN).
+
 nonsense_coverage_test() ->
     {ok, Pid} = gen_fsm:start(?MODULE, [#cdb_options{}], []),
     ok = gen_fsm:send_all_state_event(Pid, nonsense),


@@ -338,7 +338,7 @@ schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
 %% @doc
 %% Get a score for a single CDB file in the journal. This will pull out a bunch
 %% of keys and sizes at random in an efficient way (by scanning the hashtable
-%% then just picking the key and siz einformation of disk).
+%% then just picking the key and size information of disk).
 %%
 %% The score should represent a percentage which is the size of the file by
 %% comparison to the original file if compaction was to be run. So if a file
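The doc comment in the hunk above describes the scoring idea at a high level: sample keys and sizes via the hashtable, then express the post-compaction size as a percentage of the current file size. A rough, assumption-laden sketch of turning such a sample into a percentage (this is not the module's actual scoring code; the `score/2` function and the `ActiveFun` predicate are invented for illustration):

```erlang
-module(score_sketch).
-export([score/2]).

%% Estimate the size of the file after compaction as a percentage of its
%% current size, from a random sample of {Key, Size} pairs.
%% ActiveFun is a hypothetical predicate saying whether a sampled key is
%% still live, i.e. whether a compaction run would keep it.
score(SampledKeySizes, ActiveFun) ->
    TotalSize = lists:sum([Size || {_Key, Size} <- SampledKeySizes]),
    KeptSize = lists:sum([Size || {Key, Size} <- SampledKeySizes,
                                    ActiveFun(Key)]),
    case TotalSize of
        0 -> 100.0;                        % nothing sampled - assume no benefit
        _ -> 100 * KeptSize / TotalSize    % e.g. 60.0 => ~40% could be reclaimed
    end.
```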