diff --git a/src/leveled_cdb.erl b/src/leveled_cdb.erl index f67583b..791cbcf 100644 --- a/src/leveled_cdb.erl +++ b/src/leveled_cdb.erl @@ -1193,23 +1193,15 @@ saferead_keyvalue(Handle) -> false; {KeyL, ValueL} -> case safe_read_next_key(Handle, KeyL) of - {error, _} -> - false; - eof -> - false; false -> false; Key -> - case file:read(Handle, ValueL) of - eof -> - false; - {ok, Value} -> - case crccheck_value(Value) of - true -> - {Key, Value, KeyL, ValueL}; - false -> - false - end + {ok, Value} = file:read(Handle, ValueL), + case crccheck_value(Value) of + true -> + {Key, Value, KeyL, ValueL}; + false -> + false end end end. @@ -2015,10 +2007,10 @@ generate_sequentialkeys(Count, KVList) -> generate_sequentialkeys(Count - 1, KVList ++ [KV]). get_keys_byposition_manykeys_test_() -> - {timeout, 60, fun get_keys_byposition_manykeys_test_to/0}. + {timeout, 120, fun get_keys_byposition_manykeys_test_to/0}. get_keys_byposition_manykeys_test_to() -> - KeyCount = 1024, + KeyCount = 2048, {ok, P1} = cdb_open_writer("../test/poskeymany.pnd", #cdb_options{binary_mode=false}), KVList = generate_sequentialkeys(KeyCount, []), @@ -2210,6 +2202,47 @@ crc_corrupt_writer_test() -> ?assertMatch({"Key100", "Value100"}, cdb_get(P2, "Key100")), ok = cdb_close(P2). 
+safe_read_test() ->
+    Key = <<"Key">>,
+    Value = <<"Value">>,
+    CRC = calc_crc(Value),
+    ValToWrite = <<CRC:32/integer, Value/binary>>,
+    KeyL = byte_size(Key),
+    ValueL= byte_size(ValToWrite),
+
+    TestFN = "../test/saferead.pnd",
+    BinToWrite =
+        <<KeyL:32/integer,
+            ValueL:32/integer,
+            Key/binary,
+            ValToWrite/binary>>,
+
+    TestCorruptedWriteFun =
+        fun(BitNumber, ok) ->
+            <<Pre:BitNumber/bitstring, Bit:1/integer,
+                Post/bitstring>> = BinToWrite,
+            BadBit = Bit bxor 1,
+            AltBin = <<Pre:BitNumber/bitstring, BadBit:1/integer, Post/bitstring>>,
+            file:delete(TestFN),
+            {ok, Handle} = file:open(TestFN, ?WRITE_OPS),
+            ok = file:pwrite(Handle, 0, AltBin),
+            {ok, _} = file:position(Handle, bof),
+            case saferead_keyvalue(Handle) of
+                false ->
+                    ok;
+                {Key, Value, KeyL, ValueL} ->
+                    ok
+            end
+        end,
+
+    Check = lists:foldl(TestCorruptedWriteFun,
+                        ok,
+                        lists:seq(1, -1 + 8 * (KeyL + ValueL + 8))),
+
+    ?assertMatch(ok, Check),
+    file:delete(TestFN).
+
+
 nonsense_coverage_test() ->
     {ok, Pid} = gen_fsm:start(?MODULE, [#cdb_options{}], []),
     ok = gen_fsm:send_all_state_event(Pid, nonsense),
diff --git a/src/leveled_iclerk.erl b/src/leveled_iclerk.erl
index 4b808d2..1d1a608 100644
--- a/src/leveled_iclerk.erl
+++ b/src/leveled_iclerk.erl
@@ -338,7 +338,7 @@ schedule_compaction(CompactionHours, RunsPerDay, CurrentTS) ->
 %% @doc
 %% Get a score for a single CDB file in the journal. This will pull out a bunch
 %% of keys and sizes at random in an efficient way (by scanning the hashtable
-%% then just picking the key and siz einformation of disk).
+%% then just picking the key and size information of disk).
 %%
 %% The score should represent a percentage which is the size of the file by
 %% comparison to the original file if compaction was to be run. So if a file