Mas i389 rebuildledger (#390)
* Protect the penciller from empty ledger cache updates, which may occur when loading the ledger from the journal after the ledger has been cleared.

* Score caching and randomisation: the test allkeydelta_journal_multicompact can occasionally fail when a compaction doesn't happen on one loop but then does on the next. This is suspected to result from score caching, randomisation of the keys grabbed for scoring, and jitter on size boundaries. The test has been modified for predictability. Also includes formatting changes.

* Avoid small batches: avoid small compaction batches caused by large SQN gaps.

* Rationalise tests: two tests overlapped with the new, much broader, replace_everything/1 test. Any remaining checks of interest were ported over and the two tests dropped.
Parent: a033e280e6
Commit: a01c74f268
9 changed files with 358 additions and 320 deletions
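The first bullet above describes guarding the penciller against pushes of an empty ledger cache while the ledger is being rebuilt from the journal. The snippet below is a minimal sketch of that guard pattern only; the module and function names are hypothetical and this is not the leveled_penciller API.

```erlang
%% Hypothetical sketch of the guard pattern, not leveled's actual code.
-module(ledger_cache_guard_sketch).
-export([maybe_push_cache/2]).

%% State is assumed to be a map holding the in-memory cache segments.
maybe_push_cache([], State) ->
    %% An empty update (e.g. while the ledger is being rebuilt from the
    %% journal after being cleared) is acknowledged but not merged.
    {ignored, State};
maybe_push_cache(CacheEntries, State) when is_list(CacheEntries) ->
    Segments = maps:get(segments, State, []),
    {ok, State#{segments => [CacheEntries | Segments]}}.
```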
@@ -5,7 +5,6 @@
-export([
    recovery_with_samekeyupdates/1,
    same_key_rotation_withindexes/1,
    hot_backup_simple/1,
    hot_backup_changes/1,
    retain_strategy/1,
    recalc_strategy/1,
@@ -16,15 +15,14 @@
    aae_bustedjournal/1,
    journal_compaction_bustedjournal/1,
    close_duringcompaction/1,
    allkeydelta_journal_multicompact/1,
    recompact_keydeltas/1,
    simple_cachescoring/1
    simple_cachescoring/1,
    replace_everything/1
    ]).

all() -> [
    recovery_with_samekeyupdates,
    same_key_rotation_withindexes,
    hot_backup_simple,
    hot_backup_changes,
    retain_strategy,
    recalc_strategy,
@@ -34,13 +32,141 @@ all() -> [
    aae_bustedjournal,
    journal_compaction_bustedjournal,
    close_duringcompaction,
    allkeydelta_journal_multicompact,
    recompact_keydeltas,
    stdtag_recalc,
    simple_cachescoring
    simple_cachescoring,
    replace_everything
    ].


replace_everything(_Config) ->
    % See https://github.com/martinsumner/leveled/issues/389
    % Also replaces previous test which was checking the compaction process
    % respects the journal object count passed at startup
    RootPath = testutil:reset_filestructure(),
    BackupPath = testutil:reset_filestructure("backupRE"),
    CompPath = filename:join(RootPath, "journal/journal_files/post_compact"),
    SmallJournalCount = 7000,
    StdJournalCount = 20000,
    BookOpts =
        fun(JournalObjectCount) ->
            [{root_path, RootPath},
                {cache_size, 2000},
                {max_journalobjectcount, JournalObjectCount},
                {sync_strategy, testutil:sync_strategy()},
                {reload_strategy, [{?RIAK_TAG, recalc}]}]
        end,
    {ok, Book1} = leveled_bookie:book_start(BookOpts(StdJournalCount)),
    BKT = "ReplaceAll",
    BKT1 = "ReplaceAll1",
    BKT2 = "ReplaceAll2",
    BKT3 = "ReplaceAll3",
    {KSpcL1, V1} =
        testutil:put_indexed_objects(Book1, BKT, 50000),
    ok = testutil:check_indexed_objects(Book1, BKT, KSpcL1, V1),
    {KSpcL2, V2} =
        testutil:put_altered_indexed_objects(Book1, BKT, KSpcL1),
    ok = testutil:check_indexed_objects(Book1, BKT, KSpcL2, V2),
    compact_and_wait(Book1, 1000),
    {ok, FileList1} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList1)]),
    compact_and_wait(Book1, 1000),
    {ok, FileList2} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList2)]),
    true = FileList1 == FileList2,
    {async, BackupFun} = leveled_bookie:book_hotbackup(Book1),
    ok = BackupFun(BackupPath),

    io:format("Restarting without key store~n"),
    ok = leveled_bookie:book_close(Book1),

    BookOptsBackup = [{root_path, BackupPath},
                        {cache_size, 2000},
                        {sync_strategy, testutil:sync_strategy()}],
    SW1 = os:timestamp(),
    {ok, Book2} = leveled_bookie:book_start(BookOptsBackup),

    io:format(
        "Opened backup with no ledger in ~w ms~n",
        [timer:now_diff(os:timestamp(), SW1) div 1000]),
    ok = testutil:check_indexed_objects(Book2, BKT, KSpcL2, V2),
    ok = leveled_bookie:book_close(Book2),

    SW2 = os:timestamp(),
    {ok, Book3} = leveled_bookie:book_start(BookOptsBackup),
    io:format(
        "Opened backup with ledger in ~w ms~n",
        [timer:now_diff(os:timestamp(), SW2) div 1000]),
    ok = testutil:check_indexed_objects(Book3, BKT, KSpcL2, V2),
    ok = leveled_bookie:book_destroy(Book3),

    {ok, Book4} = leveled_bookie:book_start(BookOpts(StdJournalCount)),
    {KSpcL3, V3} = testutil:put_indexed_objects(Book4, BKT1, 1000),
    {KSpcL4, _V4} = testutil:put_indexed_objects(Book4, BKT2, 50000),
    {KSpcL5, V5} =
        testutil:put_altered_indexed_objects(Book4, BKT2, KSpcL4),
    compact_and_wait(Book4),
    {async, BackupFun4} = leveled_bookie:book_hotbackup(Book4),
    ok = BackupFun4(BackupPath),
    ok = leveled_bookie:book_close(Book4),

    io:format("Restarting without key store~n"),
    SW5 = os:timestamp(),
    {ok, Book5} = leveled_bookie:book_start(BookOptsBackup),
    io:format(
        "Opened backup with no ledger in ~w ms~n",
        [timer:now_diff(os:timestamp(), SW5) div 1000]),
    ok = testutil:check_indexed_objects(Book5, BKT, KSpcL2, V2),
    ok = testutil:check_indexed_objects(Book5, BKT1, KSpcL3, V3),
    ok = testutil:check_indexed_objects(Book5, BKT2, KSpcL5, V5),
    ok = leveled_bookie:book_destroy(Book5),

    io:format("Testing with sparse distribution after update~n"),
    io:format(
        "Also use smaller Journal files and confirm value used "
        "in compaction~n"),
    {ok, Book6} = leveled_bookie:book_start(BookOpts(SmallJournalCount)),
    {KSpcL6, _V6} = testutil:put_indexed_objects(Book6, BKT3, 60000),
    {OSpcL6, RSpcL6} = lists:split(200, lists:ukeysort(1, KSpcL6)),
    {KSpcL7, V7} =
        testutil:put_altered_indexed_objects(Book6, BKT3, RSpcL6),
    {ok, FileList3} = file:list_dir(CompPath),
    compact_and_wait(Book6),
    {ok, FileList4} = file:list_dir(CompPath),
    {OSpcL6A, V7} =
        testutil:put_altered_indexed_objects(Book6, BKT3, OSpcL6, true, V7),
    {async, BackupFun6} = leveled_bookie:book_hotbackup(Book6),
    ok = BackupFun6(BackupPath),
    ok = leveled_bookie:book_close(Book6),

    io:format("Checking object count in newly compacted journal files~n"),
    NewlyCompactedFiles = lists:subtract(FileList4, FileList3),
    true = length(NewlyCompactedFiles) >= 1,
    CDBFilterFun = fun(_K, _V, _P, Acc, _EF) -> {loop, Acc + 1} end,
    CheckLengthFun =
        fun(FN) ->
            {ok, CF} =
                leveled_cdb:cdb_open_reader(filename:join(CompPath, FN)),
            {_LP, TK} =
                leveled_cdb:cdb_scan(CF, CDBFilterFun, 0, undefined),
            io:format("File ~s has ~w keys~n", [FN, TK]),
            true = TK =< SmallJournalCount
        end,
    lists:foreach(CheckLengthFun, NewlyCompactedFiles),

    io:format("Restarting without key store~n"),
    SW7 = os:timestamp(),
    {ok, Book7} = leveled_bookie:book_start(BookOptsBackup),
    io:format(
        "Opened backup with no ledger in ~w ms~n",
        [timer:now_diff(os:timestamp(), SW7) div 1000]),
    ok = testutil:check_indexed_objects(Book7, BKT3, KSpcL7 ++ OSpcL6A, V7),
    ok = leveled_bookie:book_destroy(Book7),

    testutil:reset_filestructure(BackupPath),
    testutil:reset_filestructure().


close_duringcompaction(_Config) ->
    % Prompt a compaction, and close immediately - confirm that the close
    % happens without error.
@@ -203,13 +329,14 @@ same_key_rotation_withindexes(_Config) ->
    CheckFun =
        fun(Bookie) ->
            {async, R} =
                leveled_bookie:book_indexfold(Bookie,
                                                {Bucket, <<>>},
                                                {FoldKeysFun, []},
                                                {list_to_binary("binary_bin"),
                                                    <<0:32/integer>>,
                                                    <<255:32/integer>>},
                                                {true, undefined}),
                leveled_bookie:book_indexfold(
                    Bookie,
                    {Bucket, <<>>},
                    {FoldKeysFun, []},
                    {list_to_binary("binary_bin"),
                        <<0:32/integer>>,
                        <<255:32/integer>>},
                    {true, undefined}),
            QR = R(),
            BadAnswers =
                lists:filter(fun({I, _K}) -> I =/= <<IdxCnt:32/integer>> end, QR),
@@ -229,36 +356,6 @@ same_key_rotation_withindexes(_Config) ->
    testutil:reset_filestructure().


hot_backup_simple(_Config) ->
    % The journal may have a hot backup. This allows for an online Bookie
    % to be sent a message to prepare a backup function, which an asynchronous
    % worker can then call to generate a backup taken at the point in time
    % the original message was processed.
    %
    % The basic test is to:
    % 1 - load a Bookie, take a backup, delete the original path, restore from
    % that path
    RootPath = testutil:reset_filestructure(),
    BackupPath = testutil:reset_filestructure("backup0"),
    BookOpts = [{root_path, RootPath},
                {cache_size, 1000},
                {max_journalsize, 10000000},
                {sync_strategy, testutil:sync_strategy()}],
    {ok, Spcl1, LastV1} = rotating_object_check(BookOpts, "Bucket1", 3200),
    {ok, Book1} = leveled_bookie:book_start(BookOpts),
    {async, BackupFun} = leveled_bookie:book_hotbackup(Book1),
    ok = BackupFun(BackupPath),
    ok = leveled_bookie:book_close(Book1),
    RootPath = testutil:reset_filestructure(),
    BookOptsBackup = [{root_path, BackupPath},
                        {cache_size, 2000},
                        {max_journalsize, 20000000},
                        {sync_strategy, testutil:sync_strategy()}],
    {ok, BookBackup} = leveled_bookie:book_start(BookOptsBackup),
    ok = testutil:check_indexed_objects(BookBackup, "Bucket1", Spcl1, LastV1),
    ok = leveled_bookie:book_close(BookBackup),
    BackupPath = testutil:reset_filestructure("backup0").

hot_backup_changes(_Config) ->
    RootPath = testutil:reset_filestructure(),
    BackupPath = testutil:reset_filestructure("backup0"),
@@ -946,95 +1043,6 @@ busted_journal_test(MaxJournalSize, PressMethod, PressPoint, Bust) ->
    ok = leveled_bookie:book_close(Bookie2),
    testutil:reset_filestructure(10000).


allkeydelta_journal_multicompact(_Config) ->
    RootPath = testutil:reset_filestructure(),
    CompPath = filename:join(RootPath, "journal/journal_files/post_compact"),
    B = <<"test_bucket">>,
    StartOptsFun =
        fun(JOC) ->
            [{root_path, RootPath},
                {max_journalobjectcount, JOC},
                {max_run_length, 4},
                {singlefile_compactionpercentage, 70.0},
                {maxrunlength_compactionpercentage, 85.0},
                {sync_strategy, testutil:sync_strategy()}]
        end,
    {ok, Bookie1} = leveled_bookie:book_start(StartOptsFun(14000)),
    {KSpcL1, _V1} = testutil:put_indexed_objects(Bookie1, B, 24000),
    {KSpcL2, V2} = testutil:put_altered_indexed_objects(Bookie1,
                                                        B,
                                                        KSpcL1,
                                                        false),
    compact_and_wait(Bookie1, 0),
    compact_and_wait(Bookie1, 0),
    {ok, FileList1} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList1)]),
    compact_and_wait(Bookie1, 0),
    {ok, FileList2} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList2)]),
    true = FileList1 == FileList2,

    ok = testutil:check_indexed_objects(Bookie1,
                                        B,
                                        KSpcL1 ++ KSpcL2,
                                        V2),

    ok = leveled_bookie:book_close(Bookie1),
    leveled_penciller:clean_testdir(RootPath ++ "/ledger"),
    io:format("Restart without ledger~n"),
    {ok, Bookie2} = leveled_bookie:book_start(StartOptsFun(13000)),

    ok = testutil:check_indexed_objects(Bookie2,
                                        B,
                                        KSpcL1 ++ KSpcL2,
                                        V2),

    {KSpcL3, _V3} = testutil:put_altered_indexed_objects(Bookie2,
                                                            B,
                                                            KSpcL2,
                                                            false),
    compact_and_wait(Bookie2, 0),
    {ok, FileList3} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList3)]),

    ok = leveled_bookie:book_close(Bookie2),

    io:format("Restart with smaller journal object count~n"),
    {ok, Bookie3} = leveled_bookie:book_start(StartOptsFun(7000)),

    {KSpcL4, V4} = testutil:put_altered_indexed_objects(Bookie3,
                                                        B,
                                                        KSpcL3,
                                                        false),

    compact_and_wait(Bookie3, 0),

    ok = testutil:check_indexed_objects(Bookie3,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4,
                                        V4),
    {ok, FileList4} = file:list_dir(CompPath),
    io:format("Number of files after compaction ~w~n", [length(FileList4)]),

    ok = leveled_bookie:book_close(Bookie3),

    NewlyCompactedFiles = lists:subtract(FileList4, FileList3),
    true = length(NewlyCompactedFiles) >= 3,
    CDBFilterFun = fun(_K, _V, _P, Acc, _EF) -> {loop, Acc + 1} end,
    CheckLengthFun =
        fun(FN) ->
            {ok, CF} =
                leveled_cdb:cdb_open_reader(filename:join(CompPath, FN)),
            {_LP, TK} =
                leveled_cdb:cdb_scan(CF, CDBFilterFun, 0, undefined),
            io:format("File ~s has ~w keys~n", [FN, TK]),
            true = TK =< 7000
        end,
    lists:foreach(CheckLengthFun, NewlyCompactedFiles),

    testutil:reset_filestructure(10000).

recompact_keydeltas(_Config) ->
    RootPath = testutil:reset_filestructure(),
    B = <<"test_bucket">>,
@@ -1049,73 +1057,52 @@ recompact_keydeltas(_Config) ->
        end,
    {ok, Bookie1} = leveled_bookie:book_start(StartOptsFun(45000)),
    {KSpcL1, _V1} = testutil:put_indexed_objects(Bookie1, B, 24000),
    {KSpcL2, _V2} = testutil:put_altered_indexed_objects(Bookie1,
                                                            B,
                                                            KSpcL1,
                                                            false),
    {KSpcL2, _V2} =
        testutil:put_altered_indexed_objects(Bookie1, B, KSpcL1, false),
    ok = leveled_bookie:book_close(Bookie1),
    {ok, Bookie2} = leveled_bookie:book_start(StartOptsFun(45000)),
    compact_and_wait(Bookie2, 0),
    {KSpcL3, V3} = testutil:put_altered_indexed_objects(Bookie2,
                                                        B,
                                                        KSpcL2,
                                                        false),
    {KSpcL3, V3} =
        testutil:put_altered_indexed_objects(Bookie2, B, KSpcL2, false),
    compact_and_wait(Bookie2, 0),
    ok = testutil:check_indexed_objects(Bookie2,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3,
                                        V3),
    ok =
        testutil:check_indexed_objects(
            Bookie2, B, KSpcL1 ++ KSpcL2 ++ KSpcL3, V3),
    ok = leveled_bookie:book_close(Bookie2),
    testutil:reset_filestructure(10000).



rotating_object_check(BookOpts, B, NumberOfObjects) ->
    {ok, Book1} = leveled_bookie:book_start(BookOpts),
    {KSpcL1, V1} = testutil:put_indexed_objects(Book1, B, NumberOfObjects),
    ok = testutil:check_indexed_objects(Book1,
                                        B,
                                        KSpcL1,
                                        V1),
    {KSpcL2, V2} = testutil:put_altered_indexed_objects(Book1,
                                                        B,
                                                        KSpcL1,
                                                        false),
    ok = testutil:check_indexed_objects(Book1,
                                        B,
                                        KSpcL1 ++ KSpcL2,
                                        V2),
    {KSpcL3, V3} = testutil:put_altered_indexed_objects(Book1,
                                                        B,
                                                        KSpcL2,
                                                        false),
    ok = testutil:check_indexed_objects(Book1,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3,
                                        V3),
    ok = testutil:check_indexed_objects(Book1, B, KSpcL1, V1),
    {KSpcL2, V2} =
        testutil:put_altered_indexed_objects(Book1, B, KSpcL1, false),
    ok =
        testutil:check_indexed_objects(
            Book1, B, KSpcL1 ++ KSpcL2, V2),
    {KSpcL3, V3} =
        testutil:put_altered_indexed_objects(Book1, B, KSpcL2, false),
    ok =
        testutil:check_indexed_objects(
            Book1, B, KSpcL1 ++ KSpcL2 ++ KSpcL3, V3),
    ok = leveled_bookie:book_close(Book1),
    {ok, Book2} = leveled_bookie:book_start(BookOpts),
    ok = testutil:check_indexed_objects(Book2,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3,
                                        V3),
    {KSpcL4, V4} = testutil:put_altered_indexed_objects(Book2,
                                                        B,
                                                        KSpcL3,
                                                        false),
    ok =
        testutil:check_indexed_objects(
            Book2, B, KSpcL1 ++ KSpcL2 ++ KSpcL3, V3),
    {KSpcL4, V4} =
        testutil:put_altered_indexed_objects(Book2, B, KSpcL3, false),
    io:format("Bucket complete - checking index before compaction~n"),
    ok = testutil:check_indexed_objects(Book2,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4,
                                        V4),
    ok =
        testutil:check_indexed_objects(
            Book2, B, KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4, V4),

    compact_and_wait(Book2),

    io:format("Checking index following compaction~n"),
    ok = testutil:check_indexed_objects(Book2,
                                        B,
                                        KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4,
                                        V4),
    ok =
        testutil:check_indexed_objects(
            Book2, B, KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4, V4),

    ok = leveled_bookie:book_close(Book2),
    {ok, KSpcL1 ++ KSpcL2 ++ KSpcL3 ++ KSpcL4, V4}.

@@ -46,6 +46,7 @@
    put_indexed_objects/3,
    put_altered_indexed_objects/3,
    put_altered_indexed_objects/4,
    put_altered_indexed_objects/5,
    check_indexed_objects/4,
    rotating_object_check/3,
    corrupt_journal/5,
@@ -719,60 +720,55 @@ foldkeysfun_returnbucket(Bucket, Key, Acc) ->
check_indexed_objects(Book, B, KSpecL, V) ->
    % Check all objects match, return what should be the results of an all
    % index query
    IdxR = lists:map(fun({K, Spc}) ->
                        {ok, O} = book_riakget(Book, B, K),
                        V = testutil:get_value(O),
                        {add,
                            "idx1_bin",
                            IdxVal} = lists:keyfind(add, 1, Spc),
                        {IdxVal, K} end,
                        KSpecL),
    IdxR =
        lists:map(
            fun({K, Spc}) ->
                {ok, O} = book_riakget(Book, B, K),
                V = testutil:get_value(O),
                {add, "idx1_bin", IdxVal} = lists:keyfind(add, 1, Spc),
                {IdxVal, K}
            end,
            KSpecL),
    % Check the all index query matches expectations
    R = leveled_bookie:book_returnfolder(Book,
                                            {index_query,
                                                B,
                                                {fun foldkeysfun/3, []},
                                                {"idx1_bin",
                                                    "0",
                                                    "|"},
                                                ?RETURN_TERMS}),
    R =
        leveled_bookie:book_returnfolder(
            Book,
            {index_query,
                B,
                {fun foldkeysfun/3, []},
                {"idx1_bin", "0", "|"},
                ?RETURN_TERMS}),
    SW = os:timestamp(),
    {async, Fldr} = R,
    QR0 = Fldr(),
    io:format("Query match found of length ~w in ~w microseconds " ++
                    "expected ~w ~n",
                [length(QR0),
                    timer:now_diff(os:timestamp(), SW),
                    length(IdxR)]),
    io:format(
        "Query match found of length ~w in ~w microseconds "
        "expected ~w ~n",
        [length(QR0), timer:now_diff(os:timestamp(), SW), length(IdxR)]),
    QR = lists:sort(QR0),
    ER = lists:sort(IdxR),

    ok = if
            ER == QR ->
                ok
            end,
    ok = if ER == QR -> ok end,
    ok.


put_indexed_objects(Book, Bucket, Count) ->
    V = testutil:get_compressiblevalue(),
    IndexGen = testutil:get_randomindexes_generator(1),
    V = get_compressiblevalue(),
    IndexGen = get_randomindexes_generator(1),
    SW = os:timestamp(),
    ObjL1 = testutil:generate_objects(Count,
                                        uuid,
                                        [],
                                        V,
                                        IndexGen,
                                        Bucket),
    KSpecL = lists:map(fun({_RN, Obj, Spc}) ->
                            book_riakput(Book, Obj, Spc),
                            {testutil:get_key(Obj), Spc}
                        end,
                        ObjL1),
    io:format("Put of ~w objects with ~w index entries "
                ++
                "each completed in ~w microseconds~n",
            [Count, 1, timer:now_diff(os:timestamp(), SW)]),
    ObjL1 =
        generate_objects(Count, uuid, [], V, IndexGen, Bucket),
    KSpecL =
        lists:map(
            fun({_RN, Obj, Spc}) ->
                book_riakput(Book, Obj, Spc),
                {testutil:get_key(Obj), Spc}
            end,
            ObjL1),
    io:format(
        "Put of ~w objects with ~w index entries "
        "each completed in ~w microseconds~n",
        [Count, 1, timer:now_diff(os:timestamp(), SW)]),
    {KSpecL, V}.

@@ -780,8 +776,12 @@ put_altered_indexed_objects(Book, Bucket, KSpecL) ->
    put_altered_indexed_objects(Book, Bucket, KSpecL, true).

put_altered_indexed_objects(Book, Bucket, KSpecL, RemoveOld2i) ->
    IndexGen = get_randomindexes_generator(1),
    V = get_compressiblevalue(),
    put_altered_indexed_objects(Book, Bucket, KSpecL, RemoveOld2i, V).

put_altered_indexed_objects(Book, Bucket, KSpecL, RemoveOld2i, V) ->
    IndexGen = get_randomindexes_generator(1),

    FindAdditionFun = fun(SpcItem) -> element(1, SpcItem) == add end,
    MapFun =
        fun({K, Spc}) ->