-module(basic_SUITE).
-include("leveled.hrl").
-export([all/0, init_per_suite/1, end_per_suite/1]).
-export([simple_put_fetch_head_delete/1,
            many_put_fetch_head/1,
            journal_compaction/1,
            fetchput_snapshot/1,
            load_and_count/1,
            load_and_count_withdelete/1,
            space_clear_ondelete/1,
            is_empty_test/1,
            many_put_fetch_switchcompression/1,
            bigjournal_littlejournal/1,
            bigsst_littlesst/1,
            safereaderror_startup/1,
            remove_journal_test/1,
            bigpcl_bucketlist/1
            ]).

all() -> [
            simple_put_fetch_head_delete,
            many_put_fetch_head,
            journal_compaction,
            fetchput_snapshot,
            load_and_count,
            load_and_count_withdelete,
            space_clear_ondelete,
            is_empty_test,
            many_put_fetch_switchcompression,
            bigjournal_littlejournal,
            bigsst_littlesst,
            safereaderror_startup,
            remove_journal_test,
            bigpcl_bucketlist
            ].

init_per_suite(Config) ->
    testutil:init_per_suite([{suite, "basic"}|Config]),
    Config.

end_per_suite(Config) ->
    testutil:end_per_suite(Config).
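
%% Run the basic put/fetch/head/delete cycle three times over, varying the
%% log_level and forced_logs options each time to confirm that logging
%% configuration does not alter store behaviour.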
simple_put_fetch_head_delete(_Config) ->
    io:format("simple test with info and no forced logs~n"),
    simple_test_withlog(info, []),
    io:format("simple test with error and no forced logs~n"),
    simple_test_withlog(error, []),
    io:format("simple test with error and stats logs~n"),
    simple_test_withlog(
        error,
        [b0015, b0016, b0017, b0018, p0032, sst12, cdb19, sst13, i0019]).
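
%% The core assertions for the simple test, run with whatever logging
%% options the caller passes. The store is closed and re-opened part-way
%% through (with a smaller max_journalsize) to check that objects and index
%% changes survive a restart, before the store is finally destroyed.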
simple_test_withlog(LogLevel, ForcedLogs) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {sync_strategy, testutil:sync_strategy()},
                    {log_level, LogLevel},
                    {forced_logs, ForcedLogs}],
    {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
    {TestObject, TestSpec} = testutil:generate_testobject(),
    ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
    testutil:check_forobject(Bookie1, TestObject),
    testutil:check_formissingobject(Bookie1, "Bucket1", "Key2"),
    ok = leveled_bookie:book_close(Bookie1),
    StartOpts2 = [{root_path, RootPath},
                    {max_journalsize, 3000000},
                    {sync_strategy, testutil:sync_strategy()},
                    {log_level, LogLevel},
                    {forced_logs, ForcedLogs}],
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
    testutil:check_forobject(Bookie2, TestObject),
    ObjList1 = testutil:generate_objects(5000, 2),
    testutil:riakload(Bookie2, ObjList1),
    ChkList1 = lists:sublist(lists:sort(ObjList1), 100),
    testutil:check_forlist(Bookie2, ChkList1),
    testutil:check_forobject(Bookie2, TestObject),
    testutil:check_formissingobject(Bookie2, "Bucket1", "Key2"),
    ok = leveled_bookie:book_put(Bookie2, "Bucket1", "Key2", "Value2",
                                    [{add, "Index1", "Term1"}]),
    {ok, "Value2"} = leveled_bookie:book_get(Bookie2, "Bucket1", "Key2"),
    {ok, {62888926, S, undefined}} =
        leveled_bookie:book_head(Bookie2, "Bucket1", "Key2"),
    true = (S == 58) or (S == 60),
        % After OTP 26 the object is 58 bytes not 60
    testutil:check_formissingobject(Bookie2, "Bucket1", "Key2"),
    ok = leveled_bookie:book_put(Bookie2, "Bucket1", "Key2", <<"Value2">>,
                                    [{remove, "Index1", "Term1"},
                                        {add, "Index1", <<"Term2">>}]),
    {ok, <<"Value2">>} = leveled_bookie:book_get(Bookie2, "Bucket1", "Key2"),
    ok = leveled_bookie:book_close(Bookie2),
    {ok, Bookie3} = leveled_bookie:book_start(StartOpts2),
    {ok, <<"Value2">>} = leveled_bookie:book_get(Bookie3, "Bucket1", "Key2"),
    ok = leveled_bookie:book_delete(Bookie3, "Bucket1", "Key2",
                                    [{remove, "Index1", "Term1"}]),
    not_found = leveled_bookie:book_get(Bookie3, "Bucket1", "Key2"),
    not_found = leveled_bookie:book_head(Bookie3, "Bucket1", "Key2"),
    ok = leveled_bookie:book_close(Bookie3),
    {ok, Bookie4} = leveled_bookie:book_start(StartOpts2),
    not_found = leveled_bookie:book_get(Bookie4, "Bucket1", "Key2"),
    ok = leveled_bookie:book_destroy(Bookie4).
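
%% Load a large volume of objects across a restart, checking fetch by both
%% GET and HEAD, and that book_sqn/4 only finds the sequence number under
%% the tag the object was stored with (?RIAK_TAG, not ?STD_TAG). Log level
%% and forced logs are also switched at runtime via the book API.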
many_put_fetch_head(_Config) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 =
        [{root_path, RootPath},
            {max_pencillercachesize, 16000},
            {sync_strategy, riak_sync},
            {compression_point, on_compact}
        ],
    {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
    {TestObject, TestSpec} = testutil:generate_testobject(),
    ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
    testutil:check_forobject(Bookie1, TestObject),
    {ok, 1} = leveled_bookie:book_sqn(Bookie1,
                                        testutil:get_bucket(TestObject),
                                        testutil:get_key(TestObject),
                                        ?RIAK_TAG),
    ok = leveled_bookie:book_close(Bookie1),
    StartOpts2 = [{root_path, RootPath},
                    {max_journalsize, 50000000},
                    {max_pencillercachesize, 32000},
                    {sync_strategy, testutil:sync_strategy()},
                    {compression_point, on_receipt}],
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
    ok = leveled_bookie:book_loglevel(Bookie2, error),
    ok = leveled_bookie:book_addlogs(Bookie2, ["B0015"]),
    testutil:check_forobject(Bookie2, TestObject),
    {ok, 1} = leveled_bookie:book_sqn(Bookie2,
                                        testutil:get_bucket(TestObject),
                                        testutil:get_key(TestObject),
                                        ?RIAK_TAG),
    GenList = [2, 20002, 40002, 60002, 80002,
                100002, 120002, 140002, 160002, 180002],
    CLs = testutil:load_objects(20000, GenList, Bookie2, TestObject,
                                fun testutil:generate_smallobjects/2),
    {error, ["B0015"]} = leveled_bookie:book_logsettings(Bookie2),
    ok = leveled_bookie:book_removelogs(Bookie2, ["B0015"]),
    CL1A = lists:nth(1, CLs),
    ChkListFixed = lists:nth(length(CLs), CLs),
    testutil:check_forlist(Bookie2, CL1A),
    {error, []} = leveled_bookie:book_logsettings(Bookie2),
    ok = leveled_bookie:book_loglevel(Bookie2, info),
    ObjList2A = testutil:generate_objects(5000, 2),
    testutil:riakload(Bookie2, ObjList2A),
    ChkList2A = lists:sublist(lists:sort(ObjList2A), 1000),
    testutil:check_forlist(Bookie2, ChkList2A),
    testutil:check_forlist(Bookie2, ChkListFixed),
    testutil:check_forobject(Bookie2, TestObject),
    testutil:check_forlist(Bookie2, ChkList2A),
    testutil:check_forlist(Bookie2, ChkListFixed),
    testutil:check_forobject(Bookie2, TestObject),
    ok = leveled_bookie:book_close(Bookie2),
    {ok, Bookie3} = leveled_bookie:book_start(StartOpts2),
    testutil:check_forlist(Bookie3, ChkList2A),
    testutil:check_forobject(Bookie3, TestObject),
    {ok, 1} = leveled_bookie:book_sqn(Bookie3,
                                        testutil:get_bucket(TestObject),
                                        testutil:get_key(TestObject),
                                        ?RIAK_TAG),
    not_found = leveled_bookie:book_sqn(Bookie3,
                                        testutil:get_bucket(TestObject),
                                        testutil:get_key(TestObject),
                                        ?STD_TAG),
    not_found = leveled_bookie:book_sqn(Bookie3,
                                        testutil:get_bucket(TestObject),
                                        testutil:get_key(TestObject)),
    testutil:check_formissingobject(Bookie3, "Bookie1", "MissingKey0123"),
    ok = leveled_bookie:book_destroy(Bookie3).
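
%% Load large (10KB) objects with a big max_journalsize, then restart with a
%% tiny (5000 byte) journal and load more - both sets should remain
%% fetchable despite the change in journal sizing.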
bigjournal_littlejournal(_Config) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {max_journalsize, 50000000},
                    {max_pencillercachesize, 32000},
                    {sync_strategy, testutil:sync_strategy()},
                    {compression_point, on_compact}],
    {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
    ObjL1 =
        testutil:generate_objects(100, 1, [],
                                    leveled_rand:rand_bytes(10000),
                                    fun() -> [] end, <<"B">>),
    testutil:riakload(Bookie1, ObjL1),
    ok = leveled_bookie:book_close(Bookie1),
    StartOpts2 = lists:ukeysort(1, [{max_journalsize, 5000}|StartOpts1]),
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
    ObjL2 =
        testutil:generate_objects(10, 1000, [],
                                    leveled_rand:rand_bytes(10000),
                                    fun() -> [] end, <<"B">>),
    testutil:riakload(Bookie2, ObjL2),
    testutil:check_forlist(Bookie2, ObjL1),
    testutil:check_forlist(Bookie2, ObjL2),
    ok = leveled_bookie:book_destroy(Bookie2).
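
%% Load the same object set with max_sstslots at 256 and then at 24 - the
%% smaller slot count should produce at least twice as many ledger files.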
bigsst_littlesst(_Config) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {max_journalsize, 50000000},
                    {cache_size, 500},
                    {max_pencillercachesize, 16000},
                    {max_sstslots, 256},
                    {sync_strategy, testutil:sync_strategy()},
                    {compression_point, on_compact}],
    {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
    ObjL1 =
        lists:keysort(
            1,
            testutil:generate_objects(
                100000,
                1,
                [],
                leveled_rand:rand_bytes(100),
                fun() -> [] end,
                <<"B">>)
        ),
    testutil:riakload(Bookie1, ObjL1),
    testutil:check_forlist(Bookie1, ObjL1),
    timer:sleep(10000), % Wait for delete timeout
    JFP = RootPath ++ "/ledger/ledger_files/",
    {ok, FNS1} = file:list_dir(JFP),
    ok = leveled_bookie:book_destroy(Bookie1),

    StartOpts2 = lists:ukeysort(1, [{max_sstslots, 24}|StartOpts1]),
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
    testutil:riakload(Bookie2, ObjL1),
    testutil:check_forlist(Bookie2, ObjL1),
    timer:sleep(10000), % Wait for delete timeout
    {ok, FNS2} = file:list_dir(JFP),
    ok = leveled_bookie:book_destroy(Bookie2),
    io:format("Big SST ~w files Little SST ~w files~n",
                [length(FNS1), length(FNS2)]),
    true = length(FNS2) >= (2 * length(FNS1)).
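
%% Journal compaction under three configurations - with and without a
%% waste_retention_period, and with a restart mid-test.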
journal_compaction(_Config) ->
    journal_compaction_tester(false, 3600),
    journal_compaction_tester(false, undefined),
    journal_compaction_tester(true, 3600).
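
%% With a waste_retention_period set, compacted journal files should be
%% moved to the waste folder; with it undefined they should be removed
%% outright. An open snapshot should block clearance until it is closed,
%% and a compaction request made while one is running should return busy.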
journal_compaction_tester(Restart, WRP) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {max_journalsize, 10000000},
                    {max_run_length, 1},
                    {sync_strategy, testutil:sync_strategy()},
                    {waste_retention_period, WRP}],
    {ok, Bookie0} = leveled_bookie:book_start(StartOpts1),
    ok = leveled_bookie:book_compactjournal(Bookie0, 30000),
    {TestObject, TestSpec} = testutil:generate_testobject(),
    ok = testutil:book_riakput(Bookie0, TestObject, TestSpec),
    testutil:check_forobject(Bookie0, TestObject),
    ObjList1 = testutil:generate_objects(20000, 2),
    testutil:riakload(Bookie0, ObjList1),
    ChkList1 = lists:sublist(lists:sort(ObjList1), 10000),
    testutil:check_forlist(Bookie0, ChkList1),
    testutil:check_forobject(Bookie0, TestObject),
    {B2, K2, V2, Spec2, MD} = {"Bucket2",
                                "Key2",
                                "Value2",
                                [],
                                [{"MDK2", "MDV2"}]},
    {TestObject2, TestSpec2} = testutil:generate_testobject(B2, K2,
                                                            V2, Spec2, MD),
    ok = testutil:book_riakput(Bookie0, TestObject2, TestSpec2),
    ok = leveled_bookie:book_compactjournal(Bookie0, 30000),
    testutil:check_forlist(Bookie0, ChkList1),
    testutil:check_forobject(Bookie0, TestObject),
    testutil:check_forobject(Bookie0, TestObject2),
    testutil:check_forlist(Bookie0, ChkList1),
    testutil:check_forobject(Bookie0, TestObject),
    testutil:check_forobject(Bookie0, TestObject2),
    %% Delete some of the objects
    ObjListD = testutil:generate_objects(10000, 2),
    lists:foreach(fun({_R, O, _S}) ->
                        testutil:book_riakdelete(Bookie0,
                                                    testutil:get_bucket(O),
                                                    testutil:get_key(O),
                                                    [])
                    end,
                    ObjListD),

    %% Now replace all the other objects
    ObjList2 = testutil:generate_objects(40000, 10002),
    testutil:riakload(Bookie0, ObjList2),

    Bookie1 =
        case Restart of
            true ->
                ok = leveled_bookie:book_close(Bookie0),
                {ok, RestartedB} = leveled_bookie:book_start(StartOpts1),
                RestartedB;
            false ->
                Bookie0
        end,

    WasteFP = RootPath ++ "/journal/journal_files/waste",
    % Start snapshot - should stop deletions
    {ok, PclClone, InkClone} =
        leveled_bookie:book_snapshot(Bookie1, store, undefined, false),
    ok = leveled_bookie:book_compactjournal(Bookie1, 30000),
    testutil:wait_for_compaction(Bookie1),
    % Wait to cause delete_pending to be blocked by snapshot
    % timeout on switch to delete_pending is 10s
    timer:sleep(10100),
    case WRP of
        undefined ->
            ok;
        _ ->
            % Check nothing got deleted
            {ok, CJs} = file:list_dir(WasteFP),
            true = length(CJs) == 0
    end,
    ok = leveled_penciller:pcl_close(PclClone),
    ok = leveled_inker:ink_close(InkClone),
    % Snapshot released so deletes should occur at next timeout
    case WRP of
        undefined ->
            timer:sleep(10100); % wait for delete_pending timeout
        % Wait 2 seconds for files to be deleted
        _ ->
            FindDeletedFilesFun =
                fun(X, Found) ->
                    case Found of
                        true ->
                            Found;
                        false ->
                            {ok, Files} = file:list_dir(WasteFP),
                            if
                                length(Files) > 0 ->
                                    io:format("Deleted files found~n"),
                                    true;
                                length(Files) == 0 ->
                                    timer:sleep(X),
                                    false
                            end
                    end
                end,
            lists:foldl(FindDeletedFilesFun,
                        false,
                        [2000,2000,2000,2000,2000,2000])
    end,
    {ok, ClearedJournals} = file:list_dir(WasteFP),
    io:format("~w ClearedJournals found~n", [length(ClearedJournals)]),
    case is_integer(WRP) of
        true ->
            true = length(ClearedJournals) > 0;
        false ->
            true = length(ClearedJournals) == 0
    end,

    ChkList3 = lists:sublist(lists:sort(ObjList2), 500),
    testutil:check_forlist(Bookie1, ChkList3),

    ok = leveled_bookie:book_close(Bookie1),

    % Restart
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
    testutil:check_forobject(Bookie2, TestObject),
    testutil:check_forlist(Bookie2, ChkList3),

    ok = leveled_bookie:book_close(Bookie2),

    StartOpts2 = [{root_path, RootPath},
                    {max_journalsize, 10000000},
                    {max_run_length, 1},
                    {waste_retention_period, 1},
                    {sync_strategy, testutil:sync_strategy()}],
    {ok, Bookie3} = leveled_bookie:book_start(StartOpts2),
    ok = leveled_bookie:book_compactjournal(Bookie3, 30000),
    busy = leveled_bookie:book_compactjournal(Bookie3, 30000),
    testutil:wait_for_compaction(Bookie3),
    ok = leveled_bookie:book_close(Bookie3),

    {ok, ClearedJournalsPC} = file:list_dir(WasteFP),
    io:format("~w ClearedJournals found~n", [length(ClearedJournalsPC)]),
    case is_integer(WRP) of
        true ->
            true = length(ClearedJournals) > 0;
        false ->
            true = length(ClearedJournals) == 0
    end,

    testutil:reset_filestructure(10000).
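
%% Snapshots should be isolated from subsequent puts, should block deletion
%% of ledger files they still reference, and should release those files
%% (and any pending store shutdown) when closed.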
fetchput_snapshot(_Config) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {max_journalsize, 30000000},
                    {cache_size, 2000},
                    {max_pencillercachesize, 16000},
                    {sync_strategy, none}],
    {ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
    {TestObject, TestSpec} = testutil:generate_testobject(),
    ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),

    % Load up 5000 objects
    ObjList1 = testutil:generate_objects(5000, 2),
    testutil:riakload(Bookie1, ObjList1),

    % Now take a snapshot - check it has the same objects
    SnapOpts1 = [{snapshot_bookie, Bookie1}],
    {ok, SnapBookie1} = leveled_bookie:book_start(SnapOpts1),
    ChkList1 = lists:sublist(lists:sort(ObjList1), 100),
    testutil:check_forlist(Bookie1, ChkList1),
    testutil:check_forlist(SnapBookie1, ChkList1),

    compare_foldwithsnap(Bookie1, SnapBookie1, ChkList1),

    % Close the snapshot, check the original store still has the objects
    ok = leveled_bookie:book_close(SnapBookie1),
    testutil:check_forlist(Bookie1, ChkList1),
    ok = leveled_bookie:book_close(Bookie1),
    io:format("Closed initial bookies~n"),

    % all now closed

    % Open a new store (to start with the previously loaded data)
    {ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
    SnapOpts2 = [{snapshot_bookie, Bookie2}],

    % And take a snapshot of that store
    {ok, SnapBookie2} = leveled_bookie:book_start(SnapOpts2),
    io:format("Bookies restarted~n"),

    % Check both the newly opened store and its snapshot have the data
    testutil:check_forlist(Bookie2, ChkList1),
    io:format("Check active bookie still contains original data~n"),
    testutil:check_forlist(SnapBookie2, ChkList1),
    io:format("Check snapshot still contains original data~n"),

    % Generate some replacement objects, load them up - check the master
    % store has the replacement objects, but the snapshot still has the old
    % objects
    ObjList2 = testutil:generate_objects(5000, 2),
    testutil:riakload(Bookie2, ObjList2),
    io:format("Replacement objects put~n"),

    ChkList2 = lists:sublist(lists:sort(ObjList2), 100),
    testutil:check_forlist(Bookie2, ChkList2),
    testutil:check_forlist(SnapBookie2, ChkList1),
    io:format("Checked for replacement objects in active bookie" ++
                ", old objects in snapshot~n"),

    % Check out how many ledger files we now have (should just be 1)
    ok = filelib:ensure_dir(RootPath ++ "/ledger/ledger_files"),
    {ok, FNsA} = file:list_dir(RootPath ++ "/ledger/ledger_files"),

    % generate some new objects and load them up. Check that the master store
    % has the new objects, and the snapshot doesn't
    ObjList3 = testutil:generate_objects(15000, 5002),
    testutil:riakload(Bookie2, ObjList3),
    ChkList3 = lists:sublist(lists:sort(ObjList3), 100),
    testutil:check_forlist(Bookie2, ChkList3),
    testutil:check_formissinglist(SnapBookie2, ChkList3),

    % Now loads lots of new objects
    GenList = [20002, 40002, 60002, 80002, 100002, 120002, 140002, 160002],
    CLs2 = testutil:load_objects(20000, GenList, Bookie2, TestObject,
                                    fun testutil:generate_smallobjects/2),
    io:format("Loaded significant numbers of new objects~n"),

    testutil:check_forlist(Bookie2, lists:nth(length(CLs2), CLs2)),
    io:format("Checked active bookie has new objects~n"),

    % Start a second snapshot, which should have the new objects, whilst the
    % previous snapshot still doesn't
    {ok, SnapBookie3} = leveled_bookie:book_start(SnapOpts2),
    testutil:check_forlist(SnapBookie3, lists:nth(length(CLs2), CLs2)),
    testutil:check_formissinglist(SnapBookie2, ChkList3),
    testutil:check_formissinglist(SnapBookie2, lists:nth(length(CLs2), CLs2)),
    testutil:check_forlist(Bookie2, ChkList2),
    testutil:check_forlist(SnapBookie3, ChkList2),
    testutil:check_forlist(SnapBookie2, ChkList1),
    io:format("Started new snapshot and check for new objects~n"),

    compare_foldwithsnap(Bookie2, SnapBookie3, ChkList3),

    % Load yet more objects, these are replacement objects for the last load
    CLs3 = testutil:load_objects(20000, GenList, Bookie2, TestObject,
                                    fun testutil:generate_smallobjects/2),
    testutil:check_forlist(Bookie2, lists:nth(length(CLs3), CLs3)),
    testutil:check_forlist(Bookie2, lists:nth(1, CLs3)),

    io:format("Starting 15s sleep in which snap2 should block deletion~n"),
    timer:sleep(15000),

    % There should be lots of ledger files, as we have replaced the objects
    % which has created new files, but the old files are still in demand from
    % the snapshot
    {ok, FNsB} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
    ok = leveled_bookie:book_close(SnapBookie2),
    io:format("Starting 15s sleep as snap2 close should unblock deletion~n"),
    timer:sleep(15000),
    io:format("Pause for deletion has ended~n"),

    % So the pause here is to allow for delete pendings to take effect after
    % the closing of the snapshot

    % Now check that any deletions haven't impacted the availability of data
    testutil:check_forlist(Bookie2, lists:nth(length(CLs3), CLs3)),

    % Close the other snapshot, and pause - after the pause there should be a
    % reduction in the number of ledger files due to the deletes
    ok = leveled_bookie:book_close(SnapBookie3),
    io:format("Starting 15s sleep as snap3 close should unblock deletion~n"),
    timer:sleep(15000),
    io:format("Pause for deletion has ended~n"),
    testutil:check_forlist(Bookie2, lists:nth(length(CLs3), CLs3)),
    testutil:check_forlist(Bookie2, lists:nth(1, CLs3)),

    {ok, FNsC} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
    io:format("FNsA ~w FNsB ~w FNsC ~w~n",
                [length(FNsA), length(FNsB), length(FNsC)]),
    true = length(FNsB) > length(FNsA),
    true = length(FNsB) > length(FNsC),
        % smaller due to replacements and files deleting
        % This is dependent on the sleep though (yuk)

    {B1Size, B1Count} = testutil:check_bucket_stats(Bookie2, "Bucket1"),
    true = B1Size > 0,
    true = B1Count == 1,
    {B1Size, B1Count} = testutil:check_bucket_stats(Bookie2, "Bucket1"),
    {BSize, BCount} = testutil:check_bucket_stats(Bookie2, "Bucket"),
    true = BSize > 0,
    true = BCount == 180000,

    io:format("Shutdown with overhanging snapshot~n"),

    {ok, SnpPCL1, SnpJrnl1} =
        leveled_bookie:book_snapshot(Bookie2, store, undefined, true),
    {ok, SnpPCL2, SnpJrnl2} =
        leveled_bookie:book_snapshot(Bookie2, store, undefined, true),

    TestPid = self(),
    spawn(
        fun() ->
            ok = leveled_bookie:book_close(Bookie2),
            TestPid ! ok
        end),

    timer:sleep(5000),
    ok = leveled_penciller:pcl_close(SnpPCL1),
    ok = leveled_inker:ink_close(SnpJrnl1),
    true = is_process_alive(SnpPCL2),
    true = is_process_alive(SnpJrnl2),

    io:format("Time for close to complete is 2 * 10s~n"),
    io:format("Both Inker and Penciller will have snapshot delay~n"),

    receive ok -> ok end,

    false = is_process_alive(SnpPCL2),
    false = is_process_alive(SnpJrnl2),
    testutil:reset_filestructure().
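
%% A head fold over the snapshot should match a head fold over the store at
%% the point the snapshot was taken, and should also match a key fold over
%% the same snapshot.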
compare_foldwithsnap(Bookie, SnapBookie, ChkList) ->
    HeadFoldFun = fun(B, K, _Hd, Acc) -> [{B, K}|Acc] end,
    KeyFoldFun = fun(B, K, Acc) -> [{B, K}|Acc] end,
    {async, HeadFoldDB} =
        leveled_bookie:book_headfold(
            Bookie, ?RIAK_TAG, {HeadFoldFun, []}, true, false, false
        ),
    {async, HeadFoldSnap} =
        leveled_bookie:book_headfold(
            SnapBookie, ?RIAK_TAG, {HeadFoldFun, []}, true, false, false
        ),
    true = HeadFoldDB() == HeadFoldSnap(),

    testutil:check_forlist(SnapBookie, ChkList),

    {async, KeyFoldSnap} =
        leveled_bookie:book_keylist(
            SnapBookie, ?RIAK_TAG, {KeyFoldFun, []}
        ),
    true = HeadFoldSnap() == KeyFoldSnap().
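
%% Run the load-and-count check at three different journal/cache sizings to
%% exercise different file-roll and cache-flush behaviours.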
load_and_count(_Config) ->
    % Use artificially small files, and then load keys, counting that
    % they're all present
    load_and_count(50000000, 2500, 28000),
    load_and_count(200000000, 50, 200000),
    load_and_count(50000000, 1000, 5000).
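
%% Parameterised version of the load-and-count test - the journal size, the
%% bookie's cache size and the penciller's cache size are all varied by the
%% caller.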
load_and_count(JournalSize, BookiesMemSize, PencillerMemSize) ->
    RootPath = testutil:reset_filestructure(),
    StartOpts1 = [{root_path, RootPath},
                    {max_journalsize, JournalSize},
                    {cache_size, BookiesMemSize},
                    {max_pencillercachesize, PencillerMemSize},
2022-12-18 20:18:03 +00:00
|
|
|
{sync_strategy, testutil:sync_strategy()},
|
|
|
|
{stats_logfrequency, 5},
|
|
|
|
{stats_probability, 80}],
|
2016-10-13 17:51:47 +01:00
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
2016-10-18 01:59:03 +01:00
|
|
|
{TestObject, TestSpec} = testutil:generate_testobject(),
|
2016-11-07 10:11:57 +00:00
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2016-10-13 17:51:47 +01:00
|
|
|
io:format("Loading initial small objects~n"),
|
2016-10-18 01:59:03 +01:00
|
|
|
G1 = fun testutil:generate_smallobjects/2,
|
2016-10-13 17:51:47 +01:00
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
G1),
|
2017-10-24 13:19:30 +01:00
|
|
|
{_S, Count} =
|
|
|
|
testutil:check_bucket_stats(Bookie1, "Bucket"),
|
2016-10-13 17:51:47 +01:00
|
|
|
if
|
|
|
|
Acc + 5000 == Count ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
0,
|
|
|
|
lists:seq(1, 20)),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2016-10-13 17:51:47 +01:00
|
|
|
io:format("Loading larger compressible objects~n"),
|
2016-10-18 01:59:03 +01:00
|
|
|
G2 = fun testutil:generate_compressibleobjects/2,
|
2016-10-13 17:51:47 +01:00
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
G2),
|
2017-10-24 13:19:30 +01:00
|
|
|
{_S, Count} =
|
|
|
|
testutil:check_bucket_stats(Bookie1, "Bucket"),
|
2016-10-13 17:51:47 +01:00
|
|
|
if
|
|
|
|
Acc + 5000 == Count ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
100000,
|
|
|
|
lists:seq(1, 20)),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2016-10-13 17:51:47 +01:00
|
|
|
io:format("Replacing small objects~n"),
|
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
G1),
|
2017-10-24 13:19:30 +01:00
|
|
|
{_S, Count} =
|
|
|
|
testutil:check_bucket_stats(Bookie1, "Bucket"),
|
2016-10-13 17:51:47 +01:00
|
|
|
if
|
|
|
|
Count == 200000 ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
0,
|
|
|
|
lists:seq(1, 20)),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2016-10-13 17:51:47 +01:00
|
|
|
io:format("Loading more small objects~n"),
|
2022-12-18 20:18:03 +00:00
|
|
|
io:format("Now with unused snapshot so deletions are blocked~n"),
|
|
|
|
{ok, PclClone, null} =
|
|
|
|
leveled_bookie:book_snapshot(Bookie1, ledger, undefined, true),
|
2016-10-13 17:51:47 +01:00
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
G2),
|
2017-10-24 13:19:30 +01:00
|
|
|
{_S, Count} =
|
|
|
|
testutil:check_bucket_stats(Bookie1, "Bucket"),
|
2016-10-13 17:51:47 +01:00
|
|
|
if
|
|
|
|
Acc + 5000 == Count ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
200000,
|
|
|
|
lists:seq(1, 20)),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2022-12-18 20:18:03 +00:00
|
|
|
ok = leveled_penciller:pcl_close(PclClone),
|
|
|
|
{_S, 300000} = testutil:check_bucket_stats(Bookie1, "Bucket"),
|
2016-10-13 17:51:47 +01:00
|
|
|
ok = leveled_bookie:book_close(Bookie1),
|
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
|
2016-10-18 01:59:03 +01:00
|
|
|
{_, 300000} = testutil:check_bucket_stats(Bookie2, "Bucket"),
|
2023-10-03 18:30:40 +01:00
|
|
|
|
2016-10-13 17:51:47 +01:00
|
|
|
ok = leveled_bookie:book_close(Bookie2),
|
2023-10-03 18:30:40 +01:00
|
|
|
|
2019-05-23 10:16:15 +01:00
|
|
|
ManifestFP =
|
|
|
|
leveled_pmanifest:filepath(filename:join(RootPath, ?LEDGER_FP),
|
|
|
|
manifest),
|
|
|
|
IsManifest = fun(FN) -> filename:extension(FN) == ".man" end,
|
|
|
|
{ok, RawManList} = file:list_dir(ManifestFP),
|
|
|
|
ManList = lists:filter(IsManifest, RawManList),
|
|
|
|
io:format("Length of manifest file list ~w~n", [length(ManList)]),
|
|
|
|
true = length(ManList) =< 5,
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:reset_filestructure().
|
2016-10-13 17:51:47 +01:00
|
|
|
|
2016-10-16 15:41:09 +01:00
|
|
|
load_and_count_withdelete(_Config) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
RootPath = testutil:reset_filestructure(),
|
2016-11-25 17:41:08 +00:00
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{max_journalsize, 50000000},
|
|
|
|
{sync_strategy, testutil:sync_strategy()}],
|
2016-10-16 15:41:09 +01:00
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
2016-10-18 01:59:03 +01:00
|
|
|
{TestObject, TestSpec} = testutil:generate_testobject(),
|
2016-11-07 10:11:57 +00:00
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2016-10-16 15:41:09 +01:00
|
|
|
io:format("Loading initial small objects~n"),
|
2016-10-18 01:59:03 +01:00
|
|
|
G1 = fun testutil:generate_smallobjects/2,
|
2016-10-16 15:41:09 +01:00
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
G1),
|
|
|
|
{_S, Count} = testutil:check_bucket_stats(Bookie1,
|
|
|
|
"Bucket"),
|
2016-10-16 15:41:09 +01:00
|
|
|
if
|
|
|
|
Acc + 5000 == Count ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
0,
|
|
|
|
lists:seq(1, 20)),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2018-12-11 20:42:00 +00:00
|
|
|
{BucketD, KeyD} =
|
|
|
|
{testutil:get_bucket(TestObject), testutil:get_key(TestObject)},
|
2016-10-18 01:59:03 +01:00
|
|
|
{_, 1} = testutil:check_bucket_stats(Bookie1, BucketD),
|
2016-11-07 10:11:57 +00:00
|
|
|
ok = testutil:book_riakdelete(Bookie1, BucketD, KeyD, []),
|
|
|
|
not_found = testutil:book_riakget(Bookie1, BucketD, KeyD),
|
2016-10-18 01:59:03 +01:00
|
|
|
{_, 0} = testutil:check_bucket_stats(Bookie1, BucketD),
|
2016-10-16 15:41:09 +01:00
|
|
|
io:format("Loading larger compressible objects~n"),
|
2016-10-18 01:59:03 +01:00
|
|
|
G2 = fun testutil:generate_compressibleobjects/2,
|
2016-10-16 15:41:09 +01:00
|
|
|
lists:foldl(fun(_X, Acc) ->
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:load_objects(5000,
|
|
|
|
[Acc + 2],
|
|
|
|
Bookie1,
|
|
|
|
no_check,
|
|
|
|
G2),
|
|
|
|
{_S, Count} = testutil:check_bucket_stats(Bookie1,
|
|
|
|
"Bucket"),
|
2016-10-16 15:41:09 +01:00
|
|
|
if
|
|
|
|
Acc + 5000 == Count ->
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
Acc + 5000 end,
|
|
|
|
100000,
|
|
|
|
lists:seq(1, 20)),
|
2016-11-07 10:11:57 +00:00
|
|
|
not_found = testutil:book_riakget(Bookie1, BucketD, KeyD),
|
2016-10-16 15:41:09 +01:00
|
|
|
ok = leveled_bookie:book_close(Bookie1),
|
2023-10-03 18:30:40 +01:00
|
|
|
|
2016-10-16 15:41:09 +01:00
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:check_formissingobject(Bookie2, BucketD, KeyD),
|
2017-02-26 20:52:40 +00:00
|
|
|
testutil:check_formissingobject(Bookie2, "Bookie1", "MissingKey0123"),
|
2016-10-18 01:59:03 +01:00
|
|
|
{_BSize, 0} = testutil:check_bucket_stats(Bookie2, BucketD),
|
|
|
|
ok = leveled_bookie:book_close(Bookie2),
|
2023-10-03 18:30:40 +01:00
|
|
|
|
2016-10-18 01:59:03 +01:00
|
|
|
testutil:reset_filestructure().
|
2016-10-03 23:34:28 +01:00
|
|
|
|
2016-10-23 22:45:43 +01:00
|
|
|
|
2016-10-27 00:57:19 +01:00
|
|
|
space_clear_ondelete(_Config) ->
|
2016-10-23 22:45:43 +01:00
|
|
|
RootPath = testutil:reset_filestructure(),
|
2016-11-25 17:41:08 +00:00
|
|
|
StartOpts1 = [{root_path, RootPath},
|
2017-06-02 08:37:57 +01:00
|
|
|
{max_journalsize, 10000000},
|
2016-11-25 17:41:08 +00:00
|
|
|
{sync_strategy, testutil:sync_strategy()}],
|
2016-10-23 22:45:43 +01:00
|
|
|
{ok, Book1} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
G2 = fun testutil:generate_compressibleobjects/2,
|
|
|
|
testutil:load_objects(20000,
|
|
|
|
[uuid, uuid, uuid, uuid],
|
|
|
|
Book1,
|
|
|
|
no_check,
|
|
|
|
G2),
|
|
|
|
|
2018-05-01 22:30:28 +01:00
|
|
|
FoldKeysFun = fun(B, K, Acc) -> [{B, K}|Acc] end,
|
2018-08-23 10:27:43 +01:00
|
|
|
|
|
|
|
{async, F1} = leveled_bookie:book_keylist(Book1, o_rkv, {FoldKeysFun, []}),
|
|
|
|
|
2016-10-23 22:45:43 +01:00
|
|
|
SW1 = os:timestamp(),
|
|
|
|
KL1 = F1(),
|
|
|
|
ok = case length(KL1) of
|
|
|
|
80000 ->
|
|
|
|
io:format("Key list took ~w microseconds for 80K keys~n",
|
|
|
|
[timer:now_diff(os:timestamp(), SW1)]),
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
timer:sleep(10000), % Allow for any L0 file to be rolled
|
|
|
|
{ok, FNsA_L} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
|
|
|
{ok, FNsA_J} = file:list_dir(RootPath ++ "/journal/journal_files"),
|
2017-04-17 23:01:55 +01:00
|
|
|
io:format("FNsA - Bookie created ~w journal files and ~w ledger files~n",
|
2016-10-23 22:45:43 +01:00
|
|
|
[length(FNsA_J), length(FNsA_L)]),
|
2018-08-23 10:27:43 +01:00
|
|
|
|
2016-11-04 15:56:57 +00:00
|
|
|
% Get an iterator to lock the inker during compaction
|
2016-11-28 22:26:09 +00:00
|
|
|
FoldObjectsFun = fun(B, K, ObjBin, Acc) ->
|
|
|
|
[{B, K, erlang:phash2(ObjBin)}|Acc] end,
|
2018-08-23 10:27:43 +01:00
|
|
|
|
|
|
|
{async, HTreeF1} = leveled_bookie:book_objectfold(Book1,
|
|
|
|
?RIAK_TAG,
|
|
|
|
{FoldObjectsFun, []},
|
|
|
|
false),
|
|
|
|
|
2018-05-01 22:30:28 +01:00
|
|
|
% This query does not Snap PreFold - and so will not prevent
|
|
|
|
% pending deletes from prompting actual deletes
|
|
|
|
|
2018-08-23 10:27:43 +01:00
|
|
|
{async, KF1} = leveled_bookie:book_keylist(Book1, o_rkv, {FoldKeysFun, []}),
|
2018-05-01 22:30:28 +01:00
|
|
|
% This query does Snap PreFold, and so will prevent deletes from
|
|
|
|
% the ledger
|
2017-04-17 23:01:55 +01:00
|
|
|
|
2016-11-04 15:56:57 +00:00
|
|
|
% Delete the keys
|
2016-10-23 22:45:43 +01:00
|
|
|
SW2 = os:timestamp(),
|
|
|
|
lists:foreach(fun({Bucket, Key}) ->
|
2016-11-07 10:27:38 +00:00
|
|
|
testutil:book_riakdelete(Book1,
|
2016-11-07 10:11:57 +00:00
|
|
|
Bucket,
|
|
|
|
Key,
|
|
|
|
[])
|
2016-10-23 22:45:43 +01:00
|
|
|
end,
|
|
|
|
KL1),
|
|
|
|
io:format("Deletion took ~w microseconds for 80K keys~n",
|
|
|
|
[timer:now_diff(os:timestamp(), SW2)]),
|
2016-11-04 15:56:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
2016-10-23 22:45:43 +01:00
|
|
|
ok = leveled_bookie:book_compactjournal(Book1, 30000),
|
2016-10-26 20:39:16 +01:00
|
|
|
F = fun leveled_bookie:book_islastcompactionpending/1,
|
|
|
|
lists:foldl(fun(X, Pending) ->
|
|
|
|
case Pending of
|
|
|
|
false ->
|
|
|
|
false;
|
|
|
|
true ->
|
|
|
|
io:format("Loop ~w waiting for journal "
|
|
|
|
++ "compaction to complete~n", [X]),
|
|
|
|
timer:sleep(20000),
|
|
|
|
F(Book1)
|
|
|
|
end end,
|
|
|
|
true,
|
|
|
|
lists:seq(1, 15)),
|
2016-11-04 15:56:57 +00:00
|
|
|
io:format("Waiting for journal deletes - blocked~n"),
|
|
|
|
timer:sleep(20000),
|
2017-04-17 23:01:55 +01:00
|
|
|
|
2018-05-01 22:30:28 +01:00
|
|
|
io:format("Sleep over - Fold Objects query ~n"),
|
|
|
|
% for this query the snapshot is made at fold time, and so the results are
|
|
|
|
% empty
|
2017-10-17 20:39:11 +01:00
|
|
|
true = length(HTreeF1()) == 0,
|
2018-05-01 22:30:28 +01:00
|
|
|
|
2017-04-17 23:01:55 +01:00
|
|
|
% This query uses a genuine async fold on a snapshot made at request time
|
2018-05-01 22:30:28 +01:00
|
|
|
% and so the results should be non-empty
|
|
|
|
io:format("Now Query 2 - Fold Keys query~n"),
|
2017-04-17 23:01:55 +01:00
|
|
|
true = length(KF1()) == 80000,
|
|
|
|
|
2016-11-04 15:56:57 +00:00
|
|
|
io:format("Waiting for journal deletes - unblocked~n"),
|
|
|
|
timer:sleep(20000),
|
2016-10-23 22:45:43 +01:00
|
|
|
{ok, FNsB_L} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
|
|
|
{ok, FNsB_J} = file:list_dir(RootPath ++ "/journal/journal_files"),
|
2016-11-04 15:56:57 +00:00
|
|
|
{ok, FNsB_PC} = file:list_dir(RootPath
|
|
|
|
++ "/journal/journal_files/post_compact"),
|
2016-10-26 20:39:16 +01:00
|
|
|
PointB_Journals = length(FNsB_J) + length(FNsB_PC),
|
2017-04-17 23:01:55 +01:00
|
|
|
io:format("FNsB - Bookie has ~w journal files and ~w ledger files " ++
|
2016-10-23 22:45:43 +01:00
|
|
|
"after deletes~n",
|
2016-10-26 20:39:16 +01:00
|
|
|
[PointB_Journals, length(FNsB_L)]),
|
2016-10-23 22:45:43 +01:00
|
|
|
|
2018-08-23 10:27:43 +01:00
|
|
|
{async, F2} = leveled_bookie:book_keylist(Book1, o_rkv, {FoldKeysFun, []}),
|
2016-10-23 22:45:43 +01:00
|
|
|
SW3 = os:timestamp(),
|
|
|
|
KL2 = F2(),
|
|
|
|
ok = case length(KL2) of
|
|
|
|
0 ->
|
|
|
|
io:format("Key list took ~w microseconds for no keys~n",
|
|
|
|
[timer:now_diff(os:timestamp(), SW3)]),
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
ok = leveled_bookie:book_close(Book1),
|
|
|
|
|
|
|
|
{ok, Book2} = leveled_bookie:book_start(StartOpts1),
|
2018-08-23 10:27:43 +01:00
|
|
|
{async, F3} = leveled_bookie:book_keylist(Book2, o_rkv, {FoldKeysFun, []}),
|
2016-10-23 22:45:43 +01:00
|
|
|
SW4 = os:timestamp(),
|
|
|
|
KL3 = F3(),
|
|
|
|
ok = case length(KL3) of
|
|
|
|
0 ->
|
|
|
|
io:format("Key list took ~w microseconds for no keys~n",
|
|
|
|
[timer:now_diff(os:timestamp(), SW4)]),
|
|
|
|
ok
|
|
|
|
end,
|
|
|
|
ok = leveled_bookie:book_close(Book2),
|
|
|
|
{ok, FNsC_L} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
2017-04-17 23:01:55 +01:00
|
|
|
io:format("FNsC - Bookie has ~w ledger files " ++
|
2016-10-26 20:39:16 +01:00
|
|
|
"after close~n", [length(FNsC_L)]),
|
|
|
|
|
|
|
|
{ok, Book3} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
io:format("This should cause a final ledger merge event~n"),
|
|
|
|
io:format("Will require the penciller to resolve the issue of creating" ++
|
|
|
|
" an empty file as all keys compact on merge~n"),
|
2017-09-27 23:52:49 +01:00
|
|
|
|
|
|
|
CheckFun =
|
|
|
|
fun(X, FileCount) ->
|
|
|
|
case FileCount of
|
|
|
|
0 ->
|
|
|
|
0;
|
|
|
|
_ ->
|
|
|
|
timer:sleep(X),
|
|
|
|
{ok, NewFC} =
|
|
|
|
file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
|
|
|
io:format("Looping with ledger file count ~w~n",
|
|
|
|
[length(NewFC)]),
|
|
|
|
length(strip_nonsst(NewFC))
|
|
|
|
end
|
|
|
|
end,
|
|
|
|
|
|
|
|
FC = lists:foldl(CheckFun, infinity, [2000, 3000, 5000, 8000]),
|
2016-10-26 20:39:16 +01:00
|
|
|
ok = leveled_bookie:book_close(Book3),
|
2017-09-27 23:52:49 +01:00
|
|
|
case FC of
|
|
|
|
0 ->
|
|
|
|
ok;
|
|
|
|
_ ->
|
|
|
|
{ok, Book4} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
lists:foldl(CheckFun, infinity, [2000, 3000, 5000, 8000]),
|
|
|
|
leveled_bookie:book_close(Book4)
|
|
|
|
end,
|
|
|
|
|
2016-10-26 20:39:16 +01:00
|
|
|
{ok, FNsD_L} = file:list_dir(RootPath ++ "/ledger/ledger_files"),
|
2017-04-17 23:01:55 +01:00
|
|
|
io:format("FNsD - Bookie has ~w ledger files " ++
|
2017-09-27 23:52:49 +01:00
|
|
|
"after second close~n", [length(strip_nonsst(FNsD_L))]),
|
2017-04-17 23:01:55 +01:00
|
|
|
lists:foreach(fun(FN) ->
|
2017-06-02 08:37:57 +01:00
|
|
|
io:format("FNsD - Ledger file is ~s~n", [FN])
|
2017-04-17 23:01:55 +01:00
|
|
|
end,
|
|
|
|
FNsD_L),
|
2016-10-26 20:39:16 +01:00
|
|
|
true = PointB_Journals < length(FNsA_J),
|
2017-09-27 23:52:49 +01:00
|
|
|
true = length(strip_nonsst(FNsD_L)) < length(strip_nonsst(FNsA_L)),
|
|
|
|
true = length(strip_nonsst(FNsD_L)) < length(strip_nonsst(FNsB_L)),
|
2023-11-10 15:04:47 +00:00
|
|
|
true = length(strip_nonsst(FNsD_L)) =< length(strip_nonsst(FNsC_L)),
|
2017-09-27 23:52:49 +01:00
|
|
|
true = length(strip_nonsst(FNsD_L)) == 0.
|
|
|
|
|
2017-05-23 11:59:44 +01:00
|
|
|
|
2017-09-27 23:52:49 +01:00
|
|
|
strip_nonsst(FileList) ->
|
|
|
|
SSTOnlyFun =
|
|
|
|
fun(FN, Acc) ->
|
|
|
|
case filename:extension(FN) of
|
|
|
|
".sst" ->
|
|
|
|
[FN|Acc];
|
|
|
|
_ ->
|
|
|
|
Acc
|
|
|
|
end
|
|
|
|
end,
|
|
|
|
lists:foldl(SSTOnlyFun, [], FileList).
|
2017-05-23 11:59:44 +01:00
|
|
|
|
|
|
|
|
|
|
|
is_empty_test(_Config) ->
|
|
|
|
RootPath = testutil:reset_filestructure(),
|
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{sync_strategy, testutil:sync_strategy()}],
|
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
|
|
|
|
{B1, K1, V1, Spec, MD} = {term_to_binary("Bucket1"),
|
|
|
|
term_to_binary("Key1"),
|
|
|
|
"Value1",
|
|
|
|
[],
|
|
|
|
[{"MDK1", "MDV1"}]},
|
|
|
|
{TestObject1, TestSpec1} =
|
|
|
|
testutil:generate_testobject(B1, K1, V1, Spec, MD),
|
|
|
|
{B1, K2, V2, Spec, MD} = {term_to_binary("Bucket1"),
|
|
|
|
term_to_binary("Key2"),
|
|
|
|
"Value2",
|
|
|
|
[],
|
|
|
|
[{"MDK1", "MDV1"}]},
|
|
|
|
{TestObject2, TestSpec2} =
|
|
|
|
testutil:generate_testobject(B1, K2, V2, Spec, MD),
|
|
|
|
{B2, K3, V3, Spec, MD} = {term_to_binary("Bucket2"),
|
|
|
|
term_to_binary("Key3"),
|
|
|
|
"Value3",
|
|
|
|
[],
|
|
|
|
[{"MDK1", "MDV1"}]},
|
|
|
|
{TestObject3, TestSpec3} =
|
|
|
|
testutil:generate_testobject(B2, K3, V3, Spec, MD),
|
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject1, TestSpec1),
|
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject2, TestSpec2),
|
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject3, TestSpec3),
|
|
|
|
|
|
|
|
FoldBucketsFun = fun(B, Acc) -> sets:add_element(B, Acc) end,
|
2018-09-01 10:39:23 +01:00
|
|
|
BucketListQuery = {bucket_list,
|
2017-05-23 11:59:44 +01:00
|
|
|
?RIAK_TAG,
|
|
|
|
{FoldBucketsFun, sets:new()}},
|
|
|
|
{async, BL} = leveled_bookie:book_returnfolder(Bookie1, BucketListQuery),
|
|
|
|
true = sets:size(BL()) == 2,
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_put(Bookie1, B2, K3, delete, [], ?RIAK_TAG),
|
|
|
|
{async, BLpd1} = leveled_bookie:book_returnfolder(Bookie1, BucketListQuery),
|
|
|
|
true = sets:size(BLpd1()) == 1,
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_put(Bookie1, B1, K2, delete, [], ?RIAK_TAG),
|
|
|
|
{async, BLpd2} = leveled_bookie:book_returnfolder(Bookie1, BucketListQuery),
|
|
|
|
true = sets:size(BLpd2()) == 1,
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_put(Bookie1, B1, K1, delete, [], ?RIAK_TAG),
|
|
|
|
{async, BLpd3} = leveled_bookie:book_returnfolder(Bookie1, BucketListQuery),
|
|
|
|
true = sets:size(BLpd3()) == 0,
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_close(Bookie1).
|
2017-11-06 15:54:58 +00:00
|
|
|
|
|
|
|
|
2018-10-03 18:29:20 +01:00
|
|
|
remove_journal_test(_Config) ->
|
|
|
|
RootPath = testutil:reset_filestructure(),
|
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{max_pencillercachesize, 16000},
|
|
|
|
{sync_strategy, testutil:sync_strategy()},
|
|
|
|
{compression_point, on_compact}],
|
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
GenList = [1, 20001, 40001, 60001],
|
|
|
|
CLs = testutil:load_objects(20000, GenList, Bookie1, no_check,
|
|
|
|
fun testutil:generate_smallobjects/2),
|
|
|
|
CheckList1 = lists:sublist(lists:nth(1, CLs), 100, 1000),
|
|
|
|
CheckList2 = lists:sublist(lists:nth(2, CLs), 100, 1000),
|
|
|
|
CheckList3 = lists:sublist(lists:nth(3, CLs), 100, 1000),
|
|
|
|
CheckList4 = lists:sublist(lists:nth(4, CLs), 100, 1000),
|
|
|
|
testutil:check_forlist(Bookie1, CheckList1),
|
|
|
|
testutil:check_forlist(Bookie1, CheckList2),
|
|
|
|
testutil:check_forlist(Bookie1, CheckList3),
|
|
|
|
testutil:check_forlist(Bookie1, CheckList4),
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_close(Bookie1),
|
|
|
|
leveled_inker:clean_testdir(RootPath ++ "/journal"),
|
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
|
|
|
|
% If we're not careful here new data will be added, and we
|
|
|
|
% won't be able to read it
|
|
|
|
[NewCheckList] =
|
|
|
|
testutil:load_objects(1000, [80001], Bookie2, no_check,
|
|
|
|
fun testutil:generate_smallobjects/2),
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_close(Bookie2),
|
|
|
|
{ok, Bookie3} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
testutil:check_forlist(Bookie3, NewCheckList),
|
|
|
|
ok = leveled_bookie:book_destroy(Bookie3).
|
|
|
|
|
|
|
|
|
2017-11-06 15:54:58 +00:00
|
|
|
many_put_fetch_switchcompression(_Config) ->
|
2024-01-23 16:25:03 +00:00
|
|
|
{T0, ok} =
|
|
|
|
timer:tc(fun many_put_fetch_switchcompression_tester/1, [native]),
|
|
|
|
{T1, ok} =
|
|
|
|
timer:tc(fun many_put_fetch_switchcompression_tester/1, [lz4]),
|
|
|
|
{T2, ok} =
|
|
|
|
timer:tc(fun many_put_fetch_switchcompression_tester/1, [zstd]),
|
|
|
|
io:format("Test timings native=~w lz4=~w, zstd=~w", [T0, T1, T2]).
|
|
|
|
|
|
|
|
many_put_fetch_switchcompression_tester(CompressionMethod) ->
|
2017-11-06 15:54:58 +00:00
|
|
|
RootPath = testutil:reset_filestructure(),
|
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{max_pencillercachesize, 16000},
|
2023-11-07 14:58:43 +00:00
|
|
|
{max_journalobjectcount, 30000},
|
|
|
|
{compression_level, 3},
|
|
|
|
{sync_strategy, testutil:sync_strategy()},
|
2024-01-23 16:25:03 +00:00
|
|
|
{compression_method, native},
|
|
|
|
{ledger_compression, none}],
|
2023-11-07 14:58:43 +00:00
|
|
|
StartOpts2 = [{root_path, RootPath},
|
|
|
|
{max_pencillercachesize, 24000},
|
|
|
|
{max_journalobjectcount, 30000},
|
|
|
|
{sync_strategy, testutil:sync_strategy()},
|
2024-01-23 16:25:03 +00:00
|
|
|
{compression_method, CompressionMethod},
|
|
|
|
{ledger_compression, as_store}],
|
2023-11-07 14:58:43 +00:00
|
|
|
StartOpts3 = [{root_path, RootPath},
|
|
|
|
{max_pencillercachesize, 16000},
|
|
|
|
{max_journalobjectcount, 30000},
|
|
|
|
{sync_strategy, testutil:sync_strategy()},
|
2024-01-23 16:25:03 +00:00
|
|
|
{compression_method, none},
|
|
|
|
{ledger_compression, as_store}],
|
2023-11-07 14:58:43 +00:00
|
|
|
|
|
|
|
|
2017-11-06 15:54:58 +00:00
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
{TestObject, TestSpec} = testutil:generate_testobject(),
|
|
|
|
ok = testutil:book_riakput(Bookie1, TestObject, TestSpec),
|
|
|
|
testutil:check_forobject(Bookie1, TestObject),
|
2023-11-07 14:58:43 +00:00
|
|
|
CL1s =
|
|
|
|
testutil:load_objects(
|
|
|
|
40000,
|
|
|
|
[2, 40002],
|
|
|
|
Bookie1,
|
|
|
|
TestObject,
|
|
|
|
fun testutil:generate_smallobjects/2),
|
|
|
|
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie1, CL) end, CL1s),
|
2017-11-06 15:54:58 +00:00
|
|
|
ok = leveled_bookie:book_close(Bookie1),
|
|
|
|
|
2023-11-07 14:58:43 +00:00
|
|
|
%% Change compression method to the CompressionMethod under test
|
2017-11-06 15:54:58 +00:00
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts2),
|
|
|
|
testutil:check_forobject(Bookie2, TestObject),
|
2023-11-07 14:58:43 +00:00
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie2, CL) end, CL1s),
|
|
|
|
|
|
|
|
CL2s =
|
|
|
|
testutil:load_objects(
|
|
|
|
40000,
|
|
|
|
[80002, 120002],
|
|
|
|
Bookie2,
|
|
|
|
TestObject,
|
|
|
|
fun testutil:generate_smallobjects/2),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie2, CL) end, CL2s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie2, CL) end, CL1s),
|
2017-11-06 15:54:58 +00:00
|
|
|
ok = leveled_bookie:book_close(Bookie2),
|
|
|
|
|
|
|
|
%% Change method back again
|
|
|
|
{ok, Bookie3} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
testutil:check_formissingobject(Bookie3, "Bookie1", "MissingKey0123"),
|
2023-11-07 14:58:43 +00:00
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie3, CL) end, CL2s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie3, CL) end, CL1s),
|
|
|
|
|
|
|
|
CL3s =
|
|
|
|
testutil:load_objects(
|
|
|
|
40000,
|
|
|
|
[160002, 200002],
|
|
|
|
Bookie3,
|
|
|
|
TestObject,
|
|
|
|
fun testutil:generate_smallobjects/2,
|
|
|
|
30000
|
|
|
|
),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie3, CL) end, CL3s),
|
|
|
|
ok = leveled_bookie:book_close(Bookie3),
|
|
|
|
|
|
|
|
% Change method to no compression
|
|
|
|
{ok, Bookie4} = leveled_bookie:book_start(StartOpts3),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL2s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL1s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL3s),
|
|
|
|
|
|
|
|
CL4s =
|
|
|
|
testutil:load_objects(
|
|
|
|
40000,
|
|
|
|
[240002, 280002],
|
|
|
|
Bookie4,
|
|
|
|
TestObject,
|
|
|
|
fun testutil:generate_smallobjects/2
|
|
|
|
),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL3s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL4s),
|
|
|
|
testutil:delete_some_objects(Bookie4, lists:flatten(CL3s), 60000),
|
|
|
|
CL5s =
|
|
|
|
testutil:load_objects(
|
|
|
|
40000,
|
|
|
|
[320002, 360002],
|
|
|
|
Bookie4,
|
|
|
|
TestObject,
|
|
|
|
fun testutil:generate_smallobjects/2
|
|
|
|
),
|
|
|
|
ok = leveled_bookie:book_compactjournal(Bookie4, 30000),
|
|
|
|
testutil:wait_for_compaction(Bookie4),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL4s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie4, CL) end, CL5s),
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_close(Bookie4),
|
|
|
|
|
|
|
|
%% Change compression method back to the CompressionMethod under test
|
|
|
|
{ok, Bookie5} = leveled_bookie:book_start(StartOpts2),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie5, CL) end, CL1s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie5, CL) end, CL4s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie5, CL) end, CL5s),
|
|
|
|
ok = leveled_bookie:book_close(Bookie5),
|
|
|
|
|
|
|
|
%% Change compression method -> native
|
|
|
|
{ok, Bookie6} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie6, CL) end, CL1s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie6, CL) end, CL4s),
|
|
|
|
lists:foreach(
|
|
|
|
fun(CL) -> ok = testutil:check_forlist(Bookie6, CL) end, CL5s),
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_destroy(Bookie6).
|
2018-09-27 11:32:42 +01:00
|
|
|
|
|
|
|
safereaderror_startup(_Config) ->
|
|
|
|
RootPath = testutil:reset_filestructure(),
|
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{compression_point, on_compact},
|
|
|
|
{max_journalsize, 1000}, {cache_size, 2060}],
|
|
|
|
{ok, Bookie1} = leveled_bookie:book_plainstart(StartOpts1),
|
|
|
|
B1 = <<98, 117, 99, 107, 101, 116, 51>>,
|
|
|
|
K1 =
|
|
|
|
<<38, 50, 201, 47, 167, 125, 57, 232, 84, 38, 14, 114, 24, 62,
|
|
|
|
12, 74>>,
|
|
|
|
Obj1 =
|
|
|
|
<<87, 150, 217, 230, 4, 81, 170, 68, 181, 224, 60, 232, 4, 74,
|
|
|
|
159, 12, 156, 56, 194, 181, 18, 158, 195, 207, 106, 191, 80,
|
|
|
|
111, 100, 81, 252, 248>>,
|
|
|
|
Obj2 =
|
|
|
|
<<86, 201, 253, 149, 213, 10, 32, 166, 33, 136, 42, 79, 103, 250,
|
|
|
|
139, 95, 42, 143, 161, 3, 185, 74, 149, 226, 232, 214, 183, 64,
|
|
|
|
69, 56, 167, 78>>,
|
|
|
|
ok = leveled_bookie:book_put(Bookie1, B1, K1, Obj1, []),
|
|
|
|
ok = leveled_bookie:book_put(Bookie1, B1, K1, Obj2, []),
|
|
|
|
exit(Bookie1, kill),
|
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
{ok, ReadBack} = leveled_bookie:book_get(Bookie2, B1, K1),
|
|
|
|
io:format("Read back ~w", [ReadBack]),
|
|
|
|
true = ReadBack == Obj2,
|
2023-12-19 11:56:03 +00:00
|
|
|
ok = leveled_bookie:book_close(Bookie2).
|
|
|
|
|
|
|
|
bigpcl_bucketlist(_Config) ->
|
|
|
|
%% https://github.com/martinsumner/leveled/issues/326
|
|
|
|
%% In OTP 22+ there appear to be issues with anonymous functions which
|
|
|
|
%% have a reference to loop state, requiring a copy of all the loop state
|
|
|
|
%% to be made when returning the function.
|
|
|
|
%% This test creates a large loop state on the leveled_penciller to prove
|
|
|
|
%% this.
|
|
|
|
%% The problem can be resolved simply by renaming the element of the loop
|
|
|
|
%% state used within the anonymous function (see the sketch below).
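%% A minimal sketch of that resolution (illustrative names, not the
%% actual leveled_penciller code): rebind the required element to a
%% fresh variable before forming the closure, so the fun captures only
%% that element rather than the whole loop state record, e.g.
%%   Manifest = State#state.manifest,
%%   fun() -> fold_over(Manifest) end
%% rather than fun() -> fold_over(State#state.manifest) end, which
%% captures (and therefore copies) all of State.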
|
|
|
|
RootPath = testutil:reset_filestructure(),
|
|
|
|
BucketCount = 500,
|
|
|
|
ObjectCount = 100,
|
|
|
|
StartOpts1 = [{root_path, RootPath},
|
|
|
|
{max_journalsize, 50000000},
|
|
|
|
{cache_size, 4000},
|
|
|
|
{max_pencillercachesize, 128000},
|
|
|
|
{max_sstslots, 256},
|
|
|
|
{sync_strategy, testutil:sync_strategy()}],
|
|
|
|
{ok, Bookie1} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
BucketList =
|
|
|
|
lists:map(fun(I) -> list_to_binary(integer_to_list(I)) end,
|
|
|
|
lists:seq(1, BucketCount)),
|
|
|
|
|
|
|
|
MapFun =
|
|
|
|
fun(B) ->
|
|
|
|
testutil:generate_objects(ObjectCount, 1, [],
|
|
|
|
leveled_rand:rand_bytes(100),
|
|
|
|
fun() -> [] end,
|
|
|
|
B)
|
|
|
|
end,
|
|
|
|
ObjLofL = lists:map(MapFun, BucketList),
|
|
|
|
lists:foreach(fun(ObjL) -> testutil:riakload(Bookie1, ObjL) end, ObjLofL),
|
|
|
|
BucketFold =
|
|
|
|
fun(B, _K, _V, Acc) ->
|
|
|
|
case sets:is_element(B, Acc) of
|
|
|
|
true ->
|
|
|
|
Acc;
|
|
|
|
false ->
|
|
|
|
sets:add_element(B, Acc)
|
|
|
|
end
|
|
|
|
end,
|
|
|
|
FBAccT = {BucketFold, sets:new()},
|
|
|
|
|
|
|
|
{async, BucketFolder1} =
|
|
|
|
leveled_bookie:book_headfold(Bookie1,
|
|
|
|
?RIAK_TAG,
|
|
|
|
{bucket_list, BucketList},
|
|
|
|
FBAccT,
|
|
|
|
false, false, false),
|
|
|
|
|
|
|
|
{FoldTime1, BucketList1} = timer:tc(BucketFolder1, []),
|
|
|
|
true = BucketCount == sets:size(BucketList1),
|
|
|
|
ok = leveled_bookie:book_close(Bookie1),
|
|
|
|
|
|
|
|
{ok, Bookie2} = leveled_bookie:book_start(StartOpts1),
|
|
|
|
|
|
|
|
{async, BucketFolder2} =
|
|
|
|
leveled_bookie:book_headfold(Bookie2,
|
|
|
|
?RIAK_TAG,
|
|
|
|
{bucket_list, BucketList},
|
|
|
|
FBAccT,
|
|
|
|
false, false, false),
|
|
|
|
{FoldTime2, BucketList2} = timer:tc(BucketFolder2, []),
|
|
|
|
true = BucketCount == sets:size(BucketList2),
|
|
|
|
|
|
|
|
io:format("Fold pre-close ~w ms post-close ~w ms~n",
|
|
|
|
[FoldTime1 div 1000, FoldTime2 div 1000]),
|
|
|
|
|
|
|
|
true = FoldTime1 < 10 * FoldTime2,
|
|
|
|
%% The fold in-memory should be the same order of magnitude of response
|
|
|
|
%% time as the fold post-persistence
|
|
|
|
|
|
|
|
ok = leveled_bookie:book_destroy(Bookie2).
|