Mas i335 otp24 (#336)
* Address OTP24 warnings, ct and eunit paths * Reorg to add OTP 24 support * Update VOLUME.md * Correct broken refs * Update README.md * CI on all main branches Co-authored-by: Ulf Wiger <ulf@wiger.net>
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Parallel Node Testing
|
||||
|
||||
Initial volume tests have been [based on standard basho_bench eleveldb test](../test/volume/single_node/examples) to run multiple stores in parallel on the same node and subject them to concurrent pressure.
|
||||
Initial volume tests have been [based on standard basho_bench eleveldb test](volume/single_node/examples) to run multiple stores in parallel on the same node and subject them to concurrent pressure.
|
||||
|
||||
This showed a [relative positive performance for leveled](VOLUME_PRERIAK.md) for both population and load. This also showed that although the leveled throughput was relatively stable, it was still subject to fluctuations related to CPU constraints - especially as compaction of the ledger was a CPU intensive activity. Prior to moving on to full Riak testing, a number of changes were then made to leveled to reduce the CPU load during these merge events.
|
||||
|
||||
|
@ -38,7 +38,7 @@ Comparison charts for this test:
|
|||
|
||||
Riak + leveled | Riak + eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
### Mid-Size Object, SSDs, No Sync-On-Write
|
||||
|
||||
|
@ -54,7 +54,7 @@ Comparison charts for this test:
|
|||
|
||||
Riak + leveled | Riak + eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
### Mid-Size Object, HDDs, No Sync-On-Write
|
||||
|
||||
|
@ -70,7 +70,7 @@ Comparison charts for this test:
|
|||
|
||||
Riak + leveled | Riak + eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
Note that there is a clear inflexion point when throughput starts to drop sharply at about the hour mark into the test.
|
||||
This is the stage when the volume of data has begun to exceed the volume supportable in cache, and so disk activity begins to be required for GET operations with increasing frequency.
|
||||
|
@ -89,7 +89,7 @@ Comparison charts for this test:
|
|||
|
||||
Riak + leveled | Riak + eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
|
||||
### Double-Size Object, SSDs, No Sync-On-Write
|
||||
|
@ -106,14 +106,14 @@ Comparison charts for this test:
|
|||
|
||||
Riak + leveled | Riak + eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
|
||||
### Lies, damned lies etc
|
||||
|
||||
The first thing to note about the test is the impact of the Pareto distribution, and of starting from an empty store, on what is actually being tested. At the start of the test there is a 0% chance of a GET request actually finding an object. Normally, it will be 3 hours into the test before a GET request will have a 50% chance of finding an object.
|
||||
|
||||

|
||||

|
||||
|
||||
Both leveled and leveldb are optimised for finding non-presence through the use of bloom filters, so the comparison is not unduly influenced by this. However, the workload at the end of the test is both more realistic (in that objects are found), and harder if the previous throughput had been greater (in that more objects are found).
|
||||
|
||||
|
@ -152,7 +152,7 @@ These tests have been completed using the following static characteristics which
|
|||
- 5 x i2.2x nodes,
|
||||
- 6 hour duration.
|
||||
|
||||
This is [a test used in Phase 1](https://github.com/martinsumner/leveled/blob/master/docs/VOLUME.md#mid-size-object-ssds-no-sync-on-write). Note that since Phase 1 was completed a number of performance improvements have been made in leveled, so that the starting gap between Riak/leveled and Riak/leveldb has widened.
|
||||
This is [a test used in Phase 1](VOLUME.md#mid-size-object-ssds-no-sync-on-write). Note that since Phase 1 was completed a number of performance improvements have been made in leveled, so that the starting gap between Riak/leveled and Riak/leveldb has widened.
|
||||
|
||||
The tests have been run using the new riak_kv_sweeper facility within develop. This feature is an alternative approach to controlling and scheduling rebuilds, allowing for other work to be scheduled into the same fold. As the test is focused on hashtree rebuilds, the test was run with:
|
||||
|
||||
|
@ -173,7 +173,7 @@ The comparison between leveled and leveldb shows a marked difference in throughp
|
|||
|
||||
Riak + leveled | Riak + leveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
The differences between the two tests are:
|
||||
|
||||
|
@ -231,7 +231,7 @@ As before, the Riak + leveled test had substantially lower tail latency, and ach
|
|||
|
||||
Riak + leveled | Riak + leveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
The throughput difference by hour of the test was:
|
||||
|
||||
|
@ -271,11 +271,9 @@ The secondary index test was built on a test which sent
|
|||
The query load is relatively light compared to GET/PUT load in-line with Basho recommendations (decline from 350 queries per second to 120 queries per second through the test). The queries
|
||||
return o(1000) results maximum towards the tail of the test and o(1) results at the start of the test.
|
||||
|
||||
Further details on the implementation of the secondary indexes for volume tests can be found in the [driver file](https://github.com/martinsumner/basho_bench/blob/mas-nhsload/src/basho_bench_driver_riakc_pb.erl) for the test.
|
||||
|
||||
Riak + leveled | Riak + leveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
The results are similar as to previous tests. Although the test is on infrastructure with optimised disk throughput (and with no flushing to disk on write from Riak to minimise direct pressure from Riak), when running the tests with leveldb disk busyness rapidly becomes a constraining factor - and the reaction to that is volatility in throughput. Riak combined with leveldb is capable in short bursts of greater throughput than Riak + leveled, however when throttled within the cluster by a node or nodes with busy disks, the reaction is extreme.
|
||||
|
||||
|
@ -307,7 +305,7 @@ Here is a side-by-side on a standard Phase 1 test on i2, without sync, and with
|
|||
|
||||
Riak + leveled | Riak + bitcask
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
In the first hour of the test, bitcask throughput is <b>39.13%</b> greater than leveled. Over the whole test, the bitcask-backed cluster achieves <b>16.48%</b> more throughput than leveled, but in the last hour this advantage is just <b>0.34%</b>.
|
||||
|
||||
|
|
|
@ -2,17 +2,17 @@
|
|||
|
||||
## Parallel Node Testing - Non-Riak
|
||||
|
||||
Initial volume tests have been [based on standard basho_bench eleveldb test](../test/volume/single_node/examples) to run multiple stores in parallel on the same node and subject them to concurrent pressure.
|
||||
Initial volume tests have been [based on standard basho_bench eleveldb test](volume/single_node/examples) to run multiple stores in parallel on the same node and subject them to concurrent pressure.
|
||||
|
||||
This showed a relative positive performance for leveled for both population and load.
|
||||
|
||||
Populate leveled | Populate eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
Load leveled | Load eleveldb
|
||||
:-------------------------:|:-------------------------:
|
||||
 | 
|
||||
 | 
|
||||
|
||||
This test was a positive comparison for LevelEd, but also showed that although the LevelEd throughput was relatively stable it was still subject to fluctuations related to CPU constraints. Prior to moving on to full Riak testing, a number of changes were then made to LevelEd to reduce the CPU load, in particular during merge events.
|
||||
|
||||
|
|
After Width: | Height: | Size: 129 KiB |
After Width: | Height: | Size: 102 KiB |
After Width: | Height: | Size: 115 KiB |
After Width: | Height: | Size: 78 KiB |
After Width: | Height: | Size: 93 KiB |
After Width: | Height: | Size: 111 KiB |
After Width: | Height: | Size: 83 KiB |
After Width: | Height: | Size: 97 KiB |
After Width: | Height: | Size: 79 KiB |
After Width: | Height: | Size: 98 KiB |
After Width: | Height: | Size: 81 KiB |
After Width: | Height: | Size: 109 KiB |
After Width: | Height: | Size: 73 KiB |
After Width: | Height: | Size: 99 KiB |
After Width: | Height: | Size: 76 KiB |
BIN
docs/volume/cluster_two/output/NotPresentPerc.png
Normal file
After Width: | Height: | Size: 41 KiB |
After Width: | Height: | Size: 99 KiB |
After Width: | Height: | Size: 78 KiB |
21
docs/volume/single_node/examples/eleveldb_load.config
Normal file
|
@ -0,0 +1,21 @@
|
|||
{mode, max}.
|
||||
|
||||
{duration, 30}.
|
||||
|
||||
{concurrent, 24}.
|
||||
|
||||
{driver, basho_bench_driver_eleveldb}.
|
||||
|
||||
{key_generator, {int_to_bin_bigendian,{uniform_int, 1000000}}}.
|
||||
|
||||
{value_generator, {fixed_bin, 8000}}.
|
||||
|
||||
{operations, [{get, 5}, {put, 1}]}.
|
||||
|
||||
%% the path in the code_paths list below (e.g., "../../public/eleveldb") must
|
||||
%% point to the relevant directory of an eleveldb installation
|
||||
{code_paths, ["../eleveldb/ebin"]}.
|
||||
|
||||
{eleveldb_dir, "/tmp/eleveldb.bench"}.
|
||||
{eleveldb_num_instances, 12}.
|
||||
|
21
docs/volume/single_node/examples/eleveldb_pop.config
Normal file
|
@ -0,0 +1,21 @@
|
|||
{mode, max}.
|
||||
|
||||
{duration, 30}.
|
||||
|
||||
{concurrent, 24}.
|
||||
|
||||
{driver, basho_bench_driver_eleveldb}.
|
||||
|
||||
{key_generator, {int_to_bin_bigendian,{partitioned_sequential_int, 10000000}}}.
|
||||
|
||||
{value_generator, {fixed_bin, 8000}}.
|
||||
|
||||
{operations, [{put, 1}]}.
|
||||
|
||||
%% the path in the code_paths list below (e.g., "../../public/eleveldb") must
|
||||
%% point to the relevant directory of an eleveldb installation
|
||||
{code_paths, ["../eleveldb/ebin"]}.
|
||||
|
||||
{eleveldb_dir, "/tmp/eleveldb.bench"}.
|
||||
{eleveldb_num_instances, 12}.
|
||||
|
21
docs/volume/single_node/examples/eleveleddb_load.config
Normal file
|
@ -0,0 +1,21 @@
|
|||
{mode, max}.
|
||||
|
||||
{duration, 30}.
|
||||
|
||||
{concurrent, 24}.
|
||||
|
||||
{driver, basho_bench_driver_eleveleddb}.
|
||||
|
||||
{key_generator, {int_to_bin_bigendian,{uniform_int, 1000000}}}.
|
||||
|
||||
{value_generator, {fixed_bin, 8000}}.
|
||||
|
||||
{operations, [{get, 5}, {put, 1}]}.
|
||||
|
||||
%% the path in the code_paths list below (e.g., "../../public/eleveleddb") must
|
||||
%% point to the relevant directory of an eleveleddb installation
|
||||
{code_paths, ["../eleveleddb/_build/default/lib/eleveleddb/ebin"]}.
|
||||
|
||||
{eleveleddb_dir, "/tmp/eleveleddb.bench"}.
|
||||
{eleveleddb_num_instances, 12}.
|
||||
|
21
docs/volume/single_node/examples/eleveleddb_pop.config
Normal file
|
@ -0,0 +1,21 @@
|
|||
{mode, max}.
|
||||
|
||||
{duration, 30}.
|
||||
|
||||
{concurrent, 24}.
|
||||
|
||||
{driver, basho_bench_driver_eleveleddb}.
|
||||
|
||||
{key_generator, {int_to_bin_bigendian,{partitioned_sequential_int, 10000000}}}.
|
||||
|
||||
{value_generator, {fixed_bin, 8000}}.
|
||||
|
||||
{operations, [{put, 1}]}.
|
||||
|
||||
%% the path in the code_paths list below (e.g., "../../public/eleveleddb") must
|
||||
%% point to the relevant directory of an eleveleddb installation
|
||||
{code_paths, ["../eleveleddb/_build/default/lib/eleveleddb/ebin"]}.
|
||||
|
||||
{eleveleddb_dir, "/tmp/eleveleddb.bench"}.
|
||||
{eleveleddb_num_instances, 12}.
|
||||
|
BIN
docs/volume/single_node/output/leveldb_load.png
Normal file
After Width: | Height: | Size: 316 KiB |
BIN
docs/volume/single_node/output/leveldb_pop.png
Normal file
After Width: | Height: | Size: 315 KiB |
BIN
docs/volume/single_node/output/leveled_load.png
Normal file
After Width: | Height: | Size: 333 KiB |
BIN
docs/volume/single_node/output/leveled_pop.png
Normal file
After Width: | Height: | Size: 274 KiB |
|
@ -0,0 +1,93 @@
|
|||
%% -------------------------------------------------------------------
|
||||
%%
|
||||
%% Copyright (c) 2015 Basho Technologies
|
||||
%%
|
||||
%% This file is provided to you under the Apache License,
|
||||
%% Version 2.0 (the "License"); you may not use this file
|
||||
%% except in compliance with the License. You may obtain
|
||||
%% a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing,
|
||||
%% software distributed under the License is distributed on an
|
||||
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
%% KIND, either express or implied. See the License for the
|
||||
%% specific language governing permissions and limitations
|
||||
%% under the License.
|
||||
%%
|
||||
%% -------------------------------------------------------------------
|
||||
|
||||
%% Raw eleveldb driver. It opens a number of eleveldb instances and assigns
|
||||
%% one to each created worker in round robin fashion. So, for example, creating
|
||||
%% 32 instances and 64 concurrent workers would bind a pair of workers to
|
||||
%% each instance for all operations.
|
||||
-module(basho_bench_driver_eleveleddb).
|
||||
|
||||
-export([new/1,
|
||||
run/4]).
|
||||
|
||||
% -include("basho_bench.hrl").
|
||||
|
||||
-record(state, {
|
||||
instance
|
||||
}).
|
||||
|
||||
%% Return the shared tuple of store handles, starting them on first use.
%% The tuple is memoised under the eleveleddb_instances key in
%% basho_bench_config so that all workers share the same set of stores.
get_instances() ->
    case basho_bench_config:get(eleveleddb_instances, undefined) of
        undefined ->
            Started = start_instances(),
            basho_bench_config:set(eleveleddb_instances, Started),
            Started;
        Cached ->
            Cached
    end.
|
||||
|
||||
|
||||
%% Open one leveled store per configured instance (directories
%% instance.1 .. instance.N under eleveleddb_dir) and return the
%% handles as a tuple, so workers can pick one by index in O(1).
start_instances() ->
    BaseDir = basho_bench_config:get(eleveleddb_dir, "."),
    Count = basho_bench_config:get(eleveleddb_num_instances, 1),
    OpenStore =
        fun(N) ->
            Dir = filename:join(BaseDir, "instance." ++ integer_to_list(N)),
            %% NOTE(review): 2000 and 500000000 are passed straight through to
            %% leveled_bookie:book_start/3 — presumably cache/journal sizing;
            %% confirm against the leveled API documentation.
            {ok, Bookie} = leveled_bookie:book_start(Dir, 2000, 500000000),
            Bookie
        end,
    list_to_tuple(lists:map(OpenStore, lists:seq(1, Count))).
|
||||
|
||||
%% basho_bench worker callback: bind worker Id to one of the shared
%% store instances, assigned round-robin over the instance tuple.
%% Returns {ok, State} with the chosen instance in the worker state.
new(Id) ->
    Instances = get_instances(),
    %% tuple_size/1 rather than the ambiguous size/1 BIF (size/1 also
    %% accepts binaries; tuple_size/1 is explicit and guard-safe).
    Count = tuple_size(Instances),
    Idx = ((Id - 1) rem Count) + 1,
    State = #state{instance = element(Idx, Instances)},
    {ok, State}.
|
||||
|
||||
|
||||
%% basho_bench worker callback: execute a single get or put operation
%% against this worker's bound store instance.
%% For get, not_found is treated as a successful operation; for put, a
%% pause response from the store triggers a one-second back-off before
%% reporting success.
run(get, KeyGen, _ValueGen, #state{instance = Bookie} = State) ->
    case leveled_bookie:book_get(Bookie, "PerfBucket", KeyGen(), o) of
        {ok, _} ->
            {ok, State};
        not_found ->
            {ok, State};
        {error, Reason} ->
            {error, Reason}
    end;
run(put, KeyGen, ValueGen, #state{instance = Bookie} = State) ->
    case leveled_bookie:book_put(Bookie, "PerfBucket", KeyGen(), ValueGen(), []) of
        ok ->
            {ok, State};
        pause ->
            %% The store signalled back-pressure: wait before continuing.
            timer:sleep(1000),
            {ok, State};
        {error, Reason} ->
            {error, Reason}
    end.
|
||||
|
||||
|