From 2cb87f3a6e693b85e8c73c7064f6592039a83074 Mon Sep 17 00:00:00 2001
From: Martin Sumner
Date: Wed, 26 May 2021 17:40:09 +0100
Subject: [PATCH] Develop 3.1 eqc (#339)

* Add EQC profile

Don't run EQC tests, unless specifically requested e.g.
`./rebar3 as eqc eunit --module=leveled_eqc`

* Remove eqc_cover parse_transform

Causes a compilation issue with the assertException in leveled_runner

* Allow EQC test to compile

EQC only works on OTP 22 for now, but other tests should still work on
OTP 22 and OTP 24

* Add more complex statem based eqc test

* Add check for eqc profile
---
 .eqc-info                           |  Bin 57751 -> 0 bytes
 README.md                           |    4 +
 rebar.config                        |   10 +-
 src/leveled_runner.erl              |    7 +
 test/property/leveled_simpleeqc.erl |  433 +++++++++
 test/property/leveled_statemeqc.erl | 1292 +++++++++++++++++++++++++++
 6 files changed, 1742 insertions(+), 4 deletions(-)
 delete mode 100644 .eqc-info
 create mode 100644 test/property/leveled_simpleeqc.erl
 create mode 100644 test/property/leveled_statemeqc.erl

diff --git a/.eqc-info b/.eqc-info
deleted file mode 100644
index 663432b9ecb73c15a9bfb20d9633269a6fbdde2e..0000000000000000000000000000000000000000
GIT binary patch
[57751 bytes of base85-encoded binary delta omitted]
zKmIl2@Ys-du~{dNJPR0ExF-)!6aSQy}Cim(|Ap`SB*4LAnTSy=cA<~ zRk~abs5mrqjCRl7W?iUs;=I`+rnLE?Q4%#5ird$CNy^Rbe7M^4@%H&g8w>3lC!C%` z8=h_08eA9UZ`#mdJz|o*tK;b*))c*oC2n-PquWfX++W?8_5NgOo?3KikNTzyve&vo z+E;{p{CHC{JWnHEb8fw^=G&RVqG|Kb9J*4P(Zj(Sq_luQKc8^!|XU^F%JIYCICRw&i@0}n2Oi{ diff --git a/README.md b/README.md index 2c90b0c..be0c60f 100644 --- a/README.md +++ b/README.md @@ -79,3 +79,7 @@ In order to contribute to leveled, fork the repository, make a branch for your c To have rebar3 execute the full set of tests, run: `rebar3 as test do xref, dialyzer, cover --reset, eunit --cover, ct --cover, cover --verbose` + +For those with a Quickcheck license, property-based tests can also be run using: + + `rebar3 as eqc do eunit --module=leveled_simpleeqc, eunit --module=leveled_statemeqc` diff --git a/rebar.config b/rebar.config index ed8bb76..1c6298b 100644 --- a/rebar.config +++ b/rebar.config @@ -18,10 +18,12 @@ {profiles, [{eqc, [{deps, [meck, fqc]}, - {erl_opts, [debug_info, {parse_transform, eqc_cover}]}, - {extra_src_dirs, ["test"]}]}, - {test, [ - {eunit_compile_opts, [{src_dirs, ["src", "test/end_to_end"]}]} + {erl_opts, [debug_info, {d, 'EQC'}]}, + {extra_src_dirs, ["test/property"]}, + {shell, [{apps, [lz4]}]}, + {plugins, [rebar_eqc]} + ]}, + {test, [{extra_src_dirs, ["test/end_to_end", "test/property"]} ]} ]}. diff --git a/src/leveled_runner.erl b/src/leveled_runner.erl index 047a119..f9dda1f 100644 --- a/src/leveled_runner.erl +++ b/src/leveled_runner.erl @@ -805,6 +805,12 @@ wrap_runner(FoldAction, AfterAction) -> -ifdef(TEST). +%% Note in OTP 22 we see a compile error with the assertException if using the +%% eqc_cover parse_transform. This test is excluded in the eqc profle, due to +%% this error + +-ifdef(EQC). + throw_test() -> StoppedFolder = fun() -> @@ -823,6 +829,7 @@ throw_test() -> ?assertException(throw, stop_fold, wrap_runner(StoppedFolder, AfterAction)). +-endif. -endif. diff --git a/test/property/leveled_simpleeqc.erl b/test/property/leveled_simpleeqc.erl new file mode 100644 index 0000000..5e1cc4d --- /dev/null +++ b/test/property/leveled_simpleeqc.erl @@ -0,0 +1,433 @@ +%% ------------------------------------------------------------------- +%% +%% leveld_eqc: basic statem for doing things to leveled +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(leveled_simpleeqc). + +-ifdef(EQC). + +-include_lib("eqc/include/eqc.hrl"). +-include_lib("eqc/include/eqc_statem.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("../include/leveled.hrl"). + +-compile(nowarn_export_all). +-compile(export_all). + +-define(DATA_DIR, "./leveled_eqc"). + +-record(state, {leveled = undefined :: undefined | pid(), + model :: orddict:orddict(), %% The DB state on disk + %% gets set if leveled is started, and not destroyed + %% in the test. 
+ leveled_needs_destroy = false :: boolean(),
+ previous_keys = [] :: list(binary()), %% Used to increase the probability of picking the same key
+ deleted_keys = [] :: list(binary()),
+ start_opts = []
+ }).
+
+-define(NUMTESTS, 1000).
+-define(QC_OUT(P),
+ eqc:on_output(fun(Str, Args) ->
+ io:format(user, Str, Args) end, P)).
+
+-type state() :: #state{}.
+
+eqc_test_() ->
+ {timeout, 60, ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(50, ?QC_OUT(prop_db()))))}.
+
+run() ->
+ run(?NUMTESTS).
+
+run(Count) ->
+ eqc:quickcheck(eqc:numtests(Count, prop_db())).
+
+check() ->
+ eqc:check(prop_db()).
+
+initial_state() ->
+ #state{model = orddict:new()}.
+
+%% --- Operation: init_backend ---
+%% @doc init_backend_pre/1 - Precondition for generation
+-spec init_backend_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+init_backend_pre(S) ->
+ not is_leveled_open(S).
+
+%% @doc init_backend_args - Argument generator
+-spec init_backend_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+init_backend_args(_S) ->
+ [gen_opts()].
+
+%% @doc init_backend - The actual operation
+%% Start the database and read data from disk
+init_backend(Options) ->
+ {ok, Bookie} = leveled_bookie:book_start(Options),
+ Bookie.
+
+%% @doc init_backend_next - Next state function
+-spec init_backend_next(S, Var, Args) -> NewS
+ when S :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state(),
+ Var :: eqc_statem:var() | term(),
+ Args :: [term()],
+ NewS :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state().
+init_backend_next(S, LevelEdPid, [Options]) ->
+ S#state{leveled=LevelEdPid, leveled_needs_destroy=true,
+ start_opts = Options}.
+
+%% @doc init_backend_post - Postcondition for init_backend
+-spec init_backend_post(S, Args, Res) -> true | term()
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+init_backend_post(_S, [_Options], LevelEdPid) ->
+ is_pid(LevelEdPid).
+
+%% --- Operation: stop ---
+%% @doc stop_pre/1 - Precondition for generation
+-spec stop_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+stop_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc stop_args - Argument generator
+-spec stop_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+stop_args(#state{leveled=Pid}) ->
+ [Pid].
+
+%% @doc stop - The actual operation
+%% Stop the server, but the values are still on disk
+stop(Pid) ->
+ ok = leveled_bookie:book_close(Pid).
+
+%% @doc stop_next - Next state function
+-spec stop_next(S, Var, Args) -> NewS
+ when S :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state(),
+ Var :: eqc_statem:var() | term(),
+ Args :: [term()],
+ NewS :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state().
+stop_next(S, _Value, [_Pid]) ->
+ S#state{leveled=undefined}.
+
+%% @doc stop_post - Postcondition for stop
+-spec stop_post(S, Args, Res) -> true | term()
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+stop_post(_S, [Pid], _Res) ->
+ Mon = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Mon, _Type, Pid, _Info} ->
+ true
+ after 5000 ->
+ {still_a_pid, Pid}
+ end.
+
+
+%% --- Operation: put ---
+%% @doc put_pre/1 - Precondition for generation
+-spec put_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+put_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc put_args - Argument generator
+-spec put_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+put_args(#state{leveled=Pid, previous_keys=PK}) ->
+ [gen_key(PK), gen_val(), Pid].
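+
+%% Hedged aside (not an eqc_statem callback): the operations in this module
+%% can also be exercised directly as a quick smoke check; the key and value
+%% sizes follow gen_key/0 (16 bytes) and gen_val/0 (32 bytes), and the
+%% options come from gen_opts/0 below. A minimal sketch:
+direct_smoke_sketch() ->
+ Pid = init_backend(gen_opts()),
+ ok = put(<<0:128>>, <<0:256>>, Pid),
+ {ok, <<0:256>>} = get(Pid, <<0:128>>),
+ ok = stop(Pid).
+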
+
+%% @doc put - The actual operation
+put(Key, Value, Pid) ->
+ ok = leveled_bookie:book_put(Pid, Key, Key, Value, []).
+
+%% @doc put_next - Next state function
+-spec put_next(S, Var, Args) -> NewS
+ when S :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state(),
+ Var :: eqc_statem:var() | term(),
+ Args :: [term()],
+ NewS :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state().
+put_next(S, _Value, [Key, Value, _Pid]) ->
+ #state{model=Model, previous_keys=PK} = S,
+ Model2 = orddict:store(Key, Value, Model),
+ S#state{model=Model2, previous_keys=[Key | PK]}.
+
+%% @doc put_features - Collects a list of features of this call with these arguments.
+-spec put_features(S, Args, Res) -> list(any())
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+put_features(#state{previous_keys=PK}, [Key, _Value, _Pid], _Res) ->
+ case lists:member(Key, PK) of
+ true ->
+ [{put, update}];
+ false ->
+ [{put, insert}]
+ end.
+
+%% --- Operation: get ---
+%% @doc get_pre/1 - Precondition for generation
+-spec get_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+get_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc get_args - Argument generator
+-spec get_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+get_args(#state{leveled=Pid, previous_keys=PK}) ->
+ [Pid, gen_key(PK)].
+
+%% @doc get - The actual operation
+get(Pid, Key) ->
+ leveled_bookie:book_get(Pid, Key, Key).
+
+%% @doc get_post - Postcondition for get
+-spec get_post(S, Args, Res) -> true | term()
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+get_post(S, [_Pid, Key], Res) ->
+ #state{model=Model} = S,
+ case orddict:find(Key, Model) of
+ {ok, V} ->
+ Res == {ok, V};
+ error ->
+ Res == not_found
+ end.
+
+%% @doc get_features - Collects a list of features of this call with these arguments.
+-spec get_features(S, Args, Res) -> list(any())
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+get_features(S, [_Pid, Key], Res) ->
+ case Res of
+ not_found ->
+ [{get, not_found, deleted} || lists:member(Key, S#state.deleted_keys)] ++
+ [{get, not_found, not_inserted} || not lists:member(Key, S#state.previous_keys)];
+ {ok, B} when is_binary(B) ->
+ [{get, found}]
+ end.
+
+%% --- Operation: delete ---
+%% @doc delete_pre/1 - Precondition for generation
+-spec delete_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+delete_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc delete_args - Argument generator
+-spec delete_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+delete_args(#state{leveled=Pid, previous_keys=PK}) ->
+ [Pid, gen_key(PK)].
+
+%% @doc delete - The actual operation
+delete(Pid, Key) ->
+ ok = leveled_bookie:book_delete(Pid, Key, Key, []).
+
+%% @doc delete_next - Next state function
+-spec delete_next(S, Var, Args) -> NewS
+ when S :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state(),
+ Var :: eqc_statem:var() | term(),
+ Args :: [term()],
+ NewS :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state().
+delete_next(S, _Value, [_Pid, Key]) ->
+ #state{model=Model, deleted_keys=DK} = S,
+ Model2 = orddict:erase(Key, Model),
+ S#state{model=Model2, deleted_keys = case orddict:is_key(Key, Model) of
+ true -> [Key | DK];
+ false -> DK
+ end}.
+
+%% @doc delete_features - Collects a list of features of this call with these arguments.
+-spec delete_features(S, Args, Res) -> list(any())
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+delete_features(S, [_Pid, Key], _Res) ->
+ #state{previous_keys=PK} = S,
+ case lists:member(Key, PK) of
+ true ->
+ [{delete, written}];
+ false ->
+ [{delete, not_written}]
+ end.
+
+%% --- Operation: is_empty ---
+%% @doc is_empty_pre/1 - Precondition for generation
+-spec is_empty_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+is_empty_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc is_empty_args - Argument generator
+-spec is_empty_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+is_empty_args(#state{leveled=Pid}) ->
+ [Pid].
+
+%% @doc is_empty - The actual operation
+is_empty(Pid) ->
+ leveled_bookie:book_isempty(Pid, ?STD_TAG).
+
+%% @doc is_empty_post - Postcondition for is_empty
+-spec is_empty_post(S, Args, Res) -> true | term()
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+is_empty_post(#state{model=Model}, [_Pid], Res) ->
+ Size = orddict:size(Model),
+ case Res of
+ true -> eq(0, Size);
+ false when Size == 0 -> expected_empty;
+ false when Size > 0 -> true
+ end.
+
+%% @doc is_empty_features - Collects a list of features of this call with these arguments.
+-spec is_empty_features(S, Args, Res) -> list(any())
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+is_empty_features(_S, [_Pid], Res) ->
+ [{empty, Res}].
+
+%% --- Operation: drop ---
+%% @doc drop_pre/1 - Precondition for generation
+-spec drop_pre(S :: eqc_statem:symbolic_state()) -> boolean().
+drop_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc drop_args - Argument generator
+-spec drop_args(S :: eqc_statem:symbolic_state()) -> eqc_gen:gen([term()]).
+drop_args(#state{leveled=Pid}) ->
+ [Pid, gen_opts()].
+
+%% @doc drop - The actual operation
+%% Remove files from disk (directory structure may remain) and start a new clean database
+drop(Pid, Opts) ->
+ ok = leveled_bookie:book_destroy(Pid),
+ init_backend(Opts).
+
+%% @doc drop_next - Next state function
+-spec drop_next(S, Var, Args) -> NewS
+ when S :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state(),
+ Var :: eqc_statem:var() | term(),
+ Args :: [term()],
+ NewS :: eqc_statem:symbolic_state() | eqc_statem:dynamic_state().
+drop_next(_S, NewPid, [_Pid, Opts]) ->
+ init_backend_next(#state{model = orddict:new()}, NewPid, [Opts]).
+
+%% @doc drop_post - Postcondition for drop
+-spec drop_post(S, Args, Res) -> true | term()
+ when S :: eqc_statem:dynamic_state(),
+ Args :: [term()],
+ Res :: term().
+drop_post(_S, [Pid, _Opts], NewPid) ->
+ Mon = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Mon, _Type, Pid, _Info} ->
+ is_empty(NewPid)
+ after 5000 ->
+ {still_a_pid, Pid}
+ end.
+
+drop_features(S, [_Pid, _Opts], _Res) ->
+ Size = orddict:size(S#state.model),
+ [{drop, empty} || Size == 0 ] ++
+ [{drop, Size div 10} || Size > 0 ].
+
+
+weight(#state{previous_keys=[]}, Command) when Command == get;
+ Command == delete ->
+ 1;
+weight(_S, C) when C == get;
+ C == put ->
+ 10;
+weight(_S, stop) ->
+ 1;
+weight(_, _) ->
+ 1.
+
+%% @doc check that the implementation of leveled is (at least)
+%% equivalent to a sorted dict
+-spec prop_db() -> eqc:property().
+prop_db() ->
+ ?FORALL(Cmds, commands(?MODULE),
+ begin
+ delete_level_data(),
+
+ {H, S, Res} = run_commands(?MODULE, Cmds),
+ CallFeatures = call_features(H),
+ StartOptions = [{root_path, "./leveled_data"}, {log_level, error}],
+ AllVals = get_all_vals(S#state.leveled, S#state.leveled_needs_destroy, StartOptions),
+
+ pretty_commands(?MODULE, Cmds, {H, S, Res},
+ aggregate(command_names(Cmds),
+ aggregate(with_title('Features'), CallFeatures,
+ features(CallFeatures,
+ conjunction([{result, Res == ok},
+ {vals, vals_equal(AllVals, S#state.model)}
+ ])))))
+
+ end).
+
+gen_opts() ->
+ [{root_path, "./leveled_data"}, {log_level, error}].
+
+gen_key() ->
+ binary(16).
+
+gen_val() ->
+ binary(32).
+
+gen_key([]) ->
+ gen_key();
+gen_key(Previous) ->
+ frequency([{1, gen_key()},
+ {2, elements(Previous)}]).
+
+
+%% Helper for all those preconditions that just check that leveled Pid
+%% is populated in state.
+-spec is_leveled_open(state()) -> boolean().
+is_leveled_open(#state{leveled=undefined}) ->
+ false;
+is_leveled_open(_) ->
+ true.
+
+get_all_vals(undefined, false, _) ->
+ [];
+get_all_vals(undefined, true, Opts) ->
+ %% start a leveled (may have been stopped in the test)
+ {ok, Bookie} = leveled_bookie:book_start(Opts),
+ get_all_vals(Bookie);
+get_all_vals(Pid, true, _) ->
+ %% is stopped, but not yet destroyed
+ get_all_vals(Pid).
+
+get_all_vals(Pid) ->
+ %% fold over all the values in leveled
+ Acc = [],
+ FoldFun = fun(_B, K, V, A) -> [{K, V} | A] end,
+ AllKeyQuery = {foldobjects_allkeys, o, {FoldFun, Acc}, false},
+ {async, Folder} = leveled_bookie:book_returnfolder(Pid, AllKeyQuery),
+ AccFinal = Folder(),
+ ok = leveled_bookie:book_destroy(Pid),
+ lists:reverse(AccFinal).
+
+vals_equal(Leveled, Model) ->
+ %% We assume here that Leveled is an orddict, since Model is.
+ ?WHENFAIL(eqc:format("level ~p =/=\nmodel ~p\n", [Leveled, Model]), Leveled == Model).
+
+delete_level_data() ->
+ os:cmd("rm -rf ./leveled_data").
+
+-endif.
\ No newline at end of file
diff --git a/test/property/leveled_statemeqc.erl b/test/property/leveled_statemeqc.erl
new file mode 100644
index 0000000..da9a272
--- /dev/null
+++ b/test/property/leveled_statemeqc.erl
@@ -0,0 +1,1292 @@
+%% -------------------------------------------------------------------
+%%
+%% leveled_statemeqc: basic statem for doing things to leveled
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+-module(leveled_statemeqc).
+
+-ifdef(EQC).
+
+-include_lib("eqc/include/eqc.hrl").
+-include_lib("eqc/include/eqc_statem.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("../include/leveled.hrl").
+
+-compile([export_all, nowarn_export_all, {nowarn_deprecated_function,
+ [{gen_fsm, send_event, 2}]}]).
+
+-define(NUMTESTS, 1000).
+-define(QC_OUT(P),
+ eqc:on_output(fun(Str, Args) ->
+ io:format(user, Str, Args) end, P)).
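+
+%% Hedged usage note: ?QC_OUT/1 redirects QuickCheck's output to the 'user'
+%% io stream, so counterexamples stay visible when the property runs under
+%% rebar3/eunit, e.g.
+%%   eqc:quickcheck(eqc:testing_time(50, ?QC_OUT(prop_db()))).
+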
+
+-define(CMD_VALID(State, Cmd, True, False),
+ case is_valid_cmd(State, Cmd) of
+ true -> True;
+ false -> False
+ end).
+
+
+eqc_test_() ->
+ Timeout = 50,
+ {timeout, max(2 * Timeout, Timeout + 10),
+ ?_assertEqual(true, eqc:quickcheck(eqc:testing_time(Timeout, ?QC_OUT(prop_db()))))}.
+
+run() ->
+ run(?NUMTESTS).
+
+run(Count) ->
+ eqc:quickcheck(eqc:numtests(Count, prop_db())).
+
+check() ->
+ eqc:check(prop_db()).
+
+iff(B1, B2) -> B1 == B2.
+implies(B1, B2) -> (not B1 orelse B2).
+
+%% start_opts should not be added to this map; it is added only when the
+%% system is started for the first time.
+initial_state() ->
+ #{dir => {var, dir},
+ sut => sut,
+ leveled => undefined, %% to make adapt happy after failing pre/1
+ counter => 0,
+ model => orddict:new(),
+ previous_keys => [],
+ deleted_keys => [],
+ folders => []
+ }.
+
+%% --- Operation: init_backend ---
+init_backend_pre(S) ->
+ not is_leveled_open(S).
+
+init_backend_args(#{dir := Dir, sut := Name} = S) ->
+ case maps:get(start_opts, S, undefined) of
+ undefined ->
+ [ default(?RIAK_TAG, ?STD_TAG), %% Just test one tag at a time
+ [{root_path, Dir}, {log_level, error},
+ {max_sstslots, 2}, {cache_size, 10}, {max_pencillercachesize, 40}, {max_journalsize, 20000} | gen_opts()], Name ];
+ Opts ->
+ %% root_path is part of existing options
+ [ maps:get(tag, S), Opts, Name ]
+ end.
+
+init_backend_pre(S, [Tag, Options, _]) ->
+ %% for shrinking
+ PreviousOptions = maps:get(start_opts, S, undefined),
+ maps:get(tag, S, Tag) == Tag andalso
+ (PreviousOptions == undefined orelse PreviousOptions == Options).
+
+init_backend_adapt(S, [Tag, Options, Name]) ->
+ [ maps:get(tag, S, Tag), maps:get(start_opts, S, Options), Name].
+
+%% @doc init_backend - The actual operation
+%% Start the database and read data from disk
+init_backend(_Tag, Options0, Name) ->
+ % Options0 = proplists:delete(log_level, Options),
+ case leveled_bookie:book_start(Options0) of
+ {ok, Bookie} ->
+ unlink(Bookie),
+ erlang:register(Name, Bookie),
+ Bookie;
+ Error -> Error
+ end.
+
+init_backend_next(S, LevelEdPid, [Tag, Options, _]) ->
+ S#{leveled => LevelEdPid, start_opts => Options, tag => Tag}.
+
+init_backend_post(_S, [_, _Options, _], LevelEdPid) ->
+ is_pid(LevelEdPid).
+
+init_backend_features(_S, [_Tag, Options, _], _Res) ->
+ [{start_options, Options}].
+
+
+%% --- Operation: stop ---
+stop_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc stop_args - Argument generator
+stop_args(#{leveled := Pid}) ->
+ [Pid].
+
+stop_pre(#{leveled := Leveled}, [Pid]) ->
+ %% check during shrinking
+ Pid == Leveled.
+
+stop_adapt(#{leveled := Leveled}, [_]) ->
+ [Leveled].
+
+%% @doc stop - The actual operation
+%% Stop the server, but the values are still on disk
+stop(Pid) ->
+ ok = leveled_bookie:book_close(Pid).
+
+stop_next(S, _Value, [_Pid]) ->
+ S#{leveled => undefined,
+ iclerk => undefined,
+ folders => [],
+ used_folders => [],
+ stop_folders => maps:get(folders, S, []) ++ maps:get(used_folders, S, [])}.
+
+stop_post(_S, [Pid], _Res) ->
+ Mon = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Mon, _Type, Pid, _Info} ->
+ true
+ after 5000 ->
+ {still_a_pid, Pid}
+ end.
+
+
+%% --- Operation: updateload ---
+updateload_pre(S) ->
+ is_leveled_open(S).
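+
+%% Hedged aside on the updateload operation below: it issues ~100 successive
+%% puts of rewritten versions of one value (see multiply/2) followed by a
+%% final put of Obj, exercising the journal under heavy update load against
+%% the dedicated <<"LoadBucket">> bucket.
+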
+
+%% updateload for specific bucket (doesn't overlap with get/put/delete)
+updateload_args(#{leveled := Pid, tag := Tag}) ->
+ ?LET(Categories, gen_categories(Tag),
+ ?LET({{Key, Bucket}, Value, IndexSpec, MetaData},
+ {{gen_key(), <<"LoadBucket">>}, gen_val(), [{add, Cat, gen_index_value()} || Cat <- Categories ], []},
+ case Tag of
+ ?STD_TAG -> [Pid, Bucket, Key, Value, Value, IndexSpec, Tag, MetaData];
+ ?RIAK_TAG ->
+ Obj = testutil:riak_object(Bucket, Key, Value, MetaData), %% this has a side effect inserting a random number
+ [Pid, Bucket, Key, Value, Obj, IndexSpec, Tag, MetaData]
+ end
+ )).
+
+updateload_pre(#{leveled := Leveled}, [Pid, _Bucket, _Key, _Value, _Obj, _, _, _]) ->
+ Pid == Leveled.
+
+updateload_adapt(#{leveled := Leveled}, [_, Bucket, Key, Value, Obj, Spec, Tag, MetaData]) ->
+ [ Leveled, Bucket, Key, Value, Obj, Spec, Tag, MetaData ].
+
+%% @doc updateload - The actual operation
+updateload(Pid, Bucket, Key, Value, Obj, Spec, Tag, MetaData) ->
+ Values =
+ case Tag of
+ ?STD_TAG -> multiply(100, Value);
+ ?RIAK_TAG ->
+ lists:map(fun(V) -> testutil:riak_object(Bucket, Key, V, MetaData) end,
+ multiply(100, Value))
+ end ++ [Obj],
+ lists:map(fun(V) -> leveled_bookie:book_put(Pid, Bucket, Key, V, Spec, Tag) end, Values).
+
+multiply(1, _Value) ->
+ [];
+multiply(N, Value) when N > 1 ->
+ <<B:8/integer, Rest/binary>> = Value,
+ NewValue = <<Rest/binary, B:8/integer>>,
+ [NewValue | multiply(N-1, NewValue)].
+
+updateload_next(#{model := Model} = S, _V, [_Pid, Bucket, Key, _Value, Obj, Spec, _Tag, _MetaData]) ->
+ ?CMD_VALID(S, put,
+ begin
+ NewSpec =
+ case orddict:find({Bucket, Key}, Model) of
+ error -> merge_index_spec([], Spec);
+ {ok, {_, OldSpec}} ->
+ merge_index_spec(OldSpec, Spec)
+ end,
+ S#{model => orddict:store({Bucket, Key}, {Obj, NewSpec}, Model)}
+ end,
+ S).
+
+updateload_post(S, [_, _, _, _, _, _, _, _], Results) ->
+ lists:all(fun(Res) -> ?CMD_VALID(S, put, lists:member(Res, [ok, pause]), Res == {unsupported_message, put}) end, Results).
+
+updateload_features(#{previous_keys := PK} = S, [_Pid, Bucket, Key, _Value, _Obj, _, Tag, _], _Res) ->
+ ?CMD_VALID(S, put,
+ case
+ lists:member({Key, Bucket}, PK) of
+ true ->
+ [{updateload, update, Tag}];
+ false ->
+ [{updateload, insert, Tag}]
+ end,
+ [{updateload, unsupported}]).
+
+
+%% --- Operation: put ---
+put_pre(S) ->
+ is_leveled_open(S).
+
+put_args(#{leveled := Pid, previous_keys := PK, tag := Tag}) ->
+ ?LET(Categories, gen_categories(Tag),
+ ?LET({{Key, Bucket}, Value, IndexSpec, MetaData},
+ {gen_key_in_bucket(PK), gen_val(), [{add, Cat, gen_index_value()} || Cat <- Categories ], []},
+ case Tag of
+ ?STD_TAG -> [Pid, Bucket, Key, Value, IndexSpec, elements([none, Tag])];
+ ?RIAK_TAG ->
+ Obj = testutil:riak_object(Bucket, Key, Value, MetaData),
+ [Pid, Bucket, Key, Obj, IndexSpec, Tag]
+ end)).
+
+put_pre(#{leveled := Leveled}, [Pid, _Bucket, _Key, _Value, _, _]) ->
+ Pid == Leveled.
+
+put_adapt(#{leveled := Leveled}, [_, Bucket, Key, Value, Spec, Tag]) ->
+ [ Leveled, Bucket, Key, Value, Spec, Tag ].
+
+%% @doc put - The actual operation
+put(Pid, Bucket, Key, Value, Spec, none) ->
+ leveled_bookie:book_put(Pid, Bucket, Key, Value, Spec);
+put(Pid, Bucket, Key, Value, Spec, Tag) ->
+ leveled_bookie:book_put(Pid, Bucket, Key, Value, Spec, Tag).
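+
+%% Hedged sanity check for multiply/2 above, matching the reconstructed
+%% head-to-tail byte rotation: asking for three copies of a 3-byte value
+%% yields two successive rotations of the original.
+multiply_sketch_test() ->
+ ?assertEqual([<<2,3,1>>, <<3,1,2>>], multiply(3, <<1,2,3>>)).
+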
+
+put_next(#{model := Model, previous_keys := PK} = S, _Value, [_Pid, Bucket, Key, Value, Spec, _Tag]) ->
+ ?CMD_VALID(S, put,
+ begin
+ NewSpec =
+ case orddict:find({Bucket, Key}, Model) of
+ error -> merge_index_spec([], Spec);
+ {ok, {_, OldSpec}} ->
+ merge_index_spec(OldSpec, Spec)
+ end,
+ S#{model => orddict:store({Bucket, Key}, {Value, NewSpec}, Model),
+ previous_keys => PK ++ [{Key, Bucket}]}
+ end,
+ S).
+
+put_post(S, [_, _, _, _, _, _], Res) ->
+ ?CMD_VALID(S, put, lists:member(Res, [ok, pause]), eq(Res, {unsupported_message, put})).
+
+put_features(#{previous_keys := PK} = S, [_Pid, Bucket, Key, _Value, _, Tag], _Res) ->
+ ?CMD_VALID(S, put,
+ case
+ lists:member({Key, Bucket}, PK) of
+ true ->
+ [{put, update, Tag}];
+ false ->
+ [{put, insert, Tag}]
+ end,
+ [{put, unsupported}]).
+
+merge_index_spec(Spec, []) ->
+ Spec;
+merge_index_spec(Spec, [{add, Cat, Idx} | Rest]) ->
+ merge_index_spec(lists:delete({Cat, Idx}, Spec) ++ [{Cat, Idx}], Rest);
+merge_index_spec(Spec, [{remove, Cat, Idx} | Rest]) ->
+ merge_index_spec(lists:delete({Cat, Idx}, Spec), Rest).
+
+
+%% --- Operation: get ---
+get_pre(S) ->
+ is_leveled_open(S).
+
+get_args(#{leveled := Pid, previous_keys := PK, tag := Tag}) ->
+ ?LET({Key, Bucket}, gen_key_in_bucket(PK),
+ [Pid, Bucket, Key, case Tag of ?STD_TAG -> default(none, Tag); _ -> Tag end]).
+
+%% @doc get - The actual operation
+get(Pid, Bucket, Key, none) ->
+ leveled_bookie:book_get(Pid, Bucket, Key);
+get(Pid, Bucket, Key, Tag) ->
+ leveled_bookie:book_get(Pid, Bucket, Key, Tag).
+
+get_pre(#{leveled := Leveled}, [Pid, _Bucket, _Key, _Tag]) ->
+ Pid == Leveled.
+
+get_adapt(#{leveled := Leveled}, [_, Bucket, Key, Tag]) ->
+ [Leveled, Bucket, Key, Tag].
+
+get_post(#{model := Model} = S, [_Pid, Bucket, Key, Tag], Res) ->
+ ?CMD_VALID(S, get,
+ case Res of
+ {ok, _} ->
+ {ok, {Value, _}} = orddict:find({Bucket, Key}, Model),
+ eq(Res, {ok, Value});
+ not_found ->
+ %% Weird to be able to supply a tag, but must be STD_TAG...
+ Tag =/= ?STD_TAG orelse orddict:find({Bucket, Key}, Model) == error
+ end,
+ eq(Res, {unsupported_message, get})).
+
+get_features(#{deleted_keys := DK, previous_keys := PK}, [_Pid, Bucket, Key, _Tag], Res) ->
+ case Res of
+ not_found ->
+ [{get, not_found, deleted} || lists:member({Key, Bucket}, DK)] ++
+ [{get, not_found, not_inserted} || not lists:member({Key, Bucket}, PK)];
+ {ok, B} when is_binary(B) ->
+ [{get, found}];
+ {unsupported_message, _} ->
+ [{get, unsupported}]
+ end.
+
+%% --- Operation: mput ---
+mput_pre(S) ->
+ is_leveled_open(S).
+
+%% @doc mput_args - Argument generator
+%% Specification says: duplicates should be removed
+%% "%% The list should be de-duplicated before it is passed to the bookie."
+%% Whether this means that keys should be unique, or even actions and values,
+%% is unclear.
+%% Slack discussion:
+%% `[{add, B1, K1, SK1}, {add, B1, K1, SK2}]` should be fine (same bucket and key, different subkey)
+%%
+%% Really weird to have to specify a value in case of a remove action
+mput_args(#{leveled := Pid, previous_keys := PK}) ->
+ ?LET(Objs, list({gen_key_in_bucket(PK), nat()}),
+ [Pid, [ {weighted_default({5, add}, {1, remove}), Bucket, Key, SubKey, gen_val()} || {{Key, Bucket}, SubKey} <- Objs ]]).
+
+
+mput_pre(#{leveled := Leveled}, [Pid, ObjSpecs]) ->
+ Pid == Leveled andalso no_key_dups(ObjSpecs) == ObjSpecs.
+
+mput_adapt(#{leveled := Leveled}, [_, ObjSpecs]) ->
+ [ Leveled, no_key_dups(ObjSpecs) ].
+
+mput(Pid, ObjSpecs) ->
+ leveled_bookie:book_mput(Pid, ObjSpecs).
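+
+%% Hedged sanity check for merge_index_spec/2 defined above: an add behaves
+%% as an upsert (delete-then-append) and a remove deletes, so the surviving
+%% entries keep their relative order.
+merge_index_spec_sketch_test() ->
+ ?assertEqual([{lib, "x"}, {dep, "b"}],
+ merge_index_spec([{dep, "a"}, {lib, "x"}],
+ [{add, dep, "b"}, {remove, dep, "a"}])).
+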
+
+mput_next(S, _, [_Pid, ObjSpecs]) ->
+ ?CMD_VALID(S, mput,
+ lists:foldl(fun({add, Bucket, Key, _SubKey, Value}, #{model := Model, previous_keys := PK} = Acc) ->
+ Acc#{model => orddict:store({Bucket, Key}, {Value, []}, Model),
+ previous_keys => PK ++ [{Key, Bucket}]};
+ ({remove, Bucket, Key, _SubKey, _Value}, #{model := Model} = Acc) ->
+ Acc#{model => orddict:erase({Bucket, Key}, Model)}
+ end, S, ObjSpecs),
+ S).
+
+mput_post(S, [_, _], Res) ->
+ ?CMD_VALID(S, mput, lists:member(Res, [ok, pause]), eq(Res, {unsupported_message, mput})).
+
+mput_features(S, [_Pid, ObjSpecs], _Res) ->
+ ?CMD_VALID(S, mput,
+ {mput, [ element(1, ObjSpec) || ObjSpec <- ObjSpecs ]},
+ [{mput, unsupported}]).
+
+%% --- Operation: head ---
+head_pre(S) ->
+ is_leveled_open(S).
+
+head_args(#{leveled := Pid, previous_keys := PK, tag := Tag}) ->
+ ?LET({Key, Bucket}, gen_key_in_bucket(PK),
+ [Pid, Bucket, Key, Tag]).
+
+head_pre(#{leveled := Leveled}, [Pid, _Bucket, _Key, _Tag]) ->
+ Pid == Leveled.
+
+head_adapt(#{leveled := Leveled}, [_, Bucket, Key, Tag]) ->
+ [Leveled, Bucket, Key, Tag].
+
+head(Pid, Bucket, Key, none) ->
+ leveled_bookie:book_head(Pid, Bucket, Key);
+head(Pid, Bucket, Key, Tag) ->
+ leveled_bookie:book_head(Pid, Bucket, Key, Tag).
+
+head_post(#{model := Model} = S, [_Pid, Bucket, Key, Tag], Res) ->
+ ?CMD_VALID(S, head,
+ case Res of
+ {ok, _MetaData} ->
+ orddict:find({Bucket, Key}, Model) =/= error;
+ not_found ->
+ %% Weird to be able to supply a tag, but must be STD_TAG...
+ implies(lists:member(maps:get(start_opts, S), [{head_only, with_lookup}]),
+ lists:member(Tag, [?STD_TAG, none, ?HEAD_TAG])) orelse
+ orddict:find({Bucket, Key}, Model) == error;
+ {unsupported_message, head} ->
+ Tag =/= ?HEAD_TAG
+ end,
+ eq(Res, {unsupported_message, head})).
+
+head_features(#{deleted_keys := DK, previous_keys := PK}, [_Pid, Bucket, Key, _Tag], Res) ->
+ case Res of
+ not_found ->
+ [{head, not_found, deleted} || lists:member({Key, Bucket}, DK)] ++
+ [{head, not_found, not_inserted} || not lists:member({Key, Bucket}, PK)];
+ {ok, {_, _, _}} -> %% Metadata
+ [{head, found}];
+ {ok, Bin} when is_binary(Bin) ->
+ [{head, found_riak_object}];
+ {unsupported_message, _} ->
+ [{head, unsupported}]
+ end.
+
+
+%% --- Operation: delete ---
+delete_pre(S) ->
+ is_leveled_open(S).
+
+delete_args(#{leveled := Pid, previous_keys := PK, tag := Tag}) ->
+ ?LET({Key, Bucket}, gen_key_in_bucket(PK),
+ [Pid, Bucket, Key, [], Tag]).
+
+delete_pre(#{leveled := Leveled, model := Model}, [Pid, Bucket, Key, Spec, _Tag]) ->
+ Pid == Leveled andalso
+ case orddict:find({Bucket, Key}, Model) of
+ error -> true;
+ {ok, {_, OldSpec}} ->
+ Spec == OldSpec
+ end.
+
+delete_adapt(#{leveled := Leveled, model := Model}, [_, Bucket, Key, Spec, Tag]) ->
+ NewSpec =
+ case orddict:find({Bucket, Key}, Model) of
+ error -> Spec;
+ {ok, {_, OldSpec}} ->
+ OldSpec
+ end,
+ [ Leveled, Bucket, Key, NewSpec, Tag ].
+
+delete(Pid, Bucket, Key, Spec, ?STD_TAG) ->
+ leveled_bookie:book_delete(Pid, Bucket, Key, Spec);
+delete(Pid, Bucket, Key, Spec, Tag) ->
+ leveled_bookie:book_put(Pid, Bucket, Key, delete, Spec, Tag).
+
+delete_next(#{model := Model, deleted_keys := DK} = S, _Value, [_Pid, Bucket, Key, _, _]) ->
+ ?CMD_VALID(S, delete,
+ S#{model => orddict:erase({Bucket, Key}, Model),
+ deleted_keys => DK ++ [{Key, Bucket} || orddict:is_key({Bucket, Key}, Model)]},
+ S).
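+
+%% Hedged note on delete/5 above: leveled_bookie has no tagged book_delete,
+%% and book_delete/4 is itself believed to be a put of the atom 'delete'
+%% (a tombstone), so deletion for non-?STD_TAG tags is modelled here with
+%% book_put of 'delete'.
+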
+
+delete_post(S, [_Pid, _Bucket, _Key, _, _], Res) ->
+ ?CMD_VALID(S, delete,
+ lists:member(Res, [ok, pause]),
+ case Res of
+ {unsupported_message, _} -> true;
+ _ -> Res
+ end).
+
+delete_features(#{previous_keys := PK} = S, [_Pid, Bucket, Key, _, _], _Res) ->
+ ?CMD_VALID(S, delete,
+ case lists:member({Key, Bucket}, PK) of
+ true ->
+ [{delete, existing}];
+ false ->
+ [{delete, none_existing}]
+ end,
+ [{delete, unsupported}]).
+
+%% --- Operation: is_empty ---
+is_empty_pre(S) ->
+ is_leveled_open(S).
+
+is_empty_args(#{leveled := Pid, tag := Tag}) ->
+ [Pid, Tag].
+
+
+is_empty_pre(#{leveled := Leveled}, [Pid, _]) ->
+ Pid == Leveled.
+
+is_empty_adapt(#{leveled := Leveled}, [_, Tag]) ->
+ [Leveled, Tag].
+
+%% @doc is_empty - The actual operation
+is_empty(Pid, Tag) ->
+ leveled_bookie:book_isempty(Pid, Tag).
+
+is_empty_post(#{model := Model}, [_Pid, _Tag], Res) ->
+ Size = orddict:size(Model),
+ case Res of
+ true -> eq(0, Size);
+ false when Size == 0 -> expected_empty;
+ false when Size > 0 -> true
+ end.
+
+is_empty_features(_S, [_Pid, _], Res) ->
+ [{empty, Res}].
+
+%% --- Operation: drop ---
+drop_pre(S) ->
+ is_leveled_open(S).
+
+drop_args(#{leveled := Pid, dir := Dir} = S) ->
+ ?LET([Tag, _, Name], init_backend_args(S),
+ [Pid, Tag, [{root_path, Dir} | gen_opts()], Name]).
+
+drop_pre(#{leveled := Leveled} = S, [Pid, Tag, Opts, Name]) ->
+ Pid == Leveled andalso init_backend_pre(S, [Tag, Opts, Name]).
+
+drop_adapt(#{leveled := Leveled} = S, [_Pid, Tag, Opts, Name]) ->
+ [Leveled | init_backend_adapt(S, [Tag, Opts, Name])].
+
+%% @doc drop - The actual operation
+%% Remove files from disk (directory structure may remain) and start a new clean database
+drop(Pid, Tag, Opts, Name) ->
+ Mon = erlang:monitor(process, Pid),
+ ok = leveled_bookie:book_destroy(Pid),
+ receive
+ {'DOWN', Mon, _Type, Pid, _Info} ->
+ init_backend(Tag, Opts, Name)
+ after 5000 ->
+ {still_alive, Pid, Name}
+ end.
+
+drop_next(S, Value, [Pid, Tag, Opts, Name]) ->
+ S1 = stop_next(S, Value, [Pid]),
+ init_backend_next(S1#{model => orddict:new()},
+ Value, [Tag, Opts, Name]).
+
+drop_post(_S, [_Pid, _Tag, _Opts, _], Res) ->
+ case is_pid(Res) of
+ true -> true;
+ false -> Res
+ end.
+
+drop_features(#{model := Model}, [_Pid, _Tag, _Opts, _], _Res) ->
+ Size = orddict:size(Model),
+ [{drop, empty} || Size == 0 ] ++
+ [{drop, small} || Size > 0 andalso Size < 20 ] ++
+ [{drop, medium} || Size >= 20 andalso Size < 1000 ] ++
+ [{drop, large} || Size >= 1000 ].
+
+
+
+%% --- Operation: kill ---
+%% Test that killing the root Pid of leveled has the same effect as closing
+%% it nicely; that is, we don't lose data! Not even when parallel successful
+%% puts are going on.
+kill_pre(S) ->
+ is_leveled_open(S).
+
+kill_args(#{leveled := Pid}) ->
+ [Pid].
+
+kill_pre(#{leveled := Leveled}, [Pid]) ->
+ Pid == Leveled.
+
+kill_adapt(#{leveled := Leveled}, [_]) ->
+ [ Leveled ].
+
+kill(Pid) ->
+ exit(Pid, kill),
+ timer:sleep(1).
+
+kill_next(S, Value, [Pid]) ->
+ stop_next(S, Value, [Pid]).
+
+
+
+
+%% --- Helper: compacthappened ---
+
+compacthappened(DataDir) ->
+ PostCompact = filename:join(DataDir, "journal/journal_files/post_compact"),
+ case filelib:is_dir(PostCompact) of
+ true ->
+ {ok, Files} = file:list_dir(PostCompact),
+ Files;
+ false ->
+ []
+ end.
+
+ledgerpersisted(DataDir) ->
+ LedgerPath = filename:join(DataDir, "ledger/ledger_files"),
+ case filelib:is_dir(LedgerPath) of
+ true ->
+ {ok, Files} = file:list_dir(LedgerPath),
+ Files;
+ false ->
+ []
+ end.
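+
+%% Hedged aside: compacthappened/1 and ledgerpersisted/1 above, and
+%% journalwritten/1 below, simply list files under the data directory so
+%% that the property can measure compaction and persistence activity per
+%% run (see the measure/3 calls in prop_db/0).
+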
+ +journalwritten(DataDir) -> + JournalPath = filename:join(DataDir, "journal/journal_files"), + case filelib:is_dir(JournalPath) of + true -> + {ok, Files} = file:list_dir(JournalPath), + Files; + false -> + [] + end. + + +%% --- Operation: compact journal --- + +compact_pre(S) -> + is_leveled_open(S). + +compact_args(#{leveled := Pid}) -> + [Pid, nat()]. + +compact_pre(#{leveled := Leveled}, [Pid, _TS]) -> + Pid == Leveled. + +compact_adapt(#{leveled := Leveled}, [_Pid, TS]) -> + [ Leveled, TS ]. + +compact(Pid, TS) -> + leveled_bookie:book_compactjournal(Pid, TS). + +compact_next(S, R, [_Pid, _TS]) -> + case {R, maps:get(previous_compact, S, undefined)} of + {ok, undefined} -> + S#{previous_compact => true}; + _ -> + S + end. + +compact_post(S, [_Pid, _TS], Res) -> + case Res of + ok -> + true; + busy -> + true == maps:get(previous_compact, S, undefined) + end. + +compact_features(S, [_Pid, _TS], _Res) -> + case maps:get(previous_compact, S, undefined) of + undefined -> + [{compact, fresh}]; + _ -> + [{compact, repeat}] + end. + + +%% Testing fold: +%% Note async and sync mode! +%% see https://github.com/martinsumner/riak_kv/blob/mas-2.2.5-tictactaae/src/riak_kv_leveled_backend.erl#L238-L419 + +%% --- Operation: index folding --- +indexfold_pre(S) -> + is_leveled_open(S). + +indexfold_args(#{leveled := Pid, counter := Counter, previous_keys := PK}) -> + ?LET({Key, Bucket}, gen_key_in_bucket(PK), + [Pid, default(Bucket, {Bucket, Key}), gen_foldacc(3), + ?LET({[N], M}, {gen_index_value(), choose(0,2)}, {gen_category(), [N], [N+M]}), + {bool(), + oneof([undefined, gen_index_value()])}, + Counter %% add a unique counter + ]). + +indexfold_pre(#{leveled := Leveled}, [Pid, _Constraint, _FoldAccT, _Range, _TermHandling, _Counter]) -> + %% Make sure we operate on an existing Pid when shrinking + %% Check start options validity as well? + Pid == Leveled. + +indexfold_adapt(#{leveled := Leveled}, [_, Constraint, FoldAccT, Range, TermHandling, Counter]) -> + %% Keep the counter! + [Leveled, Constraint, FoldAccT, Range, TermHandling, Counter]. + +indexfold(Pid, Constraint, FoldAccT, Range, {_, undefined} = TermHandling, _Counter) -> + {async, Folder} = leveled_bookie:book_indexfold(Pid, Constraint, FoldAccT, Range, TermHandling), + Folder; +indexfold(Pid, Constraint, FoldAccT, Range, {ReturnTerms, RegExp}, _Counter) -> + {ok, RE} = re:compile(RegExp), + {async, Folder} = leveled_bookie:book_indexfold(Pid, Constraint, FoldAccT, Range, {ReturnTerms, RE}), + Folder. + +indexfold_next(#{folders := Folders} = S, SymFolder, + [_, Constraint, {Fun, Acc}, {Category, From, To}, {ReturnTerms, RegExp}, Counter]) -> + ConstraintFun = + fun(B, K, Bool) -> + case Constraint of + {B, KStart} -> not Bool orelse K >= KStart; + B -> true; + _ -> false + end + end, + S#{folders => + Folders ++ + [#{counter => Counter, + type => indexfold, + folder => SymFolder, + reusable => true, + result => fun(Model) -> + Select = + lists:sort( + orddict:fold(fun({B, K}, {_V, Spec}, A) -> + [ {B, {Idx, K}} + || {Cat, Idx} <- Spec, + Idx >= From, Idx =< To, + Cat == Category, + ConstraintFun(B, K, Idx == From), + RegExp == undefined orelse string:find(Idx, RegExp) =/= nomatch + ] ++ A + end, [], Model)), + lists:foldl(fun({B, NK}, A) when ReturnTerms -> + Fun(B, NK, A); + ({B, {_, NK}}, A) -> + Fun(B, NK, A) + end, Acc, Select) + end + }], + counter => Counter + 1}. + +indexfold_post(_S, _, Res) -> + is_function(Res). 
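+
+%% Hedged example of the generated range above: gen_index_value() is a
+%% one-character string drawn from "arts", so {gen_category(), [N], [N+M]}
+%% with M in 0..2 yields ranges such as {dep, "r", "t"}, selecting index
+%% terms T with "r" =< T =< "t".
+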
+ +indexfold_features(_S, [_Pid, Constraint, FoldAccT, _Range, {ReturnTerms, _}, _Counter], _Res) -> + [{foldAccT, FoldAccT}] ++ %% This will be extracted for printing later + [{index_fold, bucket} || not is_tuple(Constraint) ] ++ + [{index_fold, bucket_and_primary_key} || is_tuple(Constraint)] ++ + [{index_fold, return_terms, ReturnTerms} ]. + + + + +%% --- Operation: keylist folding --- +%% slack discussion: "`book_keylist` only passes `Bucket` and `Key` into the accumulator, ignoring SubKey - +%% so I don't think this can be used in head_only mode to return results that make sense" +%% +%% There are also keylist functions that take a specific bucket and range into account. Not considered yet. +keylistfold_pre(S) -> + is_leveled_open(S). + +keylistfold_args(#{leveled := Pid, counter := Counter, tag := Tag}) -> + [Pid, Tag, gen_foldacc(3), + Counter %% add a unique counter + ]. + +keylistfold_pre(#{leveled := Leveled}, [Pid, _Tag, _FoldAccT, _Counter]) -> + %% Make sure we operate on an existing Pid when shrinking + %% Check start options validity as well? + Pid == Leveled. + +keylistfold_adapt(#{leveled := Leveled}, [_, Tag, FoldAccT, Counter]) -> + %% Keep the counter! + [Leveled, Tag, FoldAccT, Counter]. + +keylistfold(Pid, Tag, FoldAccT, _Counter) -> + {async, Folder} = leveled_bookie:book_keylist(Pid, Tag, FoldAccT), + Folder. + +keylistfold_next(#{folders := Folders, model := Model} = S, SymFolder, + [_, _Tag, {Fun, Acc}, Counter]) -> + S#{folders => + Folders ++ + [#{counter => Counter, + type => keylist, + folder => SymFolder, + reusable => false, + result => fun(_) -> orddict:fold(fun({B, K}, _V, A) -> Fun(B, K, A) end, Acc, Model) end + }], + counter => Counter + 1}. + +keylistfold_post(_S, _, Res) -> + is_function(Res). + +keylistfold_features(_S, [_Pid, _Tag, FoldAccT, _Counter], _Res) -> + [{foldAccT, FoldAccT}]. %% This will be extracted for printing later + + +%% --- Operation: bucketlistfold --- +bucketlistfold_pre(S) -> + is_leveled_open(S). + +bucketlistfold_args(#{leveled := Pid, counter := Counter, tag := Tag}) -> + [Pid, Tag, gen_foldacc(2), elements([first, all]), Counter]. + +bucketlistfold_pre(#{leveled := Leveled}, [Pid, _Tag, _FoldAccT, _Constraints, _]) -> + Pid == Leveled. + +bucketlistfold_adapt(#{leveled := Leveled}, [_Pid, Tag, FoldAccT, Constraints, Counter]) -> + [Leveled, Tag, FoldAccT, Constraints, Counter]. + +bucketlistfold(Pid, Tag, FoldAccT, Constraints, _) -> + {async, Folder} = leveled_bookie:book_bucketlist(Pid, Tag, FoldAccT, Constraints), + Folder. + +bucketlistfold_next(#{folders := Folders} = S, SymFolder, + [_, _, {Fun, Acc}, Constraints, Counter]) -> + S#{folders => + Folders ++ + [#{counter => Counter, + type => bucketlist, + folder => SymFolder, + reusable => true, + result => fun(Model) -> + Bs = orddict:fold(fun({B, _K}, _V, A) -> A ++ [B || not lists:member(B, A)] end, [], Model), + case {Constraints, Bs} of + {all, _} -> + lists:foldl(fun(B, A) -> Fun(B, A) end, Acc, Bs); + {first, []} -> + Acc; + {first, [First|_]} -> + lists:foldl(fun(B, A) -> Fun(B, A) end, Acc, [First]) + end + end + }], + counter => Counter + 1}. + +bucketlistfold_post(_S, [_Pid, _Tag, _FoldAccT, _Constraints, _], Res) -> + is_function(Res). + +bucketlistfold_features(_S, [_Pid, _Tag, FoldAccT, _Constraints, _], _Res) -> + [ {foldAccT, FoldAccT} ]. + +%% --- Operation: objectfold --- +objectfold_pre(S) -> + is_leveled_open(S). + +objectfold_args(#{leveled := Pid, counter := Counter, tag := Tag}) -> + [Pid, Tag, gen_foldacc(4), bool(), Counter]. 
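+
+%% Hedged note: gen_foldacc(4) above matches book_objectfold's fold fun
+%% arity (Bucket, Key, Value, Acc); the keylist and bucketlist operations
+%% use gen_foldacc(3) and gen_foldacc(2) respectively.
+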
+
+objectfold_pre(#{leveled := Leveled}, [Pid, _Tag, _FoldAccT, _Snapshot, _Counter]) ->
+ Leveled == Pid.
+
+objectfold_adapt(#{leveled := Leveled}, [_Pid, Tag, FoldAccT, Snapshot, Counter]) ->
+ [Leveled, Tag, FoldAccT, Snapshot, Counter].
+
+objectfold(Pid, Tag, FoldAccT, Snapshot, _Counter) ->
+ {async, Folder} = leveled_bookie:book_objectfold(Pid, Tag, FoldAccT, Snapshot),
+ Folder.
+
+objectfold_next(#{folders := Folders, model := Model} = S, SymFolder,
+ [_Pid, _Tag, {Fun, Acc}, Snapshot, Counter]) ->
+ S#{folders =>
+ Folders ++
+ [#{counter => Counter,
+ type => objectfold,
+ folder => SymFolder,
+ reusable => not Snapshot,
+ result => fun(M) ->
+ OnModel =
+ case Snapshot of
+ true -> Model;
+ false -> M
+ end,
+ Objs = orddict:fold(fun({B, K}, {V, _}, A) -> [{B, K, V} | A] end, [], OnModel),
+ lists:foldr(fun({B, K, V}, A) -> Fun(B, K, V, A) end, Acc, Objs)
+ end
+ }],
+ counter => Counter + 1}.
+
+objectfold_post(_S, [_Pid, _Tag, _FoldAccT, _Snapshot, _Counter], Res) ->
+ is_function(Res).
+
+objectfold_features(_S, [_Pid, _Tag, FoldAccT, _Snapshot, _Counter], _Res) ->
+ [{foldAccT, FoldAccT}]. %% This will be extracted for printing later
+
+
+
+
+%% --- Operation: fold_run ---
+fold_run_pre(S) ->
+ maps:get(folders, S, []) =/= [].
+
+fold_run_args(#{folders := Folders}) ->
+ ?LET(#{counter := Counter, folder := Folder}, elements(Folders),
+ [Counter, Folder]).
+
+fold_run_pre(#{folders := Folders}, [Counter, _Folder]) ->
+ %% Ensure membership even under shrinking
+ %% Counter is fixed at first generation and does not shrink!
+ get_foldobj(Folders, Counter) =/= undefined.
+
+fold_run(_, Folder) ->
+ catch Folder().
+
+fold_run_next(#{folders := Folders} = S, _Value, [Counter, _Folder]) ->
+ %% leveled_runner comment: "Iterators should de-register themselves from the Penciller on completion."
+ FoldObj = get_foldobj(Folders, Counter),
+ case FoldObj of
+ #{reusable := false} ->
+ UsedFolders = maps:get(used_folders, S, []),
+ S#{folders => Folders -- [FoldObj],
+ used_folders => UsedFolders ++ [FoldObj]};
+ _ ->
+ S
+ end.
+
+fold_run_post(#{folders := Folders, leveled := Leveled, model := Model}, [Count, _], Res) ->
+ case Leveled of
+ undefined ->
+ is_exit(Res);
+ _ ->
+ #{result := ResFun} = get_foldobj(Folders, Count),
+ eq(Res, ResFun(Model))
+ end.
+
+fold_run_features(#{folders := Folders, leveled := Leveled}, [Count, _Folder], Res) ->
+ #{type := Type} = get_foldobj(Folders, Count),
+ [ {fold_run, Type} || Leveled =/= undefined ] ++
+ [ fold_run_on_stopped_leveled || Leveled == undefined ] ++
+ [ {fold_run, found_list, length(Res)} || is_list(Res) ] ++
+ [ {fold_run, found_integer} || is_integer(Res) ].
+
+
+%% --- Operation: fold_run on already used folder ---
+%% A fold that has already run to completion should result in an exception when re-used.
+%% leveled_runner comment: "Iterators should de-register themselves from the Penciller on completion."
+noreuse_fold_pre(S) ->
+ maps:get(used_folders, S, []) =/= [].
+
+noreuse_fold_args(#{used_folders := Folders}) ->
+ ?LET(#{counter := Counter, folder := Folder}, elements(Folders),
+ [Counter, Folder]).
+
+noreuse_fold_pre(S, [Counter, _Folder]) ->
+ %% Ensure membership even under shrinking
+ %% Counter is fixed at first generation and does not shrink!
+ lists:member(Counter,
+ [ maps:get(counter, Used) || Used <- maps:get(used_folders, S, []) ]).
+
+noreuse_fold(_, Folder) ->
+ catch Folder().
+
+noreuse_fold_post(_S, [_, _], Res) ->
+ is_exit(Res).
+
+noreuse_fold_features(_, [_, _], _) ->
+ [ reuse_fold ].
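+
+%% Hedged illustration of the single-use folder rule tested above: once a
+%% non-reusable folder has run, its snapshot is released, so
+%%   {async, F} = leveled_bookie:book_keylist(Pid, Tag, FoldAccT),
+%%   F(),          %% first run returns the fold result
+%%   catch F()     %% a second run is expected to give {'EXIT', _}
+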
+
+
+%% --- Operation: fold_run on folder that survived a crash ---
+%% A fold that has already run to completion should result in an exception when re-used.
+stop_fold_pre(S) ->
+ maps:get(stop_folders, S, []) =/= [].
+
+stop_fold_args(#{stop_folders := Folders}) ->
+ ?LET(#{counter := Counter, folder := Folder}, elements(Folders),
+ [Counter, Folder]).
+
+stop_fold_pre(S, [Counter, _Folder]) ->
+ %% Ensure membership even under shrinking
+ %% Counter is fixed at first generation and does not shrink!
+ lists:member(Counter,
+ [ maps:get(counter, Used) || Used <- maps:get(stop_folders, S, []) ]).
+
+stop_fold(_, Folder) ->
+ catch Folder().
+
+stop_fold_post(_S, [_Counter, _], Res) ->
+ is_exit(Res).
+
+stop_fold_features(S, [_, _], _) ->
+ [ case maps:get(leveled, S) of
+ undefined ->
+ stop_fold_when_closed;
+ _ ->
+ stop_fold_when_open
+ end ].
+
+
+weight(#{previous_keys := []}, get) ->
+ 1;
+weight(#{previous_keys := []}, delete) ->
+ 1;
+weight(S, C) when C == get;
+ C == put;
+ C == delete;
+ C == updateload ->
+ ?CMD_VALID(S, put, 10, 1);
+weight(_S, stop) ->
+ 1;
+weight(_, _) ->
+ 1.
+
+
+is_valid_cmd(S, put) ->
+ not in_head_only_mode(S);
+is_valid_cmd(S, delete) ->
+ is_valid_cmd(S, put);
+is_valid_cmd(S, get) ->
+ not in_head_only_mode(S);
+is_valid_cmd(S, head) ->
+ not lists:member({head_only, no_lookup}, maps:get(start_opts, S, []));
+is_valid_cmd(S, mput) ->
+ in_head_only_mode(S).
+
+
+
+%% @doc check that the implementation of leveled is (at least)
+%% equivalent to a sorted dict
+-spec prop_db() -> eqc:property().
+prop_db() ->
+ Dir = "./leveled_data",
+ eqc:dont_print_counterexample(
+ ?LET(Shrinking, parameter(shrinking, false),
+ ?FORALL({Kind, Cmds}, oneof([{seq, more_commands(20, commands(?MODULE))},
+ {par, more_commands(2, parallel_commands(?MODULE))}
+ ]),
+ begin
+ delete_level_data(Dir),
+ ?IMPLIES(empty_dir(Dir),
+ ?ALWAYS(if Shrinking -> 10; true -> 1 end,
+ begin
+ Procs = erlang:processes(),
+ StartTime = erlang:system_time(millisecond),
+
+ RunResult = execute(Kind, Cmds, [{dir, Dir}]),
+ %% Do not extract the 'state' from this tuple, since parallel commands
+ %% miss the notion of final state.
+ CallFeatures = [ Feature || Feature <- call_features(history(RunResult)), + not is_foldaccT(Feature), + not (is_tuple(Feature) andalso element(1, Feature) == start_options) + ], + StartOptionFeatures = [ lists:keydelete(root_path, 1, Feature) || {start_options, Feature} <- call_features(history(RunResult)) ], + + timer:sleep(1000), + CompactionFiles = compacthappened(Dir), + LedgerFiles = ledgerpersisted(Dir), + JournalFiles = journalwritten(Dir), + % io:format("File counts: Compacted ~w Journal ~w Ledger ~w~n", [length(CompactionFiles), length(LedgerFiles), length(JournalFiles)]), + + + + case whereis(maps:get(sut, initial_state())) of + undefined -> + % io:format("Init state undefined - deleting~n"), + delete_level_data(Dir); + Pid when is_pid(Pid) -> + % io:format("Init state defined - destroying~n"), + leveled_bookie:book_destroy(Pid) + end, + + Wait = wait_for_procs(Procs, 12000), + % Wait at least for delete_pending timeout + 1s + % However, even then can hit the issue of the Quviq license + % call spawning processes + lists:foreach(fun(P) -> + io:format("~nProcess info for ~w:~n~w~n", + [P, process_info(P)]), + io:format("Stacktrace:~n ~w~n", + [process_info(P, current_stacktrace)]), + io:format("Monitored by:~n ~w~n", + [process_info(P, monitored_by)]), + io:format("~n") + end, + Wait), + RunTime = erlang:system_time(millisecond) - StartTime, + + %% Since in parallel commands we don't have access to the state, we retrieve functions + %% from the features + FoldAccTs = [ FoldAccT || Entry <- history(RunResult), + {foldAccT, FoldAccT} <- eqc_statem:history_features(Entry)], + + pretty_commands(?MODULE, Cmds, RunResult, + measure(time_per_test, RunTime, + aggregate(command_names(Cmds), + collect(Kind, + measure(compaction_files, length(CompactionFiles), + measure(ledger_files, length(LedgerFiles), + measure(journal_files, length(JournalFiles), + aggregate(with_title('Features'), CallFeatures, + aggregate(with_title('Start Options'), StartOptionFeatures, + features(CallFeatures, + conjunction([{result, + ?WHENFAIL([ begin + eqc:format("~p with acc ~p:\n~s\n", [F, Acc, + show_function(F)]) + end || {F, Acc} <- FoldAccTs ], + result(RunResult) == ok)}, + {data_cleanup, + ?WHENFAIL(eqc:format("~s\n", [os:cmd("ls -Rl " ++ Dir)]), + empty_dir(Dir))}, + {pid_cleanup, equals(Wait, [])}]))))))))))) + end)) + end))). + +history({H, _, _}) -> H. +result({_, _, Res}) -> Res. + +execute(seq, Cmds, Env) -> + run_commands(Cmds, Env); +execute(par, Cmds, Env) -> + run_parallel_commands(Cmds, Env). + +is_exit({'EXIT', _}) -> + true; +is_exit(Other) -> + {expected_exit, Other}. + +is_foldaccT({foldAccT, _}) -> + true; +is_foldaccT(_) -> + false. + +show_function(F) -> + case proplists:get_value(module, erlang:fun_info(F)) of + eqc_fun -> + eqc_fun:show_function(F); + _ -> + proplists:get_value(name, erlang:fun_info(F)) + end. + + +%% slack discussion: +%% `max_journalsize` should be at least 2048 + byte_size(smallest_object) + byte_size(smallest_object's key) + overhead (which is a few bytes per K/V pair). +gen_opts() -> + options([%% {head_only, elements([false, no_lookup, with_lookup])} we don't test head_only mode + {compression_method, elements([native, lz4])} + , {compression_point, elements([on_compact, on_receipt])} + %% , {max_journalsize, ?LET(N, nat(), 2048 + 1000 + 32 + 16 + 16 + N)} + %% , {cache_size, oneof([nat(), 2048, 2060, 5000])} + ]). + +options(GenList) -> + ?LET(Bools, vector(length(GenList), bool()), + [ Opt || {Opt, true} <- lists:zip(GenList, Bools)]). 
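+
+%% Hedged example: options/1 above pairs each candidate option with an
+%% independently drawn boolean and keeps those drawing true, so gen_opts()
+%% may yield [], [{compression_method, lz4}], or both compression settings
+%% at once.
+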
+
+
+gen_key() ->
+ binary(16).
+
+%% Cannot be atoms!
+%% key() type specified: should be binary().
+gen_bucket() ->
+ elements([<<"bucket1">>, <<"bucket2">>, <<"bucket3">>]).
+
+gen_val() ->
+ noshrink(binary(256)).
+
+gen_categories(?RIAK_TAG) ->
+ sublist(categories());
+gen_categories(_) ->
+ %% for ?STD_TAG this seems to make little sense
+ [].
+
+
+categories() ->
+ [dep, lib].
+
+gen_category() ->
+ elements(categories()).
+
+gen_index_value() ->
+ %% Carefully selected to have one outlier and several border cases.
+ [elements("arts")].
+
+gen_key_in_bucket([]) ->
+ {gen_key(), gen_bucket()};
+gen_key_in_bucket(Previous) ->
+ ?LET({K, B}, elements(Previous),
+ frequency([{1, gen_key_in_bucket([])},
+ {1, {K, gen_bucket()}},
+ {2, {K, B}}])).
+
+gen_foldacc(2) ->
+ ?SHRINK(oneof([{eqc_fun:function2(int()), int()},
+ {eqc_fun:function2(list(int())), list(int())}]),
+ [fold_buckets()]);
+gen_foldacc(3) ->
+ ?SHRINK(oneof([{eqc_fun:function3(int()), int()},
+ {eqc_fun:function3(list(int())), list(int())}]),
+ [fold_collect()]);
+gen_foldacc(4) ->
+ ?SHRINK(oneof([{eqc_fun:function4(int()), int()},
+ {eqc_fun:function4(list(int())), list(int())}]),
+ [fold_objects()]).
+
+
+
+fold_buckets() ->
+ {fun(B, Acc) -> [B | Acc] end, []}.
+
+fold_collect() ->
+ {fun(X, Y, Acc) -> [{X, Y} | Acc] end, []}.
+
+fold_objects() ->
+ {fun(X, Y, Z, Acc) -> [{X, Y, Z} | Acc] end, []}.
+
+%% This makes the system fall over
+fold_collect_no_acc() ->
+ fun(X, Y, Z) -> [{X, Y} | Z] end.
+
+fold_count() ->
+ {fun(_X, _Y, Z) -> Z + 1 end, 0}.
+
+fold_keys() ->
+ {fun(X, _Y, Z) -> [X | Z] end, []}.
+
+
+empty_dir(Dir) ->
+ case file:list_dir(Dir) of
+ {error, enoent} -> true;
+ {ok, Ds} ->
+ lists:all(fun(D) -> empty_dir(filename:join(Dir, D)) end, Ds);
+ _ ->
+ false
+ end.
+
+get_foldobj([], _Counter) ->
+ undefined;
+get_foldobj([#{counter := Counter} = Map | _Rest], Counter) ->
+ Map;
+get_foldobj([_ | Rest], Counter) ->
+ get_foldobj(Rest, Counter).
+
+
+%% Helper for all those preconditions that just check that leveled Pid
+%% is populated in state. (We cannot check with is_pid, since that's
+%% symbolic in test case generation!).
+is_leveled_open(S) ->
+ maps:get(leveled, S, undefined) =/= undefined.
+
+in_head_only_mode(S) ->
+ proplists:get_value(head_only, maps:get(start_opts, S, []), false) =/= false.
+
+wait_for_procs(Known, Timeout) ->
+ case filtered_processes(Known) of
+ [] -> [];
+ _NonEmptyList ->
+ if
+ Timeout > 0 ->
+ timer:sleep(200),
+ wait_for_procs(Known, Timeout - 200);
+ true ->
+ filtered_processes(Known)
+ end
+ end.
+
+filtered_processes(Known) ->
+ FilterFun =
+ fun(P) ->
+ case process_info(P, current_stacktrace) of
+ {current_stacktrace, ST} ->
+ case lists:keymember(eqc_licence, 1, ST) or
+ lists:keymember(eqc_group_commands, 1, ST) of
+ true ->
+ % This is an eqc background process
+ false;
+ false ->
+ case process_info(P, dictionary) of
+ {dictionary, PD} ->
+ HTTPCall = {'$initial_call',{httpc_handler,init,1}},
+ DNSCall = {'$initial_call',{supervisor_bridge,inet_gethost_native,1}},
+ case lists:member(HTTPCall, PD)
+ or lists:member(DNSCall, PD) of
+ true ->
+ % This is the HTTP/DNS call for the licence
+ false;
+ _ ->
+ case process_info(P, current_function) of
+ {current_function,{inet_gethost_native,main_loop,1}} ->
+ % This is part of the HTTP/DNS call
+ false;
+ _ ->
+ true
+ end
+ end;
+ undefined ->
+ % Eh?
+ true
+ end
+ end;
+ undefined ->
+ % Eh?
+ true
+ end
+ end,
+ lists:filter(FilterFun, erlang:processes() -- Known).
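+
+%% Hedged note: wait_for_procs/2 above polls in 200ms steps until only
+%% known (pre-existing) or licence-related processes remain, so a process
+%% leak is only reported once the full timeout has elapsed.
+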
+ +delete_level_data(Dir) -> + os:cmd("rm -rf " ++ Dir). + +%% Slack discussion: +%% `[{add, B1, K1, SK1}, {add, B1, K1, SK2}]` should be fine (same bucket and key, different subkey) +no_key_dups([]) -> + []; +no_key_dups([{_Action, Bucket, Key, SubKey, _Value} = E | Es]) -> + [E | no_key_dups([ {A, B, K, SK, V} || {A, B, K, SK, V} <- Es, + {B, K, SK} =/= {Bucket, Key, SubKey}])]. + +-endif. \ No newline at end of file