From 92c044eadb8c1605f7e11fe9bd31aec45a41487a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 16 Nov 2022 13:35:34 +0000
Subject: [PATCH 01/72] Bump loader-utils in /javascript/examples/create-react-app

Bumps [loader-utils](https://github.com/webpack/loader-utils) from 2.0.2 to 2.0.4.
- [Release notes](https://github.com/webpack/loader-utils/releases)
- [Changelog](https://github.com/webpack/loader-utils/blob/v2.0.4/CHANGELOG.md)
- [Commits](https://github.com/webpack/loader-utils/compare/v2.0.2...v2.0.4)

---
updated-dependencies:
- dependency-name: loader-utils
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
---
 .../examples/create-react-app/yarn.lock | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/javascript/examples/create-react-app/yarn.lock b/javascript/examples/create-react-app/yarn.lock
index 90a1592b..d6e5d93f 100644
--- a/javascript/examples/create-react-app/yarn.lock
+++ b/javascript/examples/create-react-app/yarn.lock
@@ -24,17 +24,17 @@
     jsonpointer "^5.0.0"
     leven "^3.1.0"

-"@automerge/automerge-wasm@0.1.9":
-  version "0.1.9"
-  resolved "http://localhost:4873/@automerge%2fautomerge-wasm/-/automerge-wasm-0.1.9.tgz#b2def5e8b643f1802bc696843b7755dc444dc2eb"
-  integrity sha512-S+sjJUJ3aPn2F37vKYAzKxz8CDgbHpOOGVjKSgkLjkAqe1pQ+wp4BpiELXafX73w8DVIrGx1zzru4w3t+Eo8gw==
+"@automerge/automerge-wasm@0.1.12":
+  version "0.1.12"
+  resolved "https://registry.yarnpkg.com/@automerge/automerge-wasm/-/automerge-wasm-0.1.12.tgz#8ce25255d95d4ed6fb387de6858f7b7b7e2ed4a9"
+  integrity sha512-/xjX1217QYJ+QaoT6iHQw4hGNUIoc3xc65c9eCnfX5v9J9BkTOl05p2Cnr51O2rPc/M6TqZLmlvpvNVdcH9JpA==

-"@automerge/automerge@2.0.0-alpha.4":
-  version "2.0.0-alpha.4"
-  resolved "http://localhost:4873/@automerge%2fautomerge/-/automerge-2.0.0-alpha.4.tgz#df406f5364960a4d21040044da55ebd47406ea3a"
-  integrity sha512-PVRD1dmLy0U4GttyMvlWr99wyr6xvskJbOkxJDHnp+W2VAFfcqa4QKouaFbJ4W3iIsYX8DfQJ+uhRxa6UnvkHg==
+"@automerge/automerge@2.0.0-alpha.7":
+  version "2.0.0-alpha.7"
+  resolved "https://registry.yarnpkg.com/@automerge/automerge/-/automerge-2.0.0-alpha.7.tgz#2ee220d51bcd796074a18af74eeabb5f177e1f36"
+  integrity sha512-Wd2/GNeqtBybUtXclEE7bWBmmEkhv3q2ITQmLh18V0VvMPbqMBpcOKYzQFnKCyiPyRe5XcYeQAyGyunhE5V0ug==
   dependencies:
-    "@automerge/automerge-wasm" "0.1.9"
+    "@automerge/automerge-wasm" "0.1.12"
     uuid "^8.3"

 "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.18.6", "@babel/code-frame@^7.8.3":
@@ -2827,7 +2827,7 @@ bfj@^7.0.2:

 big.js@^5.2.2:
   version "5.2.2"
-  resolved "http://localhost:4873/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328"
+  resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328"
   integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==

 binary-extensions@^2.0.0:
@@ -3817,7 +3817,7 @@ emoji-regex@^9.2.2:

 emojis-list@^3.0.0:
   version "3.0.0"
-  resolved "http://localhost:4873/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78"
+  resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78"
   integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==

 encodeurl@~1.0.2:
@@ -5942,9 +5942,9 @@ loader-runner@^4.2.0:
   integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==

 loader-utils@^2.0.0:
-  version "2.0.2"
-  resolved "http://localhost:4873/loader-utils/-/loader-utils-2.0.2.tgz#d6e3b4fb81870721ae4e0868ab11dd638368c129"
-  integrity sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.4.tgz#8b5cb38b5c34a9a018ee1fc0e6a066d1dfcc528c"
+  integrity sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==
   dependencies:
     big.js "^5.2.2"
     emojis-list "^3.0.0"

From b7415d18eb23dc1a398c4ff93e93dd20be608dd1 Mon Sep 17 00:00:00 2001
From: Orion Henry
Date: Fri, 9 Dec 2022 17:44:07 -0600
Subject: [PATCH 02/72] adding a badmessage performance case

---
 rust/Cargo.toml                   |   1 +
 rust/badmessage/.gitignore        |   6 ++++++
 rust/badmessage/Cargo.toml        |  18 ++++++++++++++++++
 rust/badmessage/automerge-js.js   |  26 ++++++++++++++++++++++++++
 rust/badmessage/automerge-wasm.js |  26 ++++++++++++++++++++++++++
 rust/badmessage/badmessage        | Bin 0 -> 577800 bytes
 rust/badmessage/package.json      |  13 +++++++++++++
 rust/badmessage/src/main.rs       |  25 +++++++++++++++++++++++++
 8 files changed, 115 insertions(+)
 create mode 100644 rust/badmessage/.gitignore
 create mode 100644 rust/badmessage/Cargo.toml
 create mode 100644 rust/badmessage/automerge-js.js
 create mode 100644 rust/badmessage/automerge-wasm.js
 create mode 100755 rust/badmessage/badmessage
 create mode 100644 rust/badmessage/package.json
 create mode 100644 rust/badmessage/src/main.rs

diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 938100cf..9989b96c 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -6,6 +6,7 @@ members = [
   "automerge-test",
   "automerge-wasm",
   "edit-trace",
+  "badmessage",
 ]
 resolver = "2"

diff --git a/rust/badmessage/.gitignore b/rust/badmessage/.gitignore
new file mode 100644
index 00000000..55778aca
--- /dev/null
+++ b/rust/badmessage/.gitignore
@@ -0,0 +1,6 @@
+/target
+Cargo.lock
+node_modules
+yarn.lock
+flamegraph.svg
+/prof

diff --git a/rust/badmessage/Cargo.toml b/rust/badmessage/Cargo.toml
new file mode 100644
index 00000000..e6b93447
--- /dev/null
+++ b/rust/badmessage/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "badmessage"
+version = "0.1.0"
+edition = "2021"
+license = "MIT"
+
+[dependencies]
+automerge = { path = "../automerge" }
+criterion = "0.3.5"
+json = "0.12.4"
+rand = "^0.8"
+
+
+[[bin]]
+name = "badmessage"
+doc = false
+bench = false
+

diff --git a/rust/badmessage/automerge-js.js b/rust/badmessage/automerge-js.js
new file mode 100644
index 00000000..80d19051
--- /dev/null
+++ b/rust/badmessage/automerge-js.js
@@ -0,0 +1,26 @@
+// Apply the paper editing trace to an Automerge.Text object, one char at a time
+const Automerge = require('../../javascript')
+
+const fs = require('fs');
+
+const start = new Date()
+
+let contents = fs.readFileSync("badmessage");
+let doc = Automerge.init();
+let state = Automerge.initSyncState();
+[doc,state] = Automerge.receiveSyncMessage(doc, state, contents);
+
+console.log(`doc.receiveSyncMessage in ${new Date() - start} ms`)
+
+let t_time = new Date()
+let saved = Automerge.save(doc);
+console.log(`doc.save in ${new Date() - t_time} ms`)
+
+t_time = new Date()
+Automerge.load(saved)
+console.log(`doc.load in ${new Date() - t_time} ms`)
+
+t_time = new Date()
+let doc2 = Automerge.init()
+doc2 = Automerge.loadIncremental(doc2,saved)
+console.log(`doc.loadIncremental in ${new Date() - t_time} ms`)
diff --git a/rust/badmessage/automerge-wasm.js b/rust/badmessage/automerge-wasm.js
new file mode 100644
index 00000000..33ac46bc
--- /dev/null
+++ b/rust/badmessage/automerge-wasm.js
@@ -0,0 +1,26 @@
+const Automerge = require('../automerge-wasm')
+const fs = require('fs');
+
+
+let contents = fs.readFileSync("badmessage");
+let doc = Automerge.create();
+let state = Automerge.initSyncState();
+
+let t_time = new Date()
+
+doc.receiveSyncMessage(state,contents);
+
+console.log(`doc.receiveSyncMessage in ${new Date() - t_time} ms`)
+
+t_time = new Date()
+let saved = doc.save()
+console.log(`doc.save in ${new Date() - t_time} ms`)
+
+t_time = new Date()
+Automerge.load(saved)
+console.log(`doc.load in ${new Date() - t_time} ms`)
+
+t_time = new Date()
+let doc2 = Automerge.create()
+doc2.loadIncremental(saved)
+console.log(`doc.loadIncremental in ${new Date() - t_time} ms`)

diff --git a/rust/badmessage/badmessage b/rust/badmessage/badmessage
new file mode 100755
index 0000000000000000000000000000000000000000..f2d5ff2952a638daa83b4a481a4f7cccbbe1d904
GIT binary patch
literal 577800
[base85-encoded binary literal omitted; the patch text is truncated at this point]
zqEXdc0DFEiMymbC2J~(X`R{x_20g0(6qq_NSxpjjQ_V4)YBiOe49@J?x~#=padHO*gz=$ZXoOaFXXP)XXRN1@Y{fmhdNei386~`;kpQVNY z-!wO-O-x*O6A#x;rzeLV`;v0~-nl=OxmK5~gE8`NQVmWSlp->^-!I%4y9{I)@YEr2 zhSzG5Atk`KueMK0cHqlM%@*@Q0ZX=N?00u2Nwqj-P>RUt^|z1Z3cQ6414Qcv?XA+7 zK77ycZF$}~XW1#RIj(EvA=mgB=`PHer$}`;Wl)O9;EE7=Ucvsk;19T+3j?~PX_GRz zmFsq1j}ys^PXtpB*EuI#RL9;;ohH@)qm2G0ssC$o6!qU3u62y)%~P;TVnq1B z9iiI+4=ZF7X@ht6YJYfrSmFKq%m|6A3vE$CDFP>q#(rGE419*vfSa%VFTR!$*~%T? z?^prL9adZ^-!lH{AAIHR-i?6mXG`PtuQs~yJ-e2&zFQwF8T8b{CEA5CZb z0~rSVh^Dfo`d-WwujFHVTwCTjJx@SG)SQa3YA>k(BhQ1EW$^v7K`A1m=aN?h=1qVM z19WZ&{5n!bl9p!g$XJ+2*`D4}AE8+OyZnU(ANKe4htz^o2BnCME^XS+Bk&DE7(g2* z+B^3^^n<*X%u}6_>dGnW4XUMLk0`qNEwV9VULdvNltC#Xqx+P))i(IW4jAC{{R(w7 z>*%MdSNp53vb)6ui65uy5~tPCsS<0!$cv;loH8gyWb|B8Fv&+2$S}aQMn#aVUR>^F zVbTr%u(qZ4xS%&yyOfIbMsMn3zDH&uh3t-ng@#3_SPL`LT*IjY`=Z}`K2x8lk1Q+=~yyI6mO`M2dfIs3y*Wgnls zTUMn$b`QQr>cT05Qbd;d`*a{Xm!TBMFyLx(LM-R>=sTY0H8koH^7n~PjkXyd{FZxo zzRm=*wBM0`;*>!tBBKvpb=-~Noi!M6FH2H{V{3<`GjIPg$4vnr_gYzp1!H!NgA~;p zFtQoB8>b9P5g9!@G2?CG12PP#*kz)cp3pAleEE9qhQ9KUg!=oR78F|0jmR>S5t|0XE!cNdEy#U1Wl)O9 z=v`xJ13i7Xv_U|T%+(@Gu2-CUfBxFp9NIE*+I>>K)<5b%Z@5n#W@&#Q|H3JQQbb1g zo{Y9+ID-TR>_6kL@zG>YP1HMbznO}&q=qs#%QTNk)m6(#DU57M?#C&E(sk>(?D#h- zAj5#M-NB7>Z}>|Nn{7-ZbNB<-o}m7ab?-q8JJ>; z5eNEtVTvfHAAWZe-YKmFxXMYYPl%P*98*=rjQJyZ5T^`E*Nr((oK*zKFkrK%gq>eh zjGtoY&dlTGr`LLzMfaW5sorM)IY<#Be$Mz$r7;*>!tBBRgH^QSxEw?be5Po_~-l(Xq&X8vTajyq>!#E%WK zuk5wt8{n$M-es~QkKvR-DI%k1y&=U$yMPP>68cXw%!b~n+M;&j!|y$(88!>A1&U-< zb1}Z~8^Vm)o;;3I2BnCM?!z+Qo9Y1>20Rr}EQ)QHJh8Lv$8G8@`H}k>S@`~3E&gmi zNAm?EJCG-E%AgdH(X-zCa7p-t69$-+>bYI1^=DC0(%@9Cp0wDZqOvEjc=CFo#XRo<=fT-qQYMB{`!)!`!{Hg=tNEvF3gyn z$dfo_P>RUA{$9_{XFI8b3rZMJ&;CS9Z9c)Ye>rbMKEtc(7avuu6MT9obI(4u#mLU& zsehFJ*Y)h=e_c>c{^x?yu*w#GN(@%G)#nvY3-hOP^cJ7IctUK;cR4}VOyLN*KSJ!- zo0%@;>3`(^{elu5IJ!HD|Hb$n$S|Pg)^Wjl?v|?E zl4o(spmf~^ZpRVB4P+SbS>9{wm5Ni%d#!DI9%{7hmlv|nPn_&nuvVm>#EjXEJcm;T zrHG6^c-t{m0dG>l0O>}HSRc9DcRvwK%U_g@XDtf`Q9mv0`V^esihYFdPM*gpgHl9B z4_HlXGv|Q}100Q>rX9Dp7-H+#6UDtJSbO*aOK+OQ{H1^`kgZu}l3`!9hyC-}t9${E>8@2aQrYJ92t+(taq?}>XH#-5mXlb3MHpcIi+|H`+%#Z&_s2248c z8xbuzw?WOuza{Bl$(gfP4^1xeOK@4d{D6H3rbpcEN1dgI{9&irX0!+^chsoRdMoKr9} zzY-dEee|&DCsk>#9NJ?{DMND@*^j)6QwF7oj2@)*L`rCY3K_Ax@5mr#CCvX)cs~3JFCuZuYPfleY6v=pU;<8CEyV-7x z97MLjDT7i(Mkjq)=PAGo4H%GKr=+b+J@aAEf8{QuqA2=w3omLn|{)kfsrHH)$?|wV2 zD=G}gFhJ*i)ndv>54YMtY6g?(iygvV{F39RkEN$cP+@P1g_A$wltC#Xi~W@^&jfb? 
z83vqcOK=T+BBji#vD=B(^Q`RFNXG){4I%P7#NKmYmUaZ$8mA0O5n24N+?}tyAILC3 z_)Ygd+ov+0${01iH$GE4yUTLdO~#@FI-CwB*ljzKY=ctnR6FqnRUt7B*+j0;kNvfR)%G0{_x^ z5~*>BLtS6QjgDsMVpPcGxC^d~GMJ?uO}4`+gHl9R`a9;mdO7fwK^S0HEti|~Fn2*K z=hl~ywx*!_vv&47j|B%gf0I6okz>gAIAu_Z$mp5DBwH<@AiAU_whT|U*G%B^L0*Vyw5rBb3Rfh-`KTHj0{>JauUg% zpbSbe8NX#Qu;~93$S~kc!n{uC>4!Vs`=6B@%{SI9u3If1tjGEJ(AhCNL{27o5R^eF zCgT%VGkquFlvx;X!aeD>Nl@(SyN^}3-e)k>W#Vp8+S6uAtyrJ-1d&rno&;r3ipltK zJ2Yt@92vj>*TGo3x;(afTf#p1TFBVM`56V$wQZI*iP2Y1MdVbH7eN`6VlsXe`|+Be z29RNZg3&R*P~`~07$5(JJ;E9ZNir|{q6<|-6e~8MFDs^zya~#n6qE6**sZ3A?STvf z7Ef#m51zeYI5Y5|@$9OZ3{{%L`_8j$U28e1ir(-|C;1SRK`AEV53L&0t8#!01CBb& zUt24)FeUf!bk?TGC)S3$=*}Lzd@VXj^a%P{>I{-EK^c@{GCl&_=far;WEjBgzH#hh z*WuZ!r!!9dFDh;LkFh@qf6PyRBKk)n@^{W8`4N=yBZTOM%riHf--G#(@~!~5jlqxNKgi)n2cXz)W7-x?~}j)^Q)&TCUul#?91Lfj<0>; zxZ?)xi$meBCbt-!Y(?a^q#%MaD8*!aullJG`xD48phc%D>bk-0U6Sgr+wxUT*uLW{ zc*91vOIkhKj!v1)B?S|dK`AEVd)3v~bMSMBFkr&2GeoEULfihwIU8&arT<7=X;}Ag zMbn(~F3mN_GUt&(2+E)olkwxW+qU2EEC~a$j*9TFiE2vGzQWGj;lHbHZs#sKErB;Q z&lUQ`5ILU|N>B!+n2e7MOjwNIG$I)Aeap57UFL_Ld^z8u`SYw$Xx0@Sh55z1f%RSy zxrqFZ6h=@6cQ=;Fw6wdIfD8j3=v4B*h}=Z)DyjKV(#z)BL%DLbZ!LN;wVQ*{SNaP` z;RIz+ipg95Ig%uYOT(!hFrb~anXkw?MlmsxxwrOsZf@MTaEFs>sV&t;4Sl8MJ?RBO z8I)o&{)o`@=b8i{!vL)cN5k`4p1fbSNxO7zcRl6vKrb)y-tT*Yc4Aq`+AbtT5R^eF zCgaC#89w7?Aj1GtHo17AGdI8IYR|f}teUjcN-VMXLh7nLcBr`*kw1_k3Cf@plfh^R zo3CXYMs}V%5B*hVwK`AEVH>sioo(KXN225E+6bm00 zu~}$*k?)%2=JoqVapTdH87W$6x;aGtM2aUUgHlY!2S+zMZQyMa7$Dg}FDa(#!@2RQ zX1*i$%gyyF2jYKc>C10c$w%Kn{Y*+AD1%Z=#@~J2c2fkNKwtnfnW5d#W{1!PPmZ{w z^`9)qMOiOo$7%0cOXlxKmiY@Qk)RApF&Q5yxb<#OMGj132&(O@AC8j{L zj#k#h8;^b`(HaD!bF8aKNd#q3ipeMbIbGkF=!G}tV8G|2TIaNf(q7(5zoU2~pi6(y zB=X#p#mUXwC3(!~+NE8ghkfOy zCl%kz?0nQHj>t8n6oN7+#bkV*+%tm@>Oh78ucP!ESig%{u5nRuirIH7buIOe>3OEx z?^l10+lW~rhKY%uiDvo#g$-Fc*H_tqF$0Xc_Apv% z|DFl(Yhp%nRPTNF1z&{P77yH=v-eqnzKYvO%Ke`)^#2iqPq2Sa{=Xxp_hjsd9xG16 z1O5)MLf&SVg}-rS>I;g~vum1#w#UZ`MtkH0Y0jUQC_uKoCQ{yC`Tss<_{sF|zXK-k za}F^EG7PA7n(ll2_6XY^%U3Fk#`lYb%I|(no2k6*mA1(Vk-w7i3CduL!L|T=Pmgu~ z43`-O(9iBa81?#GP#neG@_qtuok)ieHK=OKdVz!9=;PvM(mR4OD8*#_W?_upc__nx zQ@rnwY6;!iGU)a0-iw30>u5fYzNTk0-=(rvX)UtMEu;d1GAPAl{QPMlJ1+uc7@*Y@ zp&HZ8EUldF%FVb(Yefm~U8nQMCC$wRb2AaSmGqvV3`#K>e{@_iE z+2+wmg@-=1k2KZbc{3~*v$+c$8@7`^5R^eFCgbPN6JFf#L*XzW>Y$Qwk>JbfF}1DJ z7Q03yTs`vz|2)`q&3BXHTV!p2BNY*pLFux;vzX)=IFAhmjO$uITij&t*HxHEw=THb zILv2jK`J3AgVJSdn=5Nq50GI%%=i57kt@W1Tp!U2U5H>{T&*}|P@XSn zc8^&-rsUYt|IVFd#noplH{wLxLqNfoi8Nox12$WGoa~ed3|n z!K_!vGIxJWtpIfxz3%BHRS=Xx zDJJ9lbGcAl0FYt8!U$I!*U76M_H%t7l8=I4-D0=6_h#{hqR6{Ybo$2+QYAqdlwvZz zSM_qL7y=mv2nRfwxHurWGM|}^!{>>-D)?#%O8_J9;`Zx#9LV3fkMxP43`#K>f24L_ z$_~yBg#oV=^zL0`O_QHFQMyg#u>$o6&##L5m^1BrD;UxD3HnK&3Cf^!+1fsK<~SUc zVgWH<47oR~TD5Pw=g7ojrJ8XaV}9KBzH+}a;pmO<0n!(OGAPAl{KlDx`+NbAVE~`d z=N_AnN`g)DV19d~htEaXJDk^lgg?3yckMc|wg*X71Z7Z)$@u;(JTrP9$S^?eG2>^o z0f!U@Fbzd1PpHr2Uc&0uNOq>gUn@)yd5Ba^PzI%#jE_mgbr#`+Ef}C3JKy}{%~8^} z{in|#Kb|4yQD?IHw&LrS#rnu* z_~yQQidv0qi}iaJjOkyOBm6k2>972Mzs|w`;`l_6R)404K!yRM_H|*8T+9_dJMimD zx9$|lGH6mfvV-zg!E`S=zkh=Cm7oktvA;Ne)_Sl;(*npa;HFr}W&{7{N)-WaNA4D; zX-RUopVXh99~=FlhVEaJq-KILD8*#_cWw{p<$D4d1{4WjmSxwAb6ul5Z!o^{R;~cK zHt&rQ%?W|Vs(fT^Pmx*(%AgdJ@jZJ|rhN#=FrY!T@xYSUPrNwMgA-1 zVjR%(*`@;aH_J!H>%4^5k-Z{p;6#lL_@`mdco2f_N(-Y6p zJJB^Pr1Z7aV zY;Bt?brb^`1{?|rUy|{ejVx2_?|tX%`d0be(;+VYYYZmf>(z+7KP^>_Ox|q%MLoD8*!aL{@fpD?DGq zfQbBq53e}&NC~;^v#!rjIF&Kz&f#d2Ezc=&0^PzEN!8o;b>zCd5Bm7ln%qlJ2Bpi^_Iceoe;~tvu=#?f3Hd zXm*r3O^ff|VsP(?+Wc@d`W*xt@(+SCC|xEe<%&fB83xczxL$VCkY)M3SCRYNO?J0{ z{O4N(Iwj?N6W{0{YulFGM^FZ(n2aAuzRyR%FWJC=jFar0@9st2WJ@>Sc{_ffkUxzp zaN&ZMf4{rcJR;kX`w7aR6qE6zr%UEZH6X))SLc&ou2Rn4^+|eA*zofM(%L7Q414z0 
zZ;HxjoJ8cO$0z|;QOR}JwHeKyk9V5V_%H{aKMty&BgwO1%_ z(66@HlLraPpcIqwqi53>FF2(P20Tt)Q|=#kgk8={a3x#N7qX1a+4{_g9DWzk508;$ zenuW5D1*{vYdb953Qn(r0UEJgeB_Dvs#_sag=$N`_V{JoWoeO4k*{u02}5KD@=t;? zD8*#_>GiF-frditq8Oh?wX8+n4D3`#K>|I)^s;3K$g!+;pBAC`3Mas*YG zQU`B;m!Cbvv%%#DIj5jR#1Flab|+5~ltC#bKW-n((7p_07$EI-cCG0FX4R+4i87`_N}9ns zKbg}zbAPUH{E6Nf^CJHyD1*{vYr7(Pv>M1TAS!-BCwSqVdJdCm?}Noor>N6BJ=5z) zOXm)Hphr({@(e*4lw$I~&yviVjX;J0$0dYoACEUsYSSE)EX(4^&t5I?Hs0aRRMXgr z&cE;>&k~eD>9VzbQt{q)Ag==f8tEctT_%=)_|7l&GZ=He7A)x)VmADGp233V8uEAc zCC?F*LFux!y-P?i638&%oxxVgB-*XiN&c+Co`-4@KK@A?UR>gu5%?1wgvfs6d4e)1 z#bj`n#BLBZIBk{%H;7;BaIf7~FN|JMy7_n@C}-Z!k+^W>RaNZMiV4?hp6yl@Ntl=ALHJ8RLYIjhXO$B+dN zApiL*|KD#A;R}v$X4Stb@InIy7!)`4?EN^%t`;+O)5JXUq73s6(Y3!z&d!OHqi>J| zk{1cepcGr>_{|dOk{?z;h5@c#&TK0K?JKo5=xX(p2ySY&=kw%ve|Nk2>sn7_nS;nn z1Z7Z)$@r=A_U#-vJb(c%Z8y>xltWc}c!EncZ@6?l4ZZXF(2nWn!_ouC5jmJ_y^7#l zt^dyko~9ud3S<}{%&VzX_U`wJ*>UBMrNNQ>0nfuSG_`eOT?WL=5jlixLr?~#*fMMW zbMV$>qw-eIWz?Ajt5_3*_AS(~F4si+)Awj(HmQcPCn2f)pw2rC07057vn!RKA z)T0%KWHGvsgkY~^lhwB(FaLhTKW`|8PN|6?KO-oEQcT8gK_M=vtqt^ zXkIhrdZgLKyurX}of}_%ZJU~HMW<&*k{t-jpcIqwsTMXx(Qxt{45;DG8#4|&`r+lx zC3c?g=ag5Rh<@tn9-zwQysZdX+b_wE1Z7Z)$@q~(BP~}M$S@#{XZ+y6w8D5qx1)1i zaV*0N9tn=_5ABcY!*`*#re2Yq2+E)olkqJqr_iYn$S^?Ufxg)8T2j0Vm*w=AiT13q z6-fu@HE&FO*nAGX#EBw16O=(ICgT^B_x;)6(Gv!Q7x}QP$}A{I^vEW?YD%d+R(D_{ zE59pQM(NxRWNk;2pA(cpDJJ8?Kt~ zm&Pj7ZX@z*vI{{OlwvaeQ$stfuE1MUFu-kCz^LUxk>G*Cj=)5H4*#O29pbw@SWaJh zs&WF6W5})qWl)OA_!jo+Qw|&k!hr6%$D(s5hXx}hSynWc>c;$v6ZpRKP1%90O>F4H z!dS8!K^c@TTifD%Jn)MuFhH?+uhpp^*|NFTWl2LiCC>xS(_W6$YOuZRbu9;3<~L+_ zf-)$@Wc(K7tmV==Aj1GFHwmqS3vzu2==z_#T3zohefvi&cG_fQP?8g!k{U<$ASi>< zWotXBl^#x=f&q^6CDZ;p-~Y+>5mesjz5DV5-uj&6Er(axSdF1_JlT_=3`#K>-=C8z zrWk+>1Bx0=G*@c#%kykL7ghGv^QH~g4YBu5emBO3QvHy%oj~>?D1*{v%N#9KzY)qH zAeZj#O6}tnOFif2XSN6aeB<0WA;HbhYwvTv1O3uQBH5du3`#K>zfJSS(;vS12Ls}K zL-wrW(To!IEwb+zR(@|C=JMe{5$DSxH*`sQZ zntppn!TJpk{P-KBr_vtCG5G|{CCIg*ax&SMpbScvt!)eAmv9;p47e0*+*fx~sAj+V ze#;*{d1mr=KXbk4Z)OvwT_24sa|+pypbSbe8Q;Q2KJJCnh+sg-(Fzl~TbpZa&#SW@ z+Q0Mkrl-;JDRlv(;d5N*qoY)^KS3FkVlqDcGy2@oX&}RZ+9q*FQ~!X1r}-r#x*yDG zH|6TtpT4O3%feNjj4X2+Ie?%HO7XRgUDmygtJw-J>tMiBk@b`U{T80!GjlvvFSOH7 zosRWd$E8>|J=GA4$m!(3zsmpXvMylN|DE3)unL=He0r;;2Uu`eVeW7zd6Sg!fYeep z=d3@+%DChQtpXQcZQuH9n;F7qkc0lp|M$x}e8KSzyi2+aZs0J0lU>>)srA?e4z{f4 z52Q`o%Yw#tU%u+X8o4)u29Yz#!31Scx@0EENRYm4@Bg>pk4kajq(q$X?^Xt`4K!yRnRlAR5hhJur_fL~x(slbT%0ky>^>oYr z$43sK^E-0LVFYDRif`c97Ur^=Wf;gXfJb5~$M}_u$aOY@OGelIG{wzXrq_ww{kf>s zHi|6sTXHx-8I)o&{!}*p%N7A3!vLw{5B$qIP89H#ZCW`!_#J`d_l;~kKIwlDRoQw2Na&|KOck0dE^L! 
zGAPAld|=Wh!yxCa^&2yEaGGi8`9fhqG9#MBB{agcDKOoEej{K6K3`#Ni z-}l&gf5SN~Fu>tLLr@jzRiO9@g%fMt(`|Dfi(UC#P(Qc3&^Qy33&^hs%AgdJ@q6q& z{I_zI6u!d}Bl^dAECiFFbx< z)Me+fHxiG;C!$|=xpm8?qR$CS$#Dc_P>RX;w*CI$r?)_c0n`)MgP;23EFN`OHFGz2 zg|U87R_n+k+f(0z{_;B)AIb3qWl)OAoB!Fi_w>obOB@)W^;Yjl=g{5}PX7ZcC-OV* zZew6E9kG02`?*p0JhHaS$O!~xP>RX;7A6q>2+l=>0n8-RN{XDJVc2$tjkWsW+Si_+ zQ8$-9w~4FC|0hLG$uUY z;k{paOb%J*3UU%b8I&%QDZYMhfD8i!jW09G>`< zhRBuVWP&m%#bo?8&11=wFF=L?G2yFrX|>kJHe0tbyD+(59w}rr-IQ^B*P(F_bbiMt zatc8ilwvaeFeC3E54>3d11|sm@S0UV>q4aE56e%#%d+2@j(*)|La9CGy+Hw4=Fj9* zf-)$@WPA&|TAB=H81USP%dO(#_+@hOll3!;_8j!ynOjpiia3hRtN0N43ptIT3`&)1J>Djq-X{$)efqD+x3LD@Cl6`d;0U6!mW%){n1O=YH|ia8I)o&zJ)CY_g?@q z47lC*d)tE5$iQoM`%209>pw!z^?y{qXm;X92mNVenQO?I1Z7Z)$@u=vCa_x<$S`1= z0k^i(SN>3^h7I%A8O>C!!z_m@SQ-4zKh3vBuQ|dL zS4E|(yxFQSwWp%x<0aPxJMq<=h+I$3At-}VOvbP1gvAdS0T~A5=x^P|YBMio?Z4po zr{?R;^*8n2cmGL{8m3I3Pof*hZwbnv6qE67yVC9@9FxF+bf2B|3%=d)M=y3rw=ccq zm)&0Q?v3?hch1%-^op*LoJ&v!rI?J53^*KA;rmB0AZhEYweVe~_?>!IF6FwzW%lQL zxbHpT6M1&G6`f7fM9w29gHlY!ujuN&ost7G44AyMi~aH5!PD-a_Stxc-F7|KOxtG} z@{A|0G8~=l`jwndPzI&TmU(Xk4}2dV2E@+H#!l03GrW8J-K5@)!~I*nUeLa8@T#5U z@sSnT!kWqN2+E)olkxeNUyoJ5nI$m5&nNt{XYg^;>4~-xX2FQp$yQu4@=;zleVfz- z5V?h1Ku`vyn2e8xzFbK@2xJ)G>@hw{Z&2ak@U7&FW5SaHyIX@j?{<7p)_kSQfyk}o z_XK57iplu?tQ+TM31k?c;-&NKwN=+uPcdh)pi2UWr8*`nf89PZ{HA;(`uhiMX943^;Q$yry*nOIjU+Q2dnE#!l*XKZmyX0Kw^m4d@qKzLARv%AgdJ@sUCA zjUsp_4h9tDJ`umT`$32B(z{FUc1Cw)YIAR|Q=olT+}efyoxhWd3Cf^!*)j)(Yd!}u z4Cr68onu|c;DKXOL!|NEERGOXaZ<{G!Yv$qztE9E2f2ix3`#K>KZdc!ZibJJU_h~< z?b)N*tu!_a+$(ge#RR5xJXOMoxrCK{WzbOh;=YDbh*k`%XRNa)L4_#bkW1^6lIR?*YO9U9Vdn4^|9IA6l{YxW+5< zCh;eCyn3(D>^dR3O%YkFRP77V;KJq7mGAPAl{7B+E<826J7{I9Jzx$Sm)g_NZyLC;cDk2)yZ%u?T+)rS5 zbq{@L)ldFRPzI%#jE{Q9o{Yu9wG9G_Xae(ToA6m-|(W8D_9e#DKMa!HpcIqwZM&xUu>z1`0R1se=Na0uz$Rar4l|*qRb@BiQ`l`kTs&*ck50)OB3BcX zK`AEV+qRL$9K5Fw1IFkp!=vhxLwD8exe%_>X5@7^KHYPQ^LK+{{widdf0Anm%AgdJ z@#D6a$U(Sm!+?XaRVjC7seYX7mx|6;t4VHji*yz{BE@o7IPxhZ~D8=N1|J?W7QZ!@=WEk*XeD9_U{vrOpJQd(a z7~TqTSR78s^tfea-aq{VS>{P{GeH@YVlw{LJ^9$Fc<}fe1~_T0VSnl?v3*PDWtj~* zVl1+`>H)M{Ef*Ubve0jGPLW&wD*vy?-}V17tGWJPvzqC&=i|YG!wNs|%%3)42X>6g zFKvj)tCe2IZZP!fPEgFJb2O>Qf=`oM|H}XO$KU_4;Q#u(SC_LskYT{-)El#{l-<$p zOU)xz?Tz&?wD)OYM)IEV zrZ^52i@P*ecPN^EvDwH{I2GGH)@!*cm<93Y$lw2`&-fqTl4E3`1^;NQ>Q~XzGBD6H z(ER(q{?%Yi8Ic1GFr}e~G2UaF>CrW>t%E};?3rD~(p{$Y7S9DsIa-j0d2;vve*>8o z{G+k@w2DMaCexE?{{3J7YPh_xc>pxPlwAq4`x?ZHT3Ua|T#M(tUimG5w|~>X!rJOH z`}>fF1@h4Ue*+^e_(x;)5j?45WTa;V-)R5mzp%}p#)TRVHbPi3SAX%i$l-hZK9OsL z@9KHXUHG!SSZz&3RIgbYI^pII`PW|){`-!HUj^Y~)cOfFcpn@FRJzVy$=W-;Em<(S z;`voQJLbDLg`yln?EdhGqO$@P$zud%P>OB-_!Q08@egT$3wfaCsd}Lu zV}t73qj=cgA9a_$gvi#E34$^x#bkV3Uh5es4`di{y}0-GYQ=^#MPZa;@9&L+!Nq~e z$1;l8=);-l5!r?^Nl*r*n2gUy_#W1?4ahK{=6J<{JE zJ&7Dphu_z`i z4q0Y9$}~Y4lwvZzZO6Zp4FNI?SaoCg%lfYZlSFPy*3vRX-PHm{i0njJASi=UOvcCMjQ5Yg<2DRvnwimy zkqC1TUd(J(ez->UrvaU=QT;^KqMeFAB0E$55R^eFCgbDs^59f2Aj5$AgDgeHCGSQ> zzjn`yJz^Ic@#TL%D(yevmz#>-nSD-KBq)Q@Woz5bFtH8DFyQu%`iHJ{Tr~Yxo~J3# z?+ti3&F*3MX;@%@n%ImivkPU3pbUm~m<+xFiw)KKTn@5>p&AUhKNBmlX`VJbG<&DZ z+WVZ(xYaYS*6#|K2tQwW*g1b{}3Cf@pTjls^BQNM`5s+cPP=&s8{spDE z?OzImRIRtJ5sl4PTeL0L3poFFE3(WU6gz@4D8*#_#_%S0n z@cb$GQC<7vqnUwTnPCB&UKApGQl1i&K`AEVC#-(6^b{b&fX#uYo3zV4B2?GCd7*j3 zDm%rbE`2?{-C)IAUlJmFQS1rIpcIqwkH^-w$<@@2|N0=RL&(ro* zE9P%R*0wLjk)RApF&Q6#&K*m#2Qm!kP8cDrtk74OruR+uyh?4#eX4srDvxg{eY+?6 zj*uV4iJ%Nhm#yvXs}BYO83x#xNnf)T?GV2(m2T!t%OrNXE$tm^+7UCue%d5tnf)ow z1Z7Z)$@r=A&P}Q&kYNDB22CmNhinx`nN3nPev;nToQ$lyJP@|}q1%t&h#Ww9PEZD= zn2et)g}*In0~rSBiSjGeP%E{MT^VwGa9dq;Ft)RHr^}!n2Zni znVei60vQI-iLi1`vkoS=^`5HgJ5!!|Z;SM$B8qFhU7i&BcMhVs5|lwHCgYFE^&$k| 
z19})xp)+WfX=zGdBlS#?qC3lazk<)+{$OISLf%&{WSN60ZUkjeipltKyYXnqJs`sX ziK$=iP4sH9{ElDgdFrXumJnLicf4ziw<(5bBXS7EouCX#F&RI4HWsYW12PP#WmRm_ zdSTyVecg3*dyK60zBggQw>@?%&lleGK;%%02SFK>E?ed;>T2+lJurY`k@3~Yge&;M zVc%q}hJ~5T2V!*{{Q6a!uWUi(Fp4KZ8I)o&exV_$@(>R9VLxgV!i7~xhgv74ySk#ltC#b;}0=;?YV@3349x5@K;)4Kw0tQe;it@}dC>o1OalP(X3&Ir|McNttUC^QY6 zxQf23{gUEGPzI%#jGrajeY_=s3C==4=&0J3705P7e2tHmkZ*xLxQ(I& z5R^gbvbB9ccGL#QFrdAfCH$w0XX9Sr&TIh}mhx6)n4nWYqA7s{Wl)OA z_%UqlkTCoj01S}nc)7ax#rG3Yok2alt0{t6{XCAWPp8(Zei@ZUwy@WfAc8U|UAD~c z*k4Hj83r_a&@nl^xhu3eN{+4M@#Y$_I1_2c9bSrK%?;?MTw*A}1Z7Z)$@sa7Enh?s z$S~j>L-NGEzEhv>r`S4FSB>a#?oh9O8<1!6MolRJS>{+u2tgT?Vluvkr9Wpr3S<~? zKQU->!E2UxF!s{Wo3fu@E!Vb(Q~Qe84Q{ziAo3eZC_x#NVlw_&{P-0ikAMsVP75j> z+O3v;to8l&vu70k?C0+CHHv?HU)|y1;5tN(ql6KZLFux;v;7_#IB5_DIKOLdj#+t| z`e%vDNww!y_%H8~C;QjhPZh7dkcY_elyHJFD8*#_jM4HZe-)5nK-q4-5|zaKWP90= z_9ty^CX$+78V+x)M2;7}LAUJ$$_s)rD8*#_jM2I30Q~Va7|^7^q@k(EFT-o*OGm$M zvP2~$Cec8NY?&7N7=3V?NQodQgVJSdyTe&G0mv}G_~r0Rv)pZ>M-_S_4r^5#mj{xo zwJs{gYz%XBLDqH>C6b^FN--HflE~>Eg+~$?u&Xh)c2m=V(?7pgZONGW-EvZjE#H!r zJKS^qYjs3Uro1F5gVJToED-GoKiUWb&Z$30Zg3Y>I8`ktA12*hi+I;!U^QdAH9EitZj)#{(f6ZP79rb;3{hm9Roy$V@u|ML5=8I%};GAPAl{EBYt zs^rx`h5-eE?bBMnq6#MWme0mkd!FIW&Z?w7iE7sJisVGLuuMuUK^c@TTief>;~Rhs z14a!0lvIEC=*0aXOtZjgg?Gu`1}QNkS%04F=ov)LqP!s}gVJSddu`}h_^CA*a9#b6 zuHdYbSY|2R%;-`29Vt4(zgT+nSMRsq{tA(^DRBg4P>RX;*t0d823}*pfP)*IWWMnY zp2>T^bot))$4B)BtDqG2CQCRg5(bz zJ)50Mk*0>JMQ9%Gcu~61p;Z`J=C_mtf-)$@Wc*yUSIDju$S|N#;XXO7lu8$|{v{*# z*|cJk+OcQT+kfVsXRAb~?&VSv3Cf@plkxi`vff;9qz?mz_Iw!a6?eJHd~&a;B*W2X z4gtb(a)$nU`gvaNLzX#@l0;AjrI?KG&+k-r!imo?;8=lMjb{NkP&{ms>DWSm{K$DTAF z&_^fG6;M(M%AgdJ@x4m=o;kex2m_={vsA)&X7USf+&t}Jt-i+jBbQW7hmc>W)L%Z5 z{+^OXPzI%#eEgq#`i5dHV?c%h@wZCFlTx#~^W`>qGM1e#e^nm*`OyabKhgqWyvW)v zq@)v+K`AEV59+wX`PqRC19ZApsyN2g6~sA-mkNhVnoZo{Gco^_a^}b3@8gL4fs#Q` z2BnycpE0bA8V&&&2K2Js6}i&%$2(ii?DYjQp9|CE$dZ<&(fLySc=VBf5hatL3`#K> z-=F0dmJR?J2IRjC(6W6rS8RV9Ea=E^_9^rJfrHYgcZpu(KwsV{reqP6K`AEV`?LRD zGJI+T1Gb#sTXK%PJM|fhM|>r-v#LsvNki;;x}9Qm<>+Xrgpy5A2BnycA3d$gKEtsm z49H#iCX=Q=yhHyz+3k4Ts;35{$8Fq?%vcrGM4;pCQc4a%8I)o&J`RiWzEJ?zHVBA( z%x9sRqh$N-__nx3hXIE3!!u4dGYB=3Cf@plkss_;P#iEK!yQfN4GMp za8q8Av$W_qa;|mQt2*tF+hM(KkxU^8WZN#I2xnHp^rt&DR~5CP>RX;F|6^@5%?Sf2CyA9^GszMXAe2i zwNT_;-=+2s~tNRRpg&Ir!|yAxMZ3Z#4oKrbNj6Q1@je;tlqvj zFlLb`k4_%?LMbFDgHlY!r`8{{zs3n<7?4NV&b&);vLrE!C-<&;`?}&kuNS`>e7RNL z3O*BaBRy_*_>`)$(|aAir+fiMRg@0|Wl)OA_!+~Zp$6VFfB{S5FIzczIejEDE@i*d z^{}<)EeN>Y{`Kwtom<%vxtdZ$PzI%#j8CvQwQd2Pt6)Gd=V#uhD#sTT^j3SG&^0Vf z5f;#lbJmyRI9ug}$TgH=f-)#ww#=62BjJ5d7;s`E+naLxo%r+7E4=Z`}H?b0!&Z4h^`d$?(AZ5 zu|_Ax)lo_b%AgdJ@%tnq*SX;f;xM3r@#{?CE9sDs=f-h28sl_j>we#i=oR07SYtQ( zgC6yij|629D2+H9KY6>j2OEaA2Www#Q!d@7zSGASi=UOuqcj zsJCvTF?@vr2IP3X@Kzlx;mnv%&feZ3T;wA(9T%lJ6HD=~n?#oRE2Wa43`&>DWz2Wh z0~rRSw)#bo>#cEzpu6p&#+PXY(? 
zZ=S5&6>mp>_Vp-97m0my+B;FClWJv%emA?7@`a!bN|!D3@1U2jfD8lZW?Rbs_?KGGOj_~j}P2R>+GxyhpBWt^jQbkY(rI?J50Q*}%B?1`+++h9s*frL0^Vq8tUw&?(ol;Fu2Bnyc-}hWAv=zQ+2m@yLE*+t}d4sz`taJlA zc_+K=tKXUKKf6uO5MA5;Q;z0yWNyV zf-)$@WPCQBFYoLvAj5zfm1nc|&a4SVG3*}$Wa(0=MwCa56+K(6H-1L%R`gJs2+E*z z+1lQ+eE~jyh5_5?1&##1iky(Bv;1K=dE(;Ox25luccp#JBcB~Y)^;!DD?u5QE|V`h z$-^o0Fo1z;?85xo9qMWCa*D;>tAwmy6?wf|chYZ{$nGnM{Daa=PzI%#j32|EzIg(F zt`G)%;`roV!jPJ5xc)qSUuxTy%5>NEf~{M&bnNs)=ZE!CS_sOZ6qE7&*+aDR3XoyI z*hKwvmgI(y-5Xdud6(>p*Ul7$Z#__}wr`v9TV$F0DXj!$P>RX;hdqM!&BMDDFhD-m zSIPd&3I7a>J8n9nZ=7!jMeNqOTsp(Q+glTn2Pka>Wl)OA$Nm|Us0hVc0T~A9Uu`ve z-m(Vz}+zdeXox{qy@aGpyA=f{p-(DBlRmpcIp@{j)s&p==yRLg&qc5+Tnb?fJzWbB%ouCX#F&V!%RX;XlSqZFe{K@K)|IZrstCm*si=A&qa;0 zlPU|B)W~|ZGJm)BgGfXkp>z_IK`AEVdzJZ*Qz}4)0iqgA>tsGYk31(olwhSqS$%<4 zBIHW8rZ`pDMhuZhDP06*P>RX;k>sM$pIIQo0OP}Y2AoL;`BGAp;x}=Q-wb5UyQ1KJ zIc|@8#STRNMd>CegHlY!x9u%sTjA>Zdly}bf$vGTqkyRr++CQ5yB zh&)c|B`AYZOvbOPIw~H+JL@puhUek-m3I#&ziDL@FULOkIc2IU5|Kl=U*gI%pTu1UU9OQ34Pi>N$Ddf zgVJSx=Nxev_?s;-faU2pduqz-&^EfktGu06vs*=nxAxDFfgHlY! zM?*K=HGcvb1{~7J8M0TUzm+D;SupzYjW>Jj^!510xU^aBhZIDfrVJ32LFux;GfU;W zPe6tNqs|p{fw#|j-uD*SxWq2g_askjz_S37Lk8b1_{cb6qE6z=VdXKAt1wm zsNkth4gz7dqCI2Q%+>8l#SN?@@3kZ(bNkfxBk~Moh@cEgF&RISSo;dXwG9Ij-w2*; zFtO?!9=!c-%@z4vT#L z_jUSRtIMT-sP!uNvR_alDJFBE*OGITVS+L!#bo?0^X1$9cYq87>_(qyt6I8in+#H> zKR$e0w-lWJb4)?{JcaJ(V`Q1RX;w!N3{E}Xv&1Ey82cFO1OSudGT{!!r9 zV4k%Ozw*0%{jXiWe?sqjE>K1Z%AgdJ@#FS<3uhFNVL;S}4BFMcO6A@cSBzVE%d5W$ zwH0C_`48n6K^c@TTjo=Sp@Be#0TLUGDlXPU{kp^P;q!s~wCy|fS+Xmt zbG|XUJ5UjMkupY52Bnyc-=Uey(}CBLFyPiO&;8bx9l|HuEGE5c0_P|a`PK(_o+!-a zEkmD6E>Xq_%AgdJ@lo%iXb?W%iCuNS-bhc9(MwaNi z3D(pJf-)$@WPGpcTKyXSf*lMvcx~lV`x|p_=^t}6f7P_#6U}S1)$DLiK{Z z3`#K>KZZH4b#R3;2+&qI=JArRL{cc7f3AjIC|!!tCH9c`k5_3dBj_2!jyg?H2Bnz% z?|Z#=YmNgM1{mrjOI1vV(LG+(S;Va(r(Rt#EnVZL&~|Tt5j}=IrT!);gVJSx=R0FM zmO!R}0JiH3{<9f>4$LXVh7OGAPAl{4VpzpddVY!hk`K z4=v(9V!gVkhxUhaQ4W}|PNO&Yv_GAZcf1gh9jJ2zWl)OA_z3XCB~SRI3ph3}0AT=C`_8x1gJZe7 z9ff)%BUbv~tt+Tg;I^(i$V;;uk)5au1Z7Z)$@tN8gZ;};Aj1Hk%5}bSx4q0Ae(v7h zY_iYtcRqcWxPei)^TRU?i0n-LLr?~#n2gUGI+7T)1IRF77un2-`Sg#|TCZ)sN5oc@ zSjCGOW&ldyv8<1sop)L`WK`AEVSFy^M-`)o@4EXc@yUG}w)a22wAsM#sGKm2?H=;Hr z?t3rbZi>E-;Yzh${kNB(|NC3WpcIqwbWPK^c@{GQL+uD@DUQaWEjOY|gYuF~7&Pfyb(p5+G0iJ_zVW(+K*V=< zadr|adr<8N%AgdJ@x6-1HZu&!FyL85afjCjZJ`^al>>(l*nf1DFj%WR9W!hzxd;6) zrYH3&K^c@{GJdZ57M5!VWEhaot(bXiqUxSNfm^v_bVx8=alY*9w!U#2ihVS)w!NtK z1Z7Z)$@rKg-{lm%x`6>nG>%k%6LIz5mTo%9t5P!Bz2kk;7PTD>&p)w4WN+#-f-)$@ zWc;4N)q^`f02u~2>NSns+_(0p%D2rE$N5+8Z#*IXT7FFHs*kMBNksOcIuMjWDJJ9F zc8%LHxL3h|sKuQlCw^3MQ+_7$;yt|a)(VL~SjcwN>e`~#r+}VMwZGWmWK^c@{GQNcwdfLJ{t}x*4 z(E8Njq%Scqm=u1Uiw)l2#-gmYiX(9)nJnv#$N|*n1Z7Z)$@sP8WSSCu{tN>a9S?!FXa;fG7MO=_i=xsBy>FL0@^=oQx)YQ^DJJ846}O}fJchx5E8X^6rP|f<*lzGjv#BRMNfu_$ zXiSn|-{%~S&Oi*MdJvRBDJJ9N?NJMcQ6R&B4aWQ>FQ0K#31|F1Dy0#v@-|x3Q{Jg} zW>Ar;gDi6x)svtMN--J#u`B*LF?eSk23%Wl)i;0GgoC_3L51>Lr%>SGi(jmUgwII9B&_|{U=U!2ysyPErt)gFB}A&MG6PzI%#j32juO9aA^ z0SvgRvThe+(!FeHfi~N8WjPP0V{)6*Dt11Nsn0|2&_q)M3Cf@plkqW0`ocxHSHS?o zouUp@$=^1)Q$O~G$a76RxZukDdf15PX?i_61MxLAh@cEgF&V$V&9Y4rJ`sfhyF0{( zoCbOx`!)1OY5nButh6{;YI;DYkgJ*%JwL}#g9*x@6qE6hep5pp1CU{W7EjsGQHK%# zS~|{>`}Z1us@{+KbH=P7m5_E2xG zHE5%epS03U?;;|{Q^N_$pcIqwEo{T-vSJ{^fJ;6~SzBz})O3}5WH+qZr0si2yy{0> z4t-fd^E4tSP+t&~LFux!t+I*-9>ZXOXjb~b9>=$BGD5@pL!*-AjYU0Sm75OD@o%N< zMdUvP*L}Hhq?PTng3vd1Asy=B=!#+KAw*81 zMiP`kDJFyCwl(&7+dC4uw`lLt!h+9Ny*{o=ebhO*Awzjzp1rkIkmc%n6V15f=`Gt3 zI+^i&UKnMy(ER~<5_aRg;hiplu#^!TZWJ|M#YR?GZ- zN5$O&Zbt4ttKPEdkhiny&*feZr_P8~>o9jw9kD%{&qVD(VA zpLB=Lm(PsUZ*0An5&11OnV<|xF&W>&4x4nq>slCa!9e)N!gt3eHPvF(!0i6c4d+V! 
z4`ufq&(+&Feq6|S4?C+w8j>cdlubh_MN%?Sq(X_Nrje07B4m$5g+%rU8D(#>%S`sj z>UUk|)A{{AzE}7C{hi1C$LH>^=j%G>ywAAKU?!}ZIr2^?V;GS$NQnexP>RW#f3`5) zU0sVnh5;3xF+sH!QP(QVdRX5295`?b_<0)6*}wo@ z_N()WwPOYLMH{!>TFD#ZE_9aTnEb#pYMWFHGUja3dxA13#bo@dLzu<}erEs%INsRw zyD-!*bGr21Hq#A{UwOO7v;S98xkt8I)o&e${dA`LP#3h5;&08Z%nO zcYXIB(8%2yEIqM5@?pDRuX`uw-ll8Fm~%-f1Z7Z)$@pxMoBeJ0dxil|hw6g#2JeiF z6&Xq9hV?%0D{A;q&CfR5t9NQGBIl7l5R^gbk};>q9&P|K4A?mlCuvb}$G44Ftj%qC zZA_bx!HHYa`WGA3$0QN?BPo@j3`#K>zpkbG+0!Yc$Bj|!Y7vVb ztFHxQjwr4eyETc(`J^<0GAPAld{6RVMl1u!Fu*r%=K;sy<28}N+iJDL%d&l+8b|o>UG#g!K6^XuyQ$)%l zD1%Z=#?SpZT>@r-3+u=G88R4fPCmoJQ`mCv6VYs!$+@)-aVmP*(aqxw&T6dJ5xInvLr?~#OP01% zcXuCx6Z_P)$Op35X}$ERSKGJ#sy6Jdm-{?QXWAWlwvZz_cYo0c0G_`z=Y(8 z%ieXDEHCdq<<>B@gJVnaK>jK^*+{SUOjktyLi$Kh2Bnyc&zv0JrVg(zVZf*7V&d9X z{)$cxyC(1TRhRR4Uf-l!yV-|6!x?=yq>PkLPzI%#j328$ZX1D5y~2PnuDByK2fgTw zxw*4-6mxvP#j#1+a=Lh%gw>x$##~M+ASi=UOvd+~A6!qvXP97sO2|V8ZjY54)mNH- z_D)kd|IAdTXq4$Giq(Xu+C|xpUv2wFYAj5!1ofba7Ili$AFCTokwPG$C#l*$@m+_IzBoTgb*jcYv=%-9&B&ncb0F z_~9=Ne#3)-$kMJN6%mv{>5`>g>Dy8UWEdc3ot4Hm-d>-&dg$20M}n>wgv9k5E@<4T zI(iMAA*&`86O=(ICgbN}4)(I}873HT_Pd<;PN{*`$ctw@4ive|&onG6Y8krx%I)|| z4P?wUq!NNMD8*!a@7XIQEDK~9;4tXQxFVSGJLRPwcX5)$#zXf@Dpz+{9e@0tite{- zNu>m3P>RX;6DFe0164qV0jIp>AM7_u^ORUy+`&Ly*rdMTYCH3Az}eW07du$ZB9292b0f-)#wvb5i^mcw_w zV8AJ5lPzL({YMQFQ{8@9gebltw+nrHwUhC{N_}+OZX#6>ltC#bTm8AByCB$D4P+Ql z!^d7$DVW?+kp%NQpn#Hw`mKFGRA3#3yUOe z>G1S1jj!-1HC|ct8cwFG5Qiplty!C}4a@TpfAU>_q#^E~{nd=_ad*GlT%u-~%J z-X5hDY`nfoq4fL zY#5!P=_EA}ltJl|r5&i-0N)va0TM#46Vd4rtM{u5c`Uk}AtmMga@b(L{PIaVMmJ>4 zU8F{WGAPAld?wEB**x5D!+>@dOXs#AE;%~w#JY6PO0l@}D{ryt`3u%+v7l!L-J~Xh zGAPAld{6S`a6B!LVZi6JBekpzTUbsx^zuJS`nrc>~e~VL(CnDjWI9j{Ty1`;B#DWZXZWes|(V2Z>fS(}#-4-$^Y5Wl)OA_#B{< z)u9hSh5@%3P8(G}VG>=peeQb4k-n{$9P1;hYcxj=!jTJ;>7TBefBfK`FkTu|Ibtx4p3#{JA3- zu=&Z8u5LMAzUBTGRrCYXn;w{lX>n6lKemrQ^Bs}nv zJ)#5u{pbH3`&>$ z$V@%D@_-BjSWi4UQ`qoQKt**%BlqJ|=`ZY`9HWi|o)tYOWQ@o^N#6*{pcLQKv863V z3R(wb7_e%vX|K{Zk*Ry_bJvU#TfcRX=w4Dyu9!R*Y@b8qVbXVkGAPAl{K4Ojrj#Tg z!+_FT8b11EY)W?5Zv2wrOa?p=4>ZMX?4%1*>Y|!ISDQd@%8!zI3Cf^!$!Y5*TktR-(rxp~b%n}XD(^AQDE=1H zGtHQMppu@)&lGs=4kAyI1_{cb6qE4_`71$8@I(OySQ~g+h}Go`7+6~Eo=UfPHQaZ4 zr;)dx!>^O$=w##+=?6g>lwvY|A@8oUxC+QH;2CFx_WH~6o=nn#pCc8tcIGM>u~k05 zus(P30s7d%G--&S3`#K>zvQT|IuAdR2m=Zd7A9q1KKyloZ*b!=ORk%RD;A!xIw(tt z(u9s8OM8a&lb{SrF&Td)$WI_I4#+UzYqUVirRNrpIBv_HzEbSuGnIb0d9k59Sj#>2 zAR_-F4HJ|>DJJ75{#C5@4}c5<)--EQD!zG>duzt~`Bjcx{qh}$IjK^qVQiX18HhYf z8X+iy(k1KJk6Hc{kYRwlYlS%7b9x>_vQY5ux@}+l4APn{U2mF3aVvg7Bp?3lGVD?ESHP@JSB-f@bH^7Nl*r*n2cX$mEV?z$7d{HmGcFP zQqNB{q4JlBRxPRXZ&VMgWz&9B&h!qw`Tv+aMNkH%n2i6bl=O?>1s)8TFWAUoEX#EK zd7JZ!<)5w?HeE|Ra{FmM2itwWx5#?7CQlQTK`AEVH%K?EPRIo^46uIcZt8uvGI2Gf zlA}LR;Cf=zedXuJKYna{xePs4*^p-l%AgdJ@naP~cf~Ov!+@gaAGe4qh^KCFzj$b; zGWU^9jTJ}pKd^V{985(YzqKX*A}E7We9YMCe(0})Bj9u&1~l`1iX3+;OQwr3_)ICK zZMw#AbDvCP(W4Hw3t`C8wj?&gbZXY!4tKhZJi z&mU~>eui@Hq<8@HyscEU3^ICq^88=<|9rK9uW|e#Dwzy@IOPrlf*aPJVJ{TiTYc`h zX0~ZOuPWDN8X2w6PsMmwenMmi@&Z8_lwxZfe{gKJc_%yzg#m{8tQB&1IX5b4uG_R2 ztx!0xmw6|`$MMHEhhlUZ!;!p5PzI%#Ec55@2zQA?3@DRIFi#Ht%@~*Tef*A& z#T3)36?JDT+l=j~chJcMC-U#V%KvR!Tm09`Ui@cUYi;860X(cQy6F+JYT=bBP0HfW zmD_c<3|JYQI1;6neC+W1UC1JTN`B1ww>NnI8^7=`e&HX!W?cpNAk;EgQD`}5wJs-- zr7S=qLxsJ~{lVD#c`s{Ta@o{pbn@MqY)zP-{}(_1&!+BezAF~2c383BZtbGLOTK;W zUc$4hciw&7Cv}0OyWJ|(!T(wyGI|%X&0qQdeN)GGZmngHbbyQnjJ%w8a{fT?)v3JG z?T*XiJIVpQYCc`hQyj|u5!sb&OHc--*anCH$Rs3#Wq=F=4yy?BT>7M(UF2uEV?ypc zzxT_VncwJlc=3No&_-l8vK>JglrH&^jTd~{0c04!v-$@U&*;XE7nc(DJl)X|)~WH@ ziDP=`&Sy1&Tts#!KOrcCQcT95NL?G53FrP{fEw>|+qBo+1Q`N>S0nGxh-{f zlYfKM6#5+*53)T$8I)o&e#$Vs=nUWFfC0t@H!Wv%sfW%RYIV=BvD~N6m}?mwiGCg4 
z&4WI2?n!naD1%Z=#xFi*jt#@_Lcsueku967bx29??p`&jNubrQ@B5MH9kynFw$fZR zvb4R(js#^;iplt?Aw#=v1dw6Ci(7GF0nAn}-`k%39Q<40;kyQ|w@HR$20}(N=md&4 z*@>VGN--IK_pf#&EC9$b;IX3ky@|uTG6#oF8X5JyrGF_Va#TGsXHj44O$9P$AM#Uz zGAPAl`~*!xUJ_2x!GP0-p-Cy{rPjSU)b!Q-drqHEbD)h80fb2m~2BnycPu)D!9{CDn7;ySZ?aJM2xH1x| zNU~wiF5Q1~%uAuOLSmDO^k42@29iAq%AgdJ@u?feiFWwpF$~z`e7wP4j*`0G^2Xw2 z{@}D7!^WS~gw^LXEg~9`rTv2JMNkH%n2bMzadW#Jyh9EHG`krmM5p+&R0pSB+>B$4 zyoGMO*J)gJnsK`~`lX^EvNu5)lwvae42A>^-6)V@0QsPvk&??}6T{+OMU%bqa;-j7 z&t`g?&$|mAK;LT(Ci@VSLFtmE{adnk4Ul1gn#*3z(nq%+ZwR%y(b$kK-T3UP^MI7p z;~$*|(Cdy6@-u=mD8*#_84TKrH294q7+@i@ZFa2y$78q3!BhMXzGz43C3tzzrY5=G zT&0Vw=TNdQK^c@T8FP!73!JWk0o_dN-zq3x&6V#-tCg;n99%yaoswM2#%6COh2Gf- zBl{7QK`AEV$7hkRHEBSG0j+KV35DY?YOe5$k@9zGNUwEg5qvfLg@&zxADy}hCqE}B zgHlY!CkJ&)_bLJz1}t6)$hmR%l&7IdLTbZRkH!t6Jg&cICy%P-`9~m2JA&*_PzI%# zj2|V9ylCNE!J^X%T2O>w20}0BY6qE5i$>C~a zQ6R$rkEGt14_0er8vKP1x;=~!k3Pm4sV~Lj!&_O_jmXjD7X)Qcipls*9qz6R@OeQP z5aMOAc9+M+S=p6qHw8!=*{yATcP2qcl(AGnPaKh7l7k4!pmfQ4Zk;&q3S=0dyx`{4 z=Q%ACw%{0>wcC`&Rzrp4)RJc^(kqC5#X5!@Oi%`;n2eu$a%@;j0Wu6o`;_h>wy%`+ z{;^#`Uus6?6Q`X!g4=i9HyAp~VmiplsERyTVK9-m>rrcBk*y*zgteChf( z^PYJ=xq_#Ckm9E%vzTDG4_Vr=;ja-L_|%Yp z?&c>N!QQAOznnEY*pDJ|965}j3`&=*=a*l8cmf#))Lt7toxRZeu7M_oA*iT!Q}B8I zed6V0=fm539wPEtf+^WH7*pqwQT|*fr7X(T_P!m}reTUaOX>S5v=uSkeZ4f-Qj@ zO;84IvixRS)-;rYo%AgdJ z@qJj=#~1L54h9H>IQVQF6S!`W)KlEO>77uV;rjcfj6ax_b~vC5?(q+|_}% z(6E4yYxgba$#|v68+PANy>_l-S@@+9(Xyx~#w*cVrOD(tf-)$@WPHC}ROBuMWEfD$ z%PAdo#rEv&(5RKdeVG;G3vrJvpS+j4xIDlJSvn z%ViwCluPnbVNY&e)hbo{hCM)h;A-E33nG6Yzac1t(j`mVnMP&>kYT{$i4Jjw^looa z9aeHVAU*k)Q4&AmJ^JmYuitihn=Gm+h_e8`xy z$VmicP>RX;m&cmkXSo9z1{@u8)8>;XS!ob>%JJSYr;mFM&fPmA{mOUH`Sof<&L+Pn zD1%Z=#?QB#9;c`S83r8V=weKA;96}krY+gdIoi^)pLMLOj^aCby%xR3$RQ^altC#b z<425TwFc!-1_Alg6Iow(Oq@AwQ7trZ!f5~e#~k}w-woGCrYrQ3G3Sy~2+E)olkt7n zIgg|8{aF}bExG$1>$hVwo$KSbHhMpglkuo|xVv}ryOke~*&=ct`2#^2lr98TD#;oXH&DUBj}s2$4UMQwhqT6qE5^6>s(pGLT`w zkFP@N7d0auWxvRldiL6MV?B5U1UTYuF+YxsaSePzI%#jGu>T4cfxDCt$$woRs)DmE9pe z%Vz1>y73sl_}k}XA)lr?h+^O zskv&swRbj?K^*<=K`}X-pbSbe8K3JlWVtm2WEkLE%Gu^z+OcI|zo(;0y8l{*_^O&j zmlHqbwy?cM##}ojoCG3& zA%7$&gVH5SJ4gMQ0gz$9CjrL~oMrTG=LSYY8DbbB`s?q;jaX-CogZauK;$xVK0z6j zVlw`#%JuAoUzvabRkzI7d{K*d969toC80~%kbj7dzNYX!{V@5}X+$n37Z8*|DL&?Z zzP|N)Tg652^(|Py&^z_)a=1X{cCF?<-kr7L>$uezCsISx(kQww5xRn0_*WW#*kQ@n zw-}d!|Is|k=44pL$VkUX^MC*KPeV?68x=Ibl!dIlpR&HH&>As~jLMo1C%+j;&g7Uq zF;5e{UKnYpB!Bt;Z}{^Cunf+Be*p~s3wyww-NPB~%VCMxuOvpXHTH_fs!d+R?h~5g zc;`GriaYnHqHsM1fpw- z--JoobIohLTtMV%awS0-lw#{1KSS#2S|tx;7_gx{nUc?4|B`n|vR^5EJH6+(U{~Rn zqs+|7u^Nb6L#`qygVH72`yG$f@HY+vrX*v6M!FcpZ@RivKKN0;KJVUO6|?W0lbPas z^pWdYay3C2l;Ybvww}4(C?5ne3^?!CN3rBSbA+bOnS8j&*HeOX&P_(Wl)OA_&u!sX?kWrh5@R)Z$;%j*2XKSZ%KG`xA*q@@?5$Wi~hs8^Q~=&Tu-hg zD1%Z=#^;MJX$ZnQ3^2ec(niQ!HFSFSAi@#3)@JpCn$qbOvcZU zzWp#Y0Wu6=b9GaX)lfEIxheTWtx}_Q$f9|(^R0Euc)z|v=Zl)i4FqLSipluCJdbYb z9FSqaUiUjTd_ga+yPsw#Iq$#`a#Bm+On-y;i+P>%tjN-CCN~n4LFtmE9j3~21jsNT zyvSeH%vVFtP*?wi3}f{&_b_&c3f)el@<&@`5xIrjL{J8$n2etx)m1lN1~LrT`|5j@ zvek6S$BO-@h0G7-u;1I=m$#$i1GOuI3z5H)n+eLG6qE5Yq`|t7QXs} z=QAVu(_&+4vRj77&+Yd^XwkI3!hR)R7pU9z;}w?%DLQm*Iae{@EQPrT#v z&X2Wp*D2^Zcn7(SpbSbe8UIzqoI3`O&oF>)CAB%``dX`!yI%g19M%zGzLq_OG&Hrd zQugY|m^;bs1Z7aVWXvba4#U|w7$9Gg@0{Z!US%Em@PUVMEPL2fm5IrH^~Y%r_UuOF zE^-G!8I)o&esAMQQJ^c3VZiyPdmJZUg8yBfH6+ z1Z7Z)$@rOpoHQ$ZZ3G7F*UL+?q4mFLQB_ncwc$~e!uY46$VTH%zEY18WX#{lT?A!N zx@2iLp1-jj$S~lHip|@Nb`g)(_aAklsz1_GYa?r?~_o`(rB7Y}$6O=(ICgTtF zD}SMZzbY8;!TP>)!srIsV^x-ix#d$Tgw9XjJtr`It@S!H`qXa^`5QqQlwvY|lx(-` zZv`?8=uWy~k=1{W>4^r(;iiKtclu|YH6?c0i&mTCLy$4|lD`v_K`AEVM@gv$iZ+m8 zfMY>KUY5&Mao!Uea<>#^m5;vN;QeV+{_Xqew{IbGAGwF13`#K>-?m>{h3Eho2C!Dz 
zIFda-`G~rk1xB`4q={>goN~@B?-JVav(>ipQP6&$1H?jxnVp3m8Rz<5PsjY&XQBUC=S!w^=`wChib575 z50i%o%AgdJ)&I=eM0lNrch6veN1KvMP~=$53x>}&=HjJ_L;Fn}yUe8fHu`Cydy*0I zPl7Ti#bo^Z5A&Oyn1Kufw4}~a-L?res%Wf|+p|?pFo0*lZ!CuYK1Za1C^F_z@-RUe zlw$JcKVx?F|MDHkFkpI}Et~kxsOphScTwi!ab;N{M;RY!IqrFooHvEYW8@KnGAPAl ze80^SMLq#!7!YASmV2T>b418IUB@wdj(*RM6}MbfudE6??x={!HUn`@+4z-+JV72KD1%Z=#^2HKj4+2! zV#0vLhHs>Z4`zNBMI1J~epbJs$a>u%v)f2Wp!2D9h&)LiCn$r`B}KVq2O(RKzh3@{(kYLM=rS3mVp zs%dQb=qDawf$i@5*IIaN8O=cC8S)fC8I)qO$e&-;lipc)#DD>t#j5>W%f9L;McfVk zCMz^x`k}6R#BJVb>cPxrME*sdCMbhaOvaBGN0+5I02vGLd$s9`fx%TS=Z_k8Q#z-A zoPQ8tIw4Wk-S{XSk!Q&>1Z7Z)$>8^lz2eA~8SM*Ryn+GGCsMZ-TY6sQd+AjlEvu<~ zW%OR^x0$*xO=A=nM4lu6`m2mjh0^`ydoDAa|L=P)Gn^RTQ6;bs@UY^{{awt~dpg;C zn7#+9#PlC2ncKM2NuTPK;tc;-@+PnHf740Iz3Z#o+~pO^#m-L()D< zXVY%>YRr%h?j1jHlK&9homPZjAkY1k|Iag^_!`IeX7X-4SwQ9j0Y>k`exyyMt(9qY zxfwqFy6#;nc}V2mMkWl)N*acsBpwq-&dGBoYm<)pvSxNV3^3=); zLrV2#b7|$b&{P3F)t0}OL!aZ8aekSZuG}ph(T)bz zXDv-ve37MXLwU^gxA!Xl^A-k5F&V!jZz#&c3S=1YE#PWKw*Es_)+1)F^J`O9Hx#m7 zKE@E)l%ccZ2_oB4tO?4X6qE6pF_*5QG9beM`6zR?rzw{d{ny-UZCLK0lPlNv_~LJ0 zl`GoVTY@qu#pJtxjvaG-%Tj?11IjM?Ob!O8<~Y#@uFwpmfA-`4MlIQx47tZob0UzX zZBMZyD1%Z=#?Q#E9lLxS$S^>)H+TJy6VK)v=6iCEZ(49(wtn^97abvfFZT(f&wMyg zo)DBlDJJ9R2s@71UH~!-FuORSILJMiXE-SSJ!NK#T#nQOwj%=u7y84^hLJHlQtS!J zpcIqw_c#}GwI2W(26UK`Jzl0-tg#RuxS7v-%B?$N2Yr8~0R7liRWU?%qBszgLFp1X zMR!>nkYT`Pu3=L#kJWc{r&3zvx31N3R2^AAxjUiKU?Cj6o_$JjBq)PYOvYbLkFMCB z31k>BPfBtb%O#gA&pE3YziTeLQI0L6%lFw;l~`o~WX#SKCxS94#bo@9EdKE}w0*_U?vcV+CKjPG~UPeh-9b)&ctltC#b^5eaQl5jL#Jzt$3BXj(I-G$fzK ztIoFy8M8OVlb{SrF&Tef_Y)}={;FWW_UaqGBT4hAOjpd6vvfkn{o>DhT(3HqwXNz9 z`rMii#fzW}N|%f|*7Kk}kYRu#DgNzqxo*=xm@b z#pkc`f4k1{=K6ovIo@2@Nw@ObNlP$rSdkUN$e$I19tYb5uED~ajkKc@sn?|~)SlYu1giIk zGob^2PVprugHn8vV`JtoXSe}m7@#NlYC(U-G*`o#-l-zV;Jf9njiNKU_Ke-}U*8}r z+n?e`PzI$-HZ%T22{=gw1JvH>m(N7m*|0rx9KL*W;o*;f#~(gyP+5OSKxqb%11Qf4 z%AgdJ@k3=+*TicmgMjH8H}qcw;eNbx5qgHlY! zr@NgDmEiOk45&;Xuh`2|SoKiI#OH}q{M60St~Ksm>*7Lm=0y3`#K>KWuc9 znc*waFyQo)VQ0M^^jExeg^GM-;W6*q1w2L?}^V}5mH)mij&9-)+Af-)$@Wc*}IaeoRgkYRxSo=xAgl-w_e z%q0p7DY;C_1P4V&bDWnr?N)%^rVOKm5R^gbk}=ah$e{x=3?Q%UVyg{lyV(`Y(R|6- zTX`f-)$@WcEN0x}HfO?xHpY-D*%UP4##;l%=)L&ZhrXI7mk zE2H?Kuc<^)A_&T$6qE5^)lET7cp3-;jOga5(h-o`;vO>!7a-$55gP%AgdJ z@!Qckf=)C*h5@6S*Yoi0%@mgjd%rhd>Cr89hl1kfXVIBD429^V^DD|rf-)#wvb6o( zHpACcU;yn7=Y*Qf;rY(qAHR#&GwWnA%}rGATm0y(s<{){wqq$V1Z7Z)$@t#W#a4tF z$S|NP$IRr&%mUpAZ^KM$@VQOfiq?qk8V-)94?3iT$Z?cc1Z7Z)$@uZPolfB=kYT{4 zM;nAc1Z%pBw^?n^cMW-5?Up*v9hrFWV)(k-i2RxoOHc--n2hhkV(IKI0~rQ%cSPis zy*zQ_$Y;8cxDmtdN&C3Rtf%*!Z(6TFi^y*%aRg;hx@0|DAKWSnWEfy1HSqJxE0Nq? z%@fDD?BZFSjkxzam>nYrY9*i-(QhfQ3Cf@plkur$)9Du)@ZD}@ zjh4o-Pg}#S`CQD5tLJy3zvp<$8-g+@#bj`^5WCROZG15WE;L}k@l{L%fk!irWPW|I z=Xk7j8N;V}y-N}F#$vaFb|FhUf%5jR@_)O~c=N9djW=BXT<3%*vj84e#K~I*+3PQC zboFg);^(k==B`(6YH_#2b*m7c1;Qs%;tBJw|HZ%l=Q<~_Rsp{H_5f6Ldfv-a&VO*| zent7_4r77c`%3lHhZ|LLrh~@N2glw~68_5n=j$ANjpL6p4RdY_12PQA-w>T+8F0q( z_PZF%W3N2KSY3zI9XhPL!%0l1k%1>s5(&zn6kp@mb&kQo>peh*0r5gKhfkTUPY-%j zM}4lz!un|1evef5bbpae=wn2FPkBdB2Bk~3^3v~<@Bu*>urgPz+GmPoPOpsS?$fl- zGLsqy)Kkk0m1(Z)m?3g9C5fO6N--HfJNdqR5I%qa13IKDbmu_@~6?hZN<9| zCYL_wis5x=$Tdae6v}&oGAPAl{4BJ)ehd6mBMitCxhq<4{`SYgqj{n9WBk#HHTJB! 
zCXAkHD>>gI@&`&XK^c@{GJd=an|6cWp@ji{E&6-jm!JKvxnnAT!)(L(vpM&BdS<`e zW7nuUgvhCs6oN7+#bkWjra$vM1IRESen6H_F6g|JUDmm=tqu2`^)^bkPcQ}DvAzWU z429YIxL-qBDhHf$Px;yjJU2?Cd>|-;(k1Iz&VR2NkYT_O&6x{(M>`LlU>(jhLCF=taU z3Cf@plko?~grkGuOA9byGiXz1d==#9vElpKOGC|$Cie_90j z0T~8#Ka^5-tBnH6M}jga#bo??cC&gJ{O}eGxZcC^ zdxKV-oE)2IiW)~8L*ye$>d}*~_Y|VfJ0k13kdjYO2Bk~Z^BRRdxNXCLRbxSnM~jd5 zdF#Jizgym8oUOJ{fze7i>Q`tP`q_m~lmdb>D8*!azx~>nYZ}Nfz?4$gSHq`P!ItB- z<*C-CSJoY;qxJGech>Exb4A8nL@6XFgHlY!UrzBBd$Y}3(Q zzO4KXCDSn;kxMDX1Z7Z)$@tk&(YG3S1_%Q#Ms2!yebzzf{@v8P@|?3OsxM~uo31_A zYh$N?epK!=rG%giN--JV!ulH3xPc4m?u#KD7 znP)2w_>a~H$KBxXx~*v}{%IBZC&(%(wS@WnfARVMT-nOX%twOd4lB-mUQf+#ES&sY zFw(YDVD+)phpKGnqE5B^r1E(ZzD0~vD-rGcOfN--JVm#b~k>02piy(?hy5hB-88VSmv6qE6Nd41@S4?ub{`BoD0DI>Wj^ff z+_SSx7rKx!*HfAa%AgdJ@qM`({g-(l!+;lv3ef1jX zQKXUbm7oktF&RHMOWE!KUzLFYr=tA5Ppxq{X()2z5`TV97AaKhrcThB=qzhxRbzR@a*dsordE$5@>fa+K^c@{GCm*sk+ZlJ z95!IU*d68AQd`$&7aMuy^XfJ)3eujD`K0@8-Jw<4mk_y?()m~Uza6zY{&m>s_~)oq zlS7-Nnt#T8S+kUMu#w+N?ZWBH9pxH+0BFwk{i*NtusC8D= z3hu#SMa@x#MQ6GnCf7ZOtWKIsc1S9V@3Hbth~i%0AD%Cgjv<=#R0MoLn>WY7B8LH{ z3{1t+nMtSBD=s|Jy*<9ioO<-Tn5bhl4~ZF_744*a|Ev7p*7&!7E%I;wtnr?Jv=qR@ z3Ww0ly43LH%-@(J+vSb7-;zuzrQeb}sQTj374(K)7o~?V-~BJX`=2$wX5F@8u-suq z&225Gwktj0Ie+%Zwaw9YMA8L0_R9Au&QUDL$a3$d^!}Cq-`6;Pkgeuw z{`-jDs`mSWXSH+BCk~!$esONS`r04mZwnCl8>Nq+3`&>mT{3@p83P#xjLL0GR5g8R zve)&}?dujNX&+SP>6)C=I?S@Q{T(8Ir}Pt)K`AEV$861H5qQ@D2F&-;Hq2#N5AgEY zFc>*mnm;j_%DS4~?2*OoW{Ai=lmUV=D8*#_$Qd@|@C(Q=K*F)5$5%?RyZDuoz5k|U z=~qu;OzYeyvjU~}qjSQ<F?rD8*#_v?&O2>h$1Fkm_3)*pSbipmwccdkAqka>BA*8M{~ z^Ra2e8*k97-G0gtK^c@TS=!Yi!XzNWfV-xn<#wxio*H(h?N&Osy&+?;o+quvbNWlj zS7Bt#1C*ZxWl)OA_-u3}lc^SvVSs{^?vom?Lvv=c?feg}XXhGK>O5}I(9(ZZ#!Euv zLCP>e8I)o&J{#Svf3X3`Fu=Ij{`yYK@MT$B6MVy}u4u0PUKvI6a?j!Bad-4Zp&yhH zf-)$@Wc+-zwbuVPkYPZYsP6N#9a<_(EVg@R)-@_@_jy7N7MU&CMf%H(-4JDzpbSbe z89z6ZEloQMWEha1^Wqnyc+|&6IkFP3-F>%A$s6O1R==6XL>}BjmiAA|7(p47V)E@j z^P{S}#MS~C1{{s}B62vefo!AL!hKPh*@)Nm!M?HO@9!Il7Nd7shAHC&Wl*|gJ!{@8 zgMS7B21NfhD4X%7*>9h$f0<4|@vxMo%4H4*=H=J->02XX9-&MSltJl|rTy{1&ow}X z0gEXdtb-5ZsrTNGU0v?^#J$3Ga8T;?B;!q%$!*_-VKj%gD zmB@+|KimD4C3w##MuByqb0-mbf-*x;2BnyczgU~al>Zjy%wp4Itjz*w&mnJE{2+E)olkuaZhZ;=|kYPaFK)Afr`UbxJ32ah(zR(|~ zOm1US*)<|p!6$;w%uZ2e3Cf@plkq)?O50UkAj1HEbK@)FCj%TFYMgq1J<@`XmK4nI|ZNQcTA0&EHsO2|qIe1ES?DtIL-a8+_+s%pR$T3oCv#q_bY=xRI-) zt`RckUz7!cGAPC5TYvVRj-Q3#=O9*anL#DD?! 
z49fl7)28jN(AS@T#_lD(e!D|jkc2yfexW}44Dw^D9YGnCVlsYplRT5^31k?sdUiuz zjZ$w_Qu>;ujaDpS z-QQ`&D^-Uz2Kobow5Ju2G22od2+E)olkxl1&-aYM>na$~*0TAmdu$(Hm*_TuJ1?C| z48lz(3UB?GT;*N*3X$!ojs#^;ipluzdE+v6_lv=6W3lZ|mqz3#R40NmD8*#_h!J)xARWjsfUj70qOpCT{><|ACT2LYE1id=Uk<3+Vn?l(Zs9`Yxmr1JRnoW zvpm-T8M6b`nV<|xF&Y0={d6V6J9sc)IQ^>px6g*ZtiDLFR@oGeRNl3_@ca-%=(@zB zHHhp;bs;E&QcT8QEe&cm)dVsOShItfTWV;{)*Ef!Q3@+O94k~waeH|_48$hiLa*qY zsICNMP`YHy**ksVJR}TQXyS0;Jr-|KBrPKt7H(BlrY4bi?U$$&uWBv2_k2oqBPfGX zOvblu+sjc*K!yQUJy-4cgjUP#Y7Xc6UEY3=^5u!Voz1Mip`Ri846-xTouCX#F&Y0P zfWeQQ-av){<;LHh)=PhJzBG3IXTEl||G4Y zL=gt8`s(N=^2&!>gzwON`#M#Pz{MwLjK>d2rJEacBC;FRi=YfjF&W=`?w=`uvlTGl z67P`3?uTW)1zl0`oc2+YYrdFjvt^y7-3n+@05S}4Yx?GV zMYl)eYg&ay+sakZF?&#b2+E)olkpF2ym_R56UZ=tw<~!{Wp!wX1i9 zjBodKK|h1=jOtHN2Bk~n(=|MqK!yP%e*SzrzFkc~}--Hg}lQ-OS!tZ<>zI zO8HU)2+E)olkraiL|Re|fD8jJ%GX?G^O;T9t`=iXzMy+z(5iWzwb)>@#SVq-$kO(s z1`?D(DJJ84&$j4N_}(%M=yd$4eJ+vq!Q#i{17&v{@*Ak;zW(8hHZMPFqR){%r@kO4 zgVH7Py9EKbg~5Pi{-gPiC+2R?4hOrdm>0FY(fCzaC|A@cIIA#(jM<+WL{J8$n2diC z;8<)f{OAM>c$ZN_EucO)xO~N<`xk9hx+hMwXmZDE#rjQ%Nr0BSHn8I&#=a~N9= zeDe+lFta~+#<{|+>D(K85Bb^VUBb_@57cWundz|AL_c2>NDUzE3Aqm%d` zB!AAU+x?7|wXtv7?D-q>Q{~;==-X5A$WW*n^KaK5shP;q4x>g9ltC#bOQ$e1IjFA2(^6qE6*8}o=^VIac*7NIGPt3ETk#v;6~1Ru`}{h%Q=ctFxl z=)|oz+=v`WjUgz5QcT8=&w?pI@H+%BfM&rrXfU0ALgD@E6-R?@3(b$Wm@OQtW9NF! z#Ei&M)K>&$P>RX;JW1S#6geQnfO~ht7g$RW#f5v>kPtqR9FyQ#Zh`y0WYv?P}I$=@$92a0*v%%kt{zM{S%D1%Z= zR{0~-m%b_kG7QMhor*of7<_SSZIuCc?-{OkaYw(ab{Rg8d+wv>+p*NQ1Z7aVWNCk# zE}{oA3<&W^yiH%O{q%Bq^M*rr3nOz&9GshS3r)7WttKN|SR6H;pbSbe8Gk~LVI~27 zR|E!pTrthm8MB2W?YzJ)+KBvynn+LvrI`GGuO$z-NB|iI-0kN& z)@b3HZCsNqpfmMb@EHC2@9%mH$DOl-(dS9tQr{7jLFtk)8>|qE1@bKrutrezTS)rt z+Ef~Aku}Gk98UDM+Fimpws}aYs}>n^JT-}+3`#K>-+LZhJu3-h7~tbCdOpGCVu^>J zV8XJjOuf^u$F>V>&H7H&%n?K61nPT&GAPAld{0ssw+(({0|s9p|LC+dVsM?Mp)UVTsgaf-)$@WPF}Pei;{>--ZG4&I2^}esTZ2P{`W%WTWjN zg(KVQij73{wAor^kTJibrVx}t>5`>wnZ^a@Jz>Dtm5v8q#OA8yyFXFgVzSe9)!RT) z$}-N;j{9@y7Z8%D9|+2z6qE5^l@Hgl(?Es+9}6#+TQ@DZ+2vF{+@#WWC$^7cjVq1T zITJsIFl5Z{si_2IP>RX;p5(o^dlHagK=X!8n+9~0Iae~~ZH?IWg^juAt?SLEvx4{L zc+m6hWNI2g8I)o&zJ(o-U*ia57;r;A{_+Q=%Ur4P7j@T_FdcN!>m8a~Uunb1zXd&3 zrBKre%AgdJ@iV|kmf&qbh5;?lk6nssVXyK|?Ay~5>G6hNkg=YVpRQ$N?MZ)RX@8(* z5R^eFCgWE()Z|^kK!yRQBH0srJTEEzC|Rq!YJt2UqRLW|s}taSSV%}2kyELe1Z7Z) z$@mr~&?M~wWEjBR6!yVrfoJ*58!`E7K7};%WNS^2y;k1uY}3)BWEwS#pbScvtY@+Q z0r(ax4EWB$CAjeZ!?pzdww}!mLmL~fdABa=C9E$}m_XkpO{ZoPltJl|^}MJ`+W}-4 z!1&|pgTm@PEKm5V?igfz9Zj4I;fhwU7G|LdLGKP`P;&^%pcIqwKe1+if*C$R4+F|4 zpMJ1S8Wu3{E{u`?S=cp@H>6s`p?W+_@YHc+J!evL3Cf^!$oR0g z{nM7oqMmMla#8YXmomw9hmNjHzwD04S=2m&GAPAld{0t$O(q`5Fu?B82kuOJp^m-7 z?x9x2FN+_l)pRDE-M(XjQ4jsfL^ky!K^c@{GJZE}Pf!M>n2gW3-t$)92xJ)W>+PEqTikJL{DWl)OA_;Y~v%aX-_3BnRe8V zmrVtc^QlDyWl)OA+x}c*aPHpN24omOd%|HLU6ijq%iu~$fQ+oz+hv>5MU-vxKh4^q zFE@h4)qg83y?IbZXqbBcMFA2ug`t|u5B!+`0tFpL29zEehj=C0RtvV52c^JK6Cs* z*ZMN!C;WVs!xFBPDJ{*}KVmA#+Wtl=`$zeIy_r$E{@)*AEnSbjI;$0X_zqZbSYZ|x z_J|Y~sut=TykCsYGP(_p%7epcGr>_z-0CNZ4y2!vNOj4}RTv z%AVKne&F-TG>a!L-&?=ds7EGM-wdom4K^c_dKQ^|u>Gv7rgVJSd+r{q@3CJ)&WaX<(#o3P@oKWQbKEh=Dt?uUV znAFIPAU>N!bi!06sg|G&N--HfNf%0MjRF}4l+$i85#w&3cfYqjZ9|}nbX22zgVXO* zr(^3R(MzH#QXN4VlwvY|i(@eOJrl?heJzRQM|S=$>V|$n{=OKQ{qOCTvaSmnQ&veJ%Unxp zASi>rEfi{nV<|xmo4*Aqdd5_ zVZgS}jQ87Gn={XvTy`~+)iONQ|H@^s9Od^1>3z1t$?F3~|ipltu!F*^I{Adjf*rcgpSwg3AX-0l{Pnyz= zr_=WyiR3EiU6Y@bLzlUg)Im@NrI?K0MBlYI=?i2SaN<1At+T#q(QBJ1TUS0GW{rGM z(Xw@Pe(fQe6X-;mHc}@+8I)o&KJ}8G$riqF1OtLC_Iqhhlc}o&6|!3cuGlED z-|v*#?^^V;51(M8g0(GM7$l{i2p&#URtyfv083F(dspGXZ{n zLXCOI@7zx6CMbhaOvaC%x?FvEK!yRUeht_R?dm^2z28HtqCrQ?P3ADqTjqN91`lrZ z!Fva(hoB5f@!$Cw%~KPlCn-I4<}{Wxuz+v%?xUWgS84aZ9cS!X*ebU&YKT2T^?l@M 
zF+KY3eJ82+A8Gu0fAxQSF=n+m4fxLrs}?Gaz)l)*aa!>e|M73^O4Nnl2Tn18H72`1 zGpZh#p`*>tjW?l_@-;c5k{ql#wtrgh0Xj9Xi{!&b>_v%Y>c7?iK8Z=((?S!-FyQps z?RpCg4r;Ho*`KXYS9f@<`ApZWf^V+nkNP6=t9O%p3Cf@pTlZ)Feuh@BDfu5UH-AKJo_;Q56HA3jgn6I;Sb85e zY=dj)C`bZ7JFMsx(X&j>I(mP#YV966O*b~X+D)SuuS|@7KljET;eV3e{3HM0Z!||K z|969n#&$Vr8~$dR_cSyx;e@1-!`K09#y0cg*R57Ov|+a2qS~;v_VPs5P4wC2FH*q& zwCQ)RlwWyvYSy!nl!F}2-hSNj!inw~v zmih<^>0sIK4)j}A{iO2$*-HDjmFD)!znf^;-`xYh)W|k0gWvds0fnzGoNZw2co@p! z|8l@WVZVsq`|&>*@)^mQ6Km zmA0|3Bo1UQhOFH0NpjaqBcZoTgQUuTl>gU`S@9p6X2riYO~<}9+rWau3U02Rv~kmx z#PAFBV=k=p>n(Ce;#mF8OdGXzOCbwBM5_8n{=aXU|FPiMkGYcadi~^`q)<{Z(<*j5o|jkEDiP}fx4{J zDe{Z{uKwJlnG6n*bJSt}qhjc}cbHT|PzI&TXW1Ek%33*S~4VWgi5R^eFCWE~SyP{d+ zcAy5FHei5D&&wmvpPOF2GWkJOGOfGQdCo1!;FJP5gUBo)%REDB{YUwKoi4D+)mn6jwfEANALxMcJgNO3`Tu^>#5Z+(gX@{@ zlmjvhIPP;Q_Hh2{3#aTEHxJ)cs!>e2vv^72NbQ8VGkOQLKE>mBamT$ zd+NhLjw3TSzo&J|Y`Lp)a1E1g);7i?1Jd3vM-X|5)J0GRrI?JLKs;Iv{eTPu?&gar z{Oaj)tYiyI6PoGTd9A`Am7lk4z}B9f1(6?w54(sq6jKbcigxEYac$vp&RP>RX;2}G#<1)RkJ1L}4bUC1FP*RB0^ zT=bUbgWEF`x)(0)yFL88=sS8rXh-fPD1%Z=#^*Yg{$6DRWEil9bA*-UM-->SebP4O zX3gocHX5t9+iLbRJ_<%pT~Ek;1Z7Z)$@q}S1kGCw17zOwCDtnJ&`pT7+PZ&qj{n5m&3ip|-D~4t?M5F# zI*NTh6O=(ICgb}v zQ-}%t6X-Bt)MPYBWnbn6A=fGT4NY7AY*V-|HDY+Wo_B;E9cFVOj}VkW>9VzLJRITy zWEk);`TX-kiOmwZ<{Ae(T14~Q?+o-`nWk&*(|CuTt6q>t3Cf@plkqD9XNe7P&I}Bg zw@7>K*tK};pk!KTl#N=~$pt!2!D*IW2fg+6$QJgJ{D+_nN--HfZgYRWvjAimVELo2 z^Oe|1iuKH9ItxMB)-_FQqi;vwSox4w06lKIlE(7 zW&5*PjP#PYQ_s`&Ma)j41G%rrlLTc@iplsfOfJ|NzJCD&c3V<-M833h+J85@{FZb& zK4HJ7zRg|l@6JqG=pd>Gd5WM6N|*i4`%^E$Z{fiJnYpjBFZim3(;{`K5@X87w=TV} z(WGqrs&b=79a-C+`4F=izYaT>J?sT!7_cdUOC%-5RAqiBwSM%D{@C8n<0EdHc6B z$Ll`iIf61M#bkU78)~J8?-s!Txex4r9$jGx)zw?eS(GBH^{C|Fd+zmq{7PP8J;*Zq zlIID^pcIqwGsfGgC0!uHfP}F`Qy=fe4MKvNJIHxsYxhY7g|(j8!ZvnmH#&jiHF<%c z3`#K>e{T+aq;&_7VZh)+UXI)E?>>-Ry1{dx`FN-N{HpURYq~sHBy%<)%j`#9Bq)PY zOvbOnK9)rl02v0X#mF|8V3x+i?eU;jZyyTLP@^+ee))LL=GU^5|lwHCZGFz?P=;7 zrVeBn!173td}iD#>d}=;;|7V&oHvwimp_b3_(B!oLAUKdvK>JglwvaejkFW_xivtB z0h^K(G>*n_O1*1$>}$w)Qti@58}vAcmHs=jJwX|iVluuz>r!msU%7_?F~%Z& zZ$8;=q!=XZ@Tf- zZcfep7&mQ;=tq9^xpFAkk)RApF&RHgzGcgXv!-EyHIB3D znRSnP++(tcM%H#1*@>VGN--I~GLYNu)(d19kkuZqbwjvDqWvQ4t4{f8FC(Mu#jr5G zsmG)LFsU@0>`YJwrOSTjxHSSRfeZs=PibZTclXXlf3xxSYIH|hjllWWm_VlH*KqWc~q zN0MIf|~a5jl$NN>B!+ zn2aB{k4BT>ojVLT5*N3(;=X>qU;8!f1M4e!tTs8yg;j9RGOl$(-=~TuyAhN@DJJ9h zl6nC(aNC9f8L?H?DJAU=>{V*LR%?$lP=^N39O(=EdL@kueOUOB>`qVyrI?KGRnljD z;Fro^!0ozy12Qhfv@u=|T3;7j>31<^O0G^CW^%LWI)|+781gHEGALcPwzVrSQ{nFn z0z!)Ht#y?yb#9pJ^?DGLC0t~@`1))z>x)7W-z-FqC3_H*K`AEV=PJr%x)_jQK&AT8 z9V9j7NEY2&Yo+cVk*m`=t>1KvDOJ4DTMd!p$esjcP>RX;oxAG#-WVXmfVcsN={Mf_ z^G9#J$q(4LF@r0`QRY48wojLC4!uLBGGAPAl{J8BCOat#FVF1OAUdo)lp+#t7 z!#)WSm+>m`UZ$+|Ypkp8d>uvPc(OM^8I)o&eoc~pta>MqVSr7e^)ov9r*iiGc8&TY zuiNZ*)!mZ379iTZE{7hGKa+h3%AgdJ4gVfJrDe{**)1^O#$~O+?N0wf16`qv5)$i4(+P>RX;aob+ZA_2%S;GowKzl(TOk?$q`@>`}F1GHT8 zo9qI^1uQ!U(Gk}#RVr|1Pt3el>hF3j^X7-SQ7vFN^1rAYGzROeF_}uVubOYTZ-@->~ z;ppgN64{@i3`&>D(XQ7AfD8jtPd?eOt2S-HN2Z9h(0t>$M5~2=1v|5HdRh-U$s?Kk zhM){eF&V#?R68D12xJ(r#@qeq$$LvDGv-TbbA!&i+n7drI3Ii`SJV}Uj?ko#0|?5X z6qE6L$s-#A;TwA}U@ODGE%V^-HPh|Idm5RWWJ+BbPVU&D@GbT%{|)4KP9+BtltC#b z<3~@miSjfc!vKxesD90{XvZ!^??c!7b~sg-E)@0!ym_nHQnLb))5vcL%AgdJkNw@Y zog>mu!es^lUd+PVS!PVDFUfj$jelm`d3LQdNngKZL1v9I`c;s0@;ibuD8*#_%3xr~ z7JjQ12Gmi{^oH}-E2yx^(T;J&LF=hD1%Z=#*dzJ9?`5o zh5^kfH7;Yne})2BpiE`PKo}+dzf^cT|mj z=ctrD-nRFIK>}af4t6@*Us95X&iUBBL(k7y zI(~dkc<mz#P_VilAqs`ON zc}%psG&N3omlp_sK$ba=98ORMrI?H#NjT3lz%vF6$UC`RJfS$^_}tLz!O1EA`)6ij z^H{#y)9J04=0W7IBr_ zhziIb2+E)olkx9V?1`Ia0Wu624WpjiH+4wei(~b=_?*4k9DDnkaw>S&8Gj?AckYGc zD1tI5#bkVcPGshC2QmzJ%Sq>U^w3?a@$$1$!pVbeBKRX; 
z^Qu;{yUsv{0T(mPeFxqP2<0%Ep3OYfaoTau`#sG%l9T5jPNLVI#pD=*GAPAld~Ar` zwFHhP1Z7aVY;A8`>#qW27|>;KP+~^n zlKN+5?;cWoI)gw9yWB^PQ{*Q?T<8l;739wZWl)OA_`T$vgVXFlh5->10&Bl;=^7ta z`_-K`G+I{b+DU#NIxd>2%^-;^b0s-}pbSbe8NV9x-lDh>$S`1E%H+#>BfA|_u0am# z;%N^zzZA$gKEHq8>G#Kk5V?x{g`f;dm#uC2?er&r3gU;hq1GcWk@MxP<7#ou#c}KW zU9`|AWwqpFf-)$@Wc;{2vq7{4$S^=O)asQzuXAh6*7@h{4_qsAbJui>6tqgWxgFkx zEOQ+>g`f;dm;KJpb*t9`83x2!?p@htpR-xvPmQXZ)NZGs!`HiMv^VZIG0*vi$luAS z1Z7Z)$@rMTjTmcAAj1IGoDPxh`)!%!y}^BU8!uG3*lw1+dg6YQ$oI}mh+I!jBPfGX zOvaz~XNQ-=7Xx9ywV$tjPcUUkcD^&OG=E%lXiT5s{sq^dTKj|d(I;gMEkF9R|R@R#Pz%uSkbsO-zP9eL!W|AX$_;^p9HwOi{+1;{ctk~0X(pcIqwEi9zG z9nPVF0Rv(>zDqXBJ)cpd8>&>8GdT~RD1(u&B<&7(Qgf8Hg~TP%n3=M1@!B$=UM@ zt==Mj`aDJJ7zHo7{+4(C?DfMn*hfc`)~$%^Wf8BxjH zQ%PNg(U$w1`^*pA5<}#6@>hZ~D8*!a3p=B820pui0c`JVdFGfWS68|LK!yQ4iv!xy1~tRe+Z=S;bz~oJ__*1vfQ7t5^I{=7PSQjE zMo!V0Ztj%jmo9ex-qmlBjgDJJ8475i83 zE+E5z$quPTE9cKFQ)<&Ixr7Q&&a!^3+#`~#tNAP&y_ft&E+Z&|Qv7%R*Q@I}xzpfP zCm3Lr6Xl&Boj6~T@}xNN82jCk%8O+!?$!o<`CB89wcSrH|3~?Mz35fO{_i*E%h)l# zyy)Br;A26>@55~s(iexaQ-XYj)WmqE8rcKrL$!bE>mJ+_jPL{G3c`HpfAFR3*jq=A zB56m#dWRK;+8&d;<_9TUA$01LF6{%#2H}*rN12Ae%Q4m8Oj@({V|ALakG-YfrO z!7Kl@;CD@qz#&0c!TK;brB(8|+_M`jaSwSsH=O?B5#DtD#Qs&+-n%3GZ*uiN^8eop zj!(keu|@s`kYT{IX0e3Mcg`+976n12?AXTRwmjj+4f~($H7G?NYYdZX2+E)o+tjc9 z9YToP_6bfqfB|26s-MiJM%*n+ke;lqFLZbP!^rV|CC802wM+YvWga2d5|lwHCL8>f zO-`O11TqY`d_&U2Fl2O8qqi{d+U%Y4Q9<^bPfWa_UG;;cgUF-gI)XAN#dmLP1K+ss z6&!$t0m-`_m(4M>y=JR#JY0XKm3&!}X^?jIjPR4d1L%hb{*b>DltC#b;}cuXM`^)R z7Yv9z7iQtq-o2>S5NEJoi^tpd(c;_S$;0;#v(j-O%REM|Cn$qbOvW!1D))>Q02v09 z`>y!W9aOK=WAU=x^1S5I&y3nD30JH`o<+u^Ti7_cfuIaZF&TeM*+X{)KKzFPDq`;Y zo_tZPj_Pa}q#gegB%ilCTgKW*IKQL)2(ru*Y51}G_xeeb#|Rdm2=TH)5!NS&Tt zU!U-VyVuU|W=}2`6q8T?-NGzhoOA;+47j*U>Z)|yyscThZl=i7c8j{;)`ycCf|+9NU(i<- z7RVh0Wl)OA%70}Ib$U4Q6bAIO-P?Ekqh0NI>y?fCytx_u=@!~sN`r#hgtv_&Ti7DG zlb{Srm;KHRvu5zh00ua{n%F3zCB9IgEa1+x^OBQoVyIolijY@Q_oC2Am`mg?f-)$@ zWc)?!z6lNZc^()rR{mbjf6bO2ooW6Ut9z#MdCF3uG;j6=P4Tg0{WS6qx6uh=77*b_=0K^c@{GJbw`FXIgcG7Lz+ zb|;rRVD6dxpA#}$&OY8eRax%wi;n5lW~=1yh-^>!Nl*r*n2g_M6$@@&0c03Z6zUQZ z>m{;wY@;4`y|eVD&74oSP3@-fb2D@=MdYWHUj$`Pipls{@}%HRIA8+<_`81IzM5{d zveROm!I3hXuqMtt2nm$p)4_6PTM=eQnzes>WDT-SCy&6o02@3hF_p#*!$K5v=6-H8tL4SwyL z(NA|cQGOGYK`AEV*Cc1fSHWe50Y>L4R*2YT@Fkq_jFHI&r)rtRZef&gLxhk^E z&Xi$-GAPAle0VH*`~`fh0R!G-Tqv6m-^zbZ+)uKDt7f_|lj-pdy8Ab$jEAU*>_QnK zD1*{vYda}cJ{8C?;4+Qe&+40RBRghh-k&KgdbZ+An6=9#&j!ZQ4d}#{7nD(gGAPAl ze1B%M@;(J*7+}7C;(n}Rjc)XTt!l9cq7^z@COU@tOrqD->+eOD`6cBKK^c@{GCsY; z%9Q;NkYNDnbmpy(QsSFcaxA7o_pM?)V`+FO^V6HgQZd0wM0TZ&5tKnGCgTrOHY9tk z0Wu6ID@@^+4$RmRl*}&YNR{K=)GpVsLU6M+ZB2v~BD+z>3Cf@plkuzG0@vnAAj1HY zOX)>>CeJksrqVjv@N7MA#*(^GhjYQh{62XnBD+&22+E)olfhnv4byZt3uJ*|8W=FA zP2qj#F&{Ij9CtrSbB+12pFw8shxZ=4OOZhz&Ay^c{-gZAhG{1LW0+>*U&Az+uR!>S2rGz4SZp;j&i`Ijalfv54$L2<4K1_ozIDV?UVYLdr z5d#BsFEUR!-dPB;E_5J2_*K8wkA6U`-+akP?CE}GWR-hTrU}ZR6kp}okVWP~&?6wj zfZzMt^cWsZ9->joI%Jl9#m+7Eh(Yfw{(v1#e4dExMVTQegHlY!ck09+-{7o781Qr@ z%h&yR$K^GcLyS7Z>hBIUhW!x?<&yfK$K8v_-jrE_GAPAld|u7&i$CCP9Sm@ExZ5jU z{d~I8`Rj?dh9}Ruw4Cp-6UwMCito)vWFN{LK^c@{GCr^7)=V(mwP1jQ&!^)RhgA1{ z**Upp($Ax}sUYmbf~xcj(jK=^ME0f36O=*evb8;E#a#(x7@(0NecDY^^y+#y#oFzv z3?&hf=7Z*rb$UA&8>$icHD!UI3`#K>KViA0D8Vnpz<{dPuA_d5*Y?{b8DxEpc*SG7 zzFoOTCFbD1i4ydq_I{K_f-)$@Wc;FAoTcjykYNDh+R!JwU!$)jw0{T6r%=_a*KAQ&D!p$K-ywxmr5D z(=l~uND@Dc$Zsf*IS9TJ{r~*VTN&kzfeZr*FGspwbbhtc`s+Khx3m(Sjyx)35~&PU zcFcWYh#Ww%B`AYZY?<-5fG&hvT?R4?xNh!~uXoCb(P&?=L6luWGoQf8FY`_GEBdx``J_mrmuWl)OA z__?Y${~mlN8U{En6osc=$hw#CV0Wp)Rq2lJAx-m@KVB?5)*-RmmDh*)lEbUlMz0cNLgZk|bAmD`#bkWoK#*am z2*@xXjQ>Dly5+Z$4n4b~=YCRV3Bz~VJql7=#Yr~15jlk7Ku`vyn2aC8(sK60hZ!*7 
zN9B1Q7tXbG=O{`J*K8^-q}?qk-#XZl@l-$NA|i)U90|&xblKYeP+bOxreMJKB(Iqd zp&bl2Ozyc@_t#}MbZ(88D0}B|J? z8I)`5dv_UVb7$N=-?g!Rpo1<0S>|wxGeH@YF8iI=tPZLNG7K=2v>ajW(`I}cWO3Oj zS$6AEl@V)G>N-!~g=}#|j-a>@ltC#b;}_lEa$Mk?F&MBYQlKBPs1S7c4joV8Mwe>| z>Tgb^NqpPss{Gp#ks~QD2+E)olks^qhj}F6Y;qXT9V+YfjIQlT!l{dCWI>i5%gj)x zEPr}OceY+#ME*c|Nl*r*n2hhw(YC@GK!yS5C!gyV=9oJQhO;Jce&rstn!4(BQAv4w z;XVIYM2@1k5|lwHCgaDj!&V8RK!yQcycgr`=Y_Q};eo9g8Bw31jF)|}QTQ48n_LT@NP zQrrp3pcIqwa~1o|a`;z?VZa-g3Egm-bIk)^m=nvDRFr&dZtYk5oFBd9|Jnsv+cA_^ z1Z7aVY?&?h|ALQ#VE|9Qkm(u~9+%@@Pp+gySU)|jXpUiRMiTI+u+@pz;mava5zpbSbe8Q->@ z&E)lg3vDb@^~;%8uKQx1f3P!rAkNp0$e$=)1Z7Z)$@q{3iSZqr z>k9)KS!MVHR@A5}Su5n+>Td2fC~;irv?u)DbGOxB5jmdXO;84pcIqAr<<`WeWMMHbU=my@q)D) zcFpP+C78KfSF_svSX-^{G2ykXlh!iW6x9); zxWM-zzZ7(9wKjzqeMaX`B~#uIltC#b<42NvV~a0=3*CpP_>zS>GO%>yL$^mSA_3Y zUWHzVrBU7zltC#bO*3OJSw9(JuE^7&`Ia9MJC|L6(Fz07`;aQdk=oFqzN)SOA zlrCG_Za4bA0~rPk=kA`P9=SSeURSt#^m6s!n(EoP2wuN+FFD>kK(??fN-#kglwvYI zZAtve(KH~#fG?orp)c+VH?Kp_&tEAK1Z7aVY;B)=Jqy=14Dc$+y^_-Gq$MbO zz%D@L@{6$(U(bk|g`DEQtb@+{%%?;WltJmTwas^N4++RH!2ID^hRtSUtDiDdy(qdq z^>a0KhyI50n_D$Eb)k3e1(Xj2Wl)OA_|fxs?=0M(VZi?0-mBi#VJ~*%rOlF~h0sRW6o~PBy9Df$mwmZPu^#N@>eaD=-C*ydKUktL$ zC6ri#GAPAldfD8ke?9}b7Zr&?Wx;`!*z5V1npL8V?YggZFiyda@m3}!To}dg$F&Ure z+9@jZ3CJ*jZ-2pRCVy>zohCzd=+%`Akp-rI?Ig4Gpo@ z&I1_+kgZHaGrqj=VB790=Cqjl_>kn(N42nuPSW9>=zC_Blmvn@D8*!a3p>XtrUPUc zVArL+M%yEQtVe*BC3DA#nNO`oH8ZDNj@hjioJF>u>1I_O&=m#z73pp!mpDaiz7P>RX;mBEf)75Fp(28d}X7atOCi;HA6 zjkuk6eJ;i9kT#_tX=Y{0B6@dIM@by?cqVjsx z^u{+G>zUeXwy1}G@u{AYMoMKnSN{HM@$sj0$QcT9r7-u{>egPQ<5Um(sW0>5d|K)b`+S5vs+Q>3D zQ?dxkpcIqwy-K0$umX@_fYGTp&%l=jE9_EGn0cz=ec@`r8chY!1at~d3niPN z3`#K>{{lL*_aOX=1`JSiD0m_;y-HoDNMxh#>>wUgI6qSk4AW0?_7vh){fD^r|15OzNoS=()tT!Jzv#bo@2 zBgc(ya7-Ttj9y!M73|D<##HOz7BTYUIZ=5w$CN<%=R1Ccqu=lNLCGU1gHlY!zj44m zGo1uv7;xo&fU4bBXQ^0`2#HlP=hphRE8Nja(0t9f?`Y zJ~a$jOYZbC%-A)f9qRP_)&h6iyw#pA2?0R~r#A)YI7tU3pP&p%F&RIS*e&jz1u_hv zSf3B9n~Qrq=^1{1@?i+?h9kQ7e%&%;=YBJZz8cX6dYW@0((@Cm*SXi{mT38JN@Z->Vv4NoE=nOm8I&$t+l&4jaDRpY2eeFe@Mu#v`T}7$rHG&mN--HflE_T3sR9`WY`hpEmCn2?`I9eIgPn`0K z(lnlq*t=cfdC|-c+c9Kq_fm=p%AgdJ@iPXqGjkV^VL;0Bg9fYJ+K<hnvIy1feZs)=?n3Ht3EF;aM?jx&?Ci=(VKqL<$3mJ$;>wBld?fdB|#aK zVlsXVdlc9R=K;ci)Ei_{SiGa)Wwpe}2HC52baHeo_moZE@2UHT@0<=%stC%U6qE7e z_R0gd;fta$K;T1sa-Earo6(qUo|hNTZQsVdXmZZJ+c=Ghn~dzwzbVxOWl)OA_^4DW zlP4SlgaOBMnWgR(1?>o5DN6V2vo|H8B(<#N#0b;cAZbNJ9;Va~ltJmT-+4pbPx#so z3=s2pc;`ZW+@oWv92Yc#)j4C0o*cdTM~e{dkzi+0u>GJ3efnbh5n z$TO5yf-)$@WPGn$-P8r|++jfA8Z~bfK>@Dkb{0%EvqNcfhBG(MtXO$c*OMEaD>X}L zBPfGXOvazlZLabh1u_iCQRP(itvMgd-(s!gd*JrA$!|sR3eIZM7P4vbbx_;u7Wsxzj&zr}GsxPWr?eB4LFux! 
[GIT binary patch: base85-encoded literal data omitted (binary contents of rust/badmessage/badmessage, not human-readable)]
z(R+HQJ8|lVJyLks&Wv2Wl)OA;5Y2o9ZtV}J>B3ACoEXd z<74_x8xYrAqP=dU%4tPO3Yn5S7T)40b|x92>&ZXLr7Tw{br3U{|*>rN!P}73{ zNA;h7Vdpt@e9>?v7A)EPabWAEtU+H%N5zwu9;aH%f7=P2pzuaVX^5zNd)vlY+KQGWowj(Hm zQcT8wM@N=z7bd2A5}#e2&G}y)2^~JodL7y9O=Nq5GAPAl z@H^^{o$=bppX8=?qo#%lAIH`G{mX-QnljR=THZgeJhwpmB$3^W^KsNyE5vRlJN(tg z&v;q>IOApMrlQ9FM`iP36C1TP&FX3@YMpK>+HNW;5Z4vypPFAuSUiW|j1!Bp^ zF4`nN9&nz&me1&YO`#*urn0)>7`t(RX;+=V*C0X_)>130A{_RXx=P~qvW z-Zz!f9YUjj_7KCEgi`XGesqOc8+m}B3`#K>KMQ)Kyb`V>fdMT7Q(xAO9qZry)}^dO zA^3K{a`miLN4DmNmW`lKJhhX*5tKpcqODD?5r!|LgaIwBemkSrri3c48q13MPQCif zA?`t@{)Kw~l4vbtYj==-F-;d6=LKN--JV!>$)qhXWY~$PNDJ9NWLUwBA}g(oJ6X+lApH{&dPUBq4h* z^k~{m9w8`$(nX(h#$9E2uL1_BIZ?5l{N~Pa=4#U3D>~v5cIu})&OKeWBr}6+H}X06 zkVgs1pcIqw;|cGQo610j0ihD9SLPTOVtxo_F-6@zyD7WL;*37$>q8gxWzoCMz2q^1 zGAPAl{Hliknoov6h5=hyUX!-TJ{ED%e3#rRX;w<;>? z;d&s$0Qb2|T^HCsYDYg9SC{`b8pXR_i`UexH}x6gG4xGp{p3l4GAPC5o%pONA)HwQ z%$i`qYd0T*ggM^rD?NwiI9=KH8gdKMhk1S6#mU2=hzxpw{Qa-=UuR9fp9-1T{O404 z@L#`XZVJc7yFdd>(P^xXpfdmV)G6uk!B3x@WhX^xdU92z`0xGH5keZik$?PuZ}`17 zZF2LUYt!JrY_Yk7pC;cvYBOq>QXY`tHImXg+^0CXHAee*zr^Yw=e=T11D8~}9wQBd zWIMjUP2KSAK=;Q2R5}`J@E?`+_-1Mv8d@4E{2$n@;KBp7cfrjX*im}yOjhh;4airD;p9EGIT%mH>mpiSq!7pqtGX7z*<6rrIp0wb*ApTH1GrfT~kYPZn z=|<~QrMlYBo=J9CZ&m8Nsb(9LF0TGqVa+y6L>?hK{Yx3_{MeYE5gUYAq`wkCL4U%Agec+Tmkfan_m}j2Q-;4HysbeXxi2 zblrhWSECbhO&{Y*(p?;w^L4vp5qXU4LQn>!xExF4WTfEmiB0Gz^)ni3n9xzq%U&uH zweGe;?^-2(-nn#{ZzJs&cOM%l;Y~&Caq_)?u<<{1OpE=Y!1IB{;6Z|N0Lq8A4pcZu6e`cs*&lhKa{Mkb45@{`2YSh$uFsY?IMO}0?s6%;@0kba7?X%j-F_wVPP?_uA&uSIHtD(UowO-ed8&$%^ADfO3d%RX;bx4}aitxSy47hTn;=qrGS@Q1vZ`FjD<3kkf(-q9b zF2+e*OhV6`=E&@gDak3z#VOhlE4CYf0(_^2Sdi%RQj6IE6t5J?p3N>LNCev zB&QRUK`AEVXNz%bV{Zc)21vKrTx>mXjjt=)p`^8Psif4Y9o4ToTF-kFBvBz_wxwhc zltC#b%m4nIxx_Es12PO)M=Gt23CrRtUuD>BLOMz7o@lnOd+!q6fzv-PBC;Lj13?*- zVluvRG>)qtu1$sk0k;AqSmrJ|M|*FQ%{%RczwVV)lTZdIdf^yw0NN+v-W{MuqN z`1Qo*u9j`3G+^!u1G;zS9Toq%Vb683Zk_s3mqt$CJ)sP~vOTV?`k#?8J5aLzD&v#q zzs+4Ucl>wmnz;j;#~D#fhQPpKg*jVP=H0ifqsiveAOciNY4DxB0VS8s{BS#@vvp|3zn30vs<#Kn_Uk6<&jJ$ zN)AC89MrK*jvu5)1}?*cGz^HCV=*+Nhf?T=Dq%+8eD ze<|aCFKkBq1$>AJ1}Nmt@)}!6H*6Nsu>X`{B{`*?%~bSI^hv&BP9-9{P(BirK`Az7 z{O^TnJ0`ydG7RuOnjXPMX_ggu^theod*-uPtcP%>7A$oD9D|5C=c_MVL8 z@E{EXA|%>Z>3l1TRmjn*eyOk1Y}dTPzN}*4$`KYj^wNMUC7+-SN*8_3pKT71fD8lV z#jf(KZ4DHYaPP>H_Yt_{#=_UPlPk4OFGU)CF!Da7;9tu4VWaoXEWEr41HL8kZ%{tK z*R!v8Z6;lt?zv3WXuF5?JK80S8JUo+?MC@TPzI&gnDN8La-Og7B~~zCI5e&2y6jqo zD8V-y6*Z_%IjZ%oQyjZsy31k*x}?UPQbQy{|t<;(ZZUzXXLnPk;{>uUhz#W0Ue ze@&6r%AQ1>U}Vf*loEn6D8*#_BBRa5U^oqg0S_ltIv+$fiZ#8Q`Jm+^?WL?-Fce`+ zzd6Op6#YJXQ%VWSpmfpJ9^CH$m&n0@8rJJ#v@3_G&q_XUm1no9Hg-GjL&sfLcrAs# z4;k|VN*O^JlwvY|khTq|a|ALBc=p!+rP0((#dfWW`c2xkJL7I$xt4fEdh`3bXG;*- zhf+>Z2BnxR^Lt|Acy?C@kYRw>=B>Jq)vV3SPpnm6y8C^FsIm85!HQG!3mM~{i0n(L zASi=UOvV>5ZJF!331k?s`eNJ0M_O#<9&~+C1-f;2yk2(ncMZPwN(gdBZyNYfDhbM< z6qE6N`}z0@1t7x!R<}x*^9N54j(@u}^gvp4Qi^&(s>r*NE}>{Y6}kJd%WzUIY-SzcOW*!sDm@p`clum4JmYYM3AlRPpKv-gHlY!_pnPh zzeoWY2F$&D_Fd`jNf#c$-Svm=uM!Yf?8yqxj8HP&F^Qf~KB9akD1%Z=#%Duc_bb9> zzA(U8;RHFUa9?p!-fB~YK&R^MyfX2@Pn4sI*PY`+#vDMYAt-~RDkkH{XBzM4@Fp}2 z@H~I;=-&7C0nSDn#k`-c5ehk%7tk2Gs&jV##Zg2Kq}2XP8NYClANSl0$S^>zqRpCW z*+8}+H;rkPs6>x!MOsOX*gHlY!zt6Ul2hRW*2GrS{pOK|xa9m1fj+R$6 zSxB31*kb-6Zl*{6auFgwrZf`%AgdJg?|5f+DqBC0~rRKaMWk8D-2!!ZAqD`&z1C(Gz~6N zw4ZDB2mMU;A@UPS6G0i2E|Nb!ZiFY%FraZo18>5kUE!4$x9DG7=ChOb>#sE$il;cL zyevoLr<7)bGAPAl;ooDn*7DZ{G7Pxa9UT^EDQ>mXf#D#dS}g7Goj0kE_&j;_qWCxv zIh4{uPzI%#Eb{w=a_d>^esDqw170g9%`x9MlfLxOpjMSv$zr2&URr{mtb^b5_%I@e zQCj~h|F09umOoA?TXtaQvpbtzhXD^OJa?R!-%rKU#-O$L45{#)tBg{~%=Yt)Z{+5g z=MerG%C~eC)7dSC-iP-X0w$ntf-r8f~9*T<*7Rwd9L(w_EZW{gOpc 
z+W*S`^XW9ctK&NyNA4*&QGfwTvn~1|A;nRvmJe!PC#}m)pKr}(In<%tFQ@hs+3wFN z9Ry`iitljPC4?<)p~XOk0b(Z6zWdw;#XU8Drfc3YmaoWEv*%{sba<0a&wE6ELFptY zgHlY!4D#**FAc$dYe5Mz6e3)>JmiqcI`2Bp}Taaq|@ z+6u@pK>xwDjMl-P-UKzCSIvR$qx-cRmPOvSd-|e(HM&ahC8dX;3`#Ni#P6Rp$*RX;X_k`AgDXIW0V~TN z6kX!?q#u5JT1-daxv`&>XYe;j z+tembwwSx6t`b>19D&BZ(no2T$I#~=V=4UvWl)OA_=IpTMe-JqVLImb~32+E*z(blFj1qs0+I zgFIx(p;O&^GC7s3-)=|bc*-||GAPAld?n_37EZV}3kFnH?0kB+@J-vv!Caj)nFn5P zcy3lxP73!E%Noo@7y|Ve$V+UuBnbZ;UlgtNP zYtcIlNt9uNGAPAld=Fd8=L`337_evU3Q@nUyOi^`DFq%l@seRk$}#;Z-OZLa#k1%M zUNU8bpbSbe8UI!-d7beX$S`2F=Z0Y_Gt0B8drS{mcPPvUKVE;=kbGl{?uLV%$mjf; zGD=VerI?JrvrFcwTL_S0fV1#Gi9r9g9HAM{q1z0B;kTx@BwQ}tZnn`RY9%7Sp^Oof zLFu9~ORK8Di_S10s-@_tm z#^GvS7{GQQ^L+GO+oB<{uRg4&`SPE2eY`xKV9e}(I|{K#IQek(&An+n?G5wQ&e1f^q9#d?hmM(O8HJu2BnyczZYVo=l82Xh5>s^ z1vCz*I?KIVdrpnHg1)QQ@}$fPg=!X;&-Li7ocEL|f-)#ww6(>!V&ED)7$CDtX8B~( z^oZec50!lHy4c}QE8aW5jxbrql8>&SPoqo|ltC#b<9E+;N4M61={^k5-gtDQ^}=Pg zgy3ygc>5wUCkttv8kLmvOuj&2|X0`$_a99x&HRPzh zFmj%Hh`Lu(;!)u{sT#gUrn4X7OeE2p@fnoazw-Y)-Ny%R_GuzCyVn6jt z&r)`0Yt8`kwu9_jWjQQZ&?Wj=lph3TP>RX;qKhjB2M+@o1}xk3w!=BimG^`u?b+Rd zd>L}tySIo*>2F-_7=XUxGMlnMPzI%#jNeI8Ki+Q+WEikgH?1yB;pMg)tG^~y>5h9S zuw-lvVK%qfL^ekMSVa!yCqWsMVlsZHthw#%24omO-h7LSnp#Xxtj<>Z@FfqYAolzV zuQH4ZoFn8Jkk2`nV#`l#`4%X}WPAa_R9qCiMg{}i>|B|P4R>&@Ty<{jwnANsKSiNs zu3BjKx*-4ki2RXaM^FZ(n2aAbdU(#km)pYtb?qsRPu96T>A9`FUS=9;qo!wTLf@n= ze4<)RbvnaXs2{jr?YVE9{=trcciyaxvu|K^c@{GX8yb6?ysv z$S@$qRj@hzbTEs-8y!ZiojqzV4bE*mAFa#&g2x#B-7cZH5|lwHCgZJi%3ACRRX;%pfaVdn=G(K*lbS-N!CkS#O@r z%o{s-G0b1v=2%|;NbS`QSM-iy1;vA)3`#K>|K0AOwzq>@8wAwPzT-$gJCSooh~M(q z&oib$+zO2p#ibASk6uJiG%6{c1Z7Z)$@n}hPdo=+j)MVTTSfUfVmmA3dG6Zjyg#6# zTXSpqkW}dN=__o{kgZ)s@ggXLQcT7#imh}q+Y4kEFdilGij?*CZi38|SykMGi{GWj z)&cW}j3WL|4kL0k#haiEN--Hfm6dggjRGraT}hgHlY!zs~}f8sMd77+_(_v*~zaNzV&a?Y;3qML$XGLYBErE9_rK z8eBr;8j24=8I)o&{;g_E%!X@BV8A&tU*Ex^=96b<19czKmWUZR>6_l$I2yz3w?Y7s zYbm}2Wl)OA_`V(f`U#we!GKA#+l2>LwPcnpf3Qn0mCr-#%!r;#gV2Nc>W%h@Tu1RE zD1%Z=#`mykMw)Lxh5?GwkHfAqzT-U>nbmnY&u)LMX8rs78@r;-s$DG+xt{WnpbSbe z8NYDwq~#!d%>xWrb--e-+xmk~&h~+FnW|SyG|!vxY+CvD5lwM4y5P5g;!jWprHi(9 zk=;J{azz+m&n(duwB)Dfr(zDDvr_^K-DP6Wb<;Ks7=96uL&n@lc|=eKrI?IgYWA9x zhJVx(1~6(m<#9-F|9(B+Wq)B8&EbMq8XC*yu7A&*$VbsH7k~@{6ps%0 z(q_f?o#ZQ-tIa2S=ne0tH`yEfT;PLn3?jEsf(Xi>6qE5;Z!rrmJU+t!sr1vG^#^VR zH644}6T0z4*E^*-^W)q20)BLIppT5TQi2J}pcIqwecNlSJQBz-U`gG@q46_NrluD_rw^S`8WV7s_LTGALd2ISc5DsKCz|1bhvA|7{>drQhy|!9>T( z^TyfxeWlWJwb+y=U!adlwoyU|%AgdJ@qN1?LoXM|FyNt<98F$LZ?2TB?t@Q-N2%#J zw90blM1~WUWyX;)w^N=FltC#bfsNS)jOGrquIlqq}sKj@<}r6dl0#s z@{FJiN--HfKF_Dv!W*(MK-Bp2kBWyqre8}ON7F?g(VL~ug=xjPz1PspibLccN;p9o zlwvY|l(ZF7hmStP09q$^$q@~muUnn3+_pa0y~Wc|{hHNUyE09YQ|J++ml8ox2Bnyc zujuLTq=mP?VSw`n?X|16du*TRsEw1Q37!$GU{$zL_&W2tPbRw7_bcT&K^c@{GX8+w zKuG*iAj5!&W0cJ?UXnd2iW_OG9`9`wY#|568_`|28?-G%wss%o1wk2z!xkKE*QDzKNIl?%MCzG+AuJ!14zA_>Z%6qE6n6}t3l$p9G! 
z+;4FZmXpzp__?;+`%Ad?L66nZQ88|5WI8I)o&e%*8JD19`LVF2CPYYtqk zH0<2TA<|CNKMu7Tk$BHVe%ZmjI%yL!=0Qp{K^c@{GJds!I_7yAkYRwW;e_nH4dajN zSuU>^Iqk%*K0{_;9UfYxxWlp?k%uU+2+E)olkq*Q@XmZPkYND*b;@+=8S7eQy>sqQ zg}&N;nsdK>arwe6{iD?95qX#rLr?~#n2cZR{d7AF&H!P6q`EWHj7j13iVDg@FU1oT zZHN8pP0YzW`ukr=sO{)c?(l3Pog zS7~e+)Ii2OMu{gVgHlY!_b|33e|W@z0k69g-`hR@aQrdLnvLeE&jxp$;~g~gOn-QT zPY*plk5du|%Ajl@x=3t- zl1NYnrI?KG+xl!0@X9(2(0Gt0wfyFJ9oY}#{b{;-4=tlDZk3#LrsjXUPZZhOlawTa zGAPAl{7;-W>5an|y1{_F?lSXLZldkp8up@3^NyHZD37guD=2IIF8c5)ME*`mCMbha zOvaDT`5q_XH9#0Zb?&Qoo$D8*!a z2H3IhHC);O11^a(ruBUvCDnhi(zO4eF6*`s+pV@XG^IKAqdPL@Y04XdGAPAl{Fmf( zONR%LVZft993dNKRj)C`_D72Hhc9zjEq?TD-bMd0F}V;#o}s)YD1%Z=#`mxezsqNV z3YK^c@{@_&BAKI&js;y}Rn zJ2dV;SKc)|I(^+{Wn$Ny+vqc{73G(tW`|dgaNbH)TAz zN&RdmpXQo)30feznY74J z`@kaEEzogDa&;OqW?Rk-f-)#w^f|LQuWbY}3{X7c-~L9d^qkaz_x$P4Vn^hhRX}x|mz-f(xf}mVMVZ%5CeImp0iV<-I=Q zZCOk!GG=?uOoB2f#bo?jwQ)i7Adq3ew;lB)eX66O=X+|xPOker@NU(UjjxNPx^geb zpig@_aApydK`AEV=g*(?6ye$m7%(IEaE5g-VGD2X*^cj1Uql7Z^=WO6P`dPixdFY( z?8uo-PzI%oWM8@s@K^-{_C_h0%Zv8v$n(DB4lgZP**qC@t3(|Xkd!hi!tcJH)ARj( zUC%k+>w2#1eE&G-`^W2jKjZ#98(}>Mf*O)CEX8E}%sn}8kO#^r;OZ;Ayz#zWgST>M zPv2X1vA4{(8Bg61EucchOZ==v5U3?7!%|F!zg5_)3XbKwzQc!yPynZ;h2#y|mghX| zW^a?doQEEtpfZXXUDYc1yxW2>=3wyRALak`@KCMj|G#=)D~j>&&pjW7JgQjriAv~m zfRbYCh%2qu!9NX6vlc5q>uKG(+Bwrl;6p$iX};#a_!?2{v3<3NLFgmSsDj$h?6ld^ zob8*Q2Q~Un$7jcXjmCWZ<;T^wwxxl(2w}t}&P(}g!7Mgf=U$`OoDIqLT zgwa6Zpr^h2B&X?S2IgW-!qN^0O(bPlx?=yL;cF9xG77L5OS9}A*wB=$@_qNd-j6TW zyS^`A>sg~tGn%DMkgtPgk}@pCWPAn}sa#JFWfZV$ZFI|z{0}HNm7QTn2c{>vWw<&P(}exMPAk}BJzrd3ySCXsMyw>mFf957~LV& zcX6KpVazwcE0QuSU9p~x4m~M_G79M1U+NJc{_EYDPNO-sPog8c*O{&ZNA5%=g_s5t z#OUkCl>tm|6ld|unNG-M5$5wA~0fi{vdEX9{L zHifQDYc+>cC<>@OEhhMI(emMx&@rE;@Ufk*NAl{q+%0bQ9jzc<4U7h_|55&5GdSFf zuF$|gflYz`Yx{c!_Y`@Y4tZ3u#p>5+Sv(JFj2TkZ}CLKV4O$7h?~YZk*F5zO)+y z+DY@R|HZfdJ%jsi(?eG#`OwWklWTtC3Ilr?6yl6E3uKJE)0a}UgxIvImHugG5PBge_=22p^Mv~ zi!|TzUwq5o%YC+aVE)jFtY3B!Mt=u% z|0DnJ%N^gx@khK{Dunu>i~`=hi%s(M@+mp~anITymcaZniwf_mIxdSfebLVdavXR= zQii42o`xS2gQNNDpo{`$nHH{Jn`;qxRde|?Ws@YgU*?yEeOyL0#v0#0`_UkKhJU5d@AFrny$Hh+@1;J9m|bog#jrkq3Z~8BIqM2!%|GfkB~IWhee=_ z0(L$YXRUsN7%JPTaa=DfwI=T4 zw_|_U{9r#pd26)sCWGDPEtFq`G2aL8NXoDjlkrdL$-g$*3S|_avNzaZ)X3xWu=r#* zA6LVpannkz@HlaiAi0n%f}8^0layg8CTstGe}FO6sshR=z%ucxwbQ3oDjOA}SJG^@ zHjV6J?(F`T8_sy95w9Gjf+3PJEXCv_zvXetZb2xcfLXPyrCx*R^E}xHls^#8I@(pF2CdkH{TB9=Gm?!DTnBOZLo>e=+c*h<1==z~_k&ybqBIXi$eXipIJ{=DlU;A=YdZoWmt;I_>sYeuM6l-Eei0vQKI!C zNp=lp0zySo_xB1O zuJ1Wgo^v2(f9r9B^YP5}bgIW$335L8Oj3rWE7tQPC2u<@qX340AIF#lTRVGYO?S~H zzMa|?G07JEZ1#+(*AVepy#nxsqzp?j89&}$?v&|(G72#IUYTjjf9=)jZPP;M!uCJ% z*(vnp?sVaNSiT7Ht(-zIMN)>PE5=;^u@61-fdXXSYSMBZ9xu9Pv~}aA36ta{@giZF z=S6SCRMut?mUa>NN>YZUn2c}R{WWr~P(}g2EH~Ij59?k?<0-pnC+E!)fBNC*+S5|S zN5iy;PhJ#*ZzN?{ipdv#|5o)Er9Or-3OK1*$CR@B_UMj#IVD$iAGjs-_#TIjK~V|4 zsyp#*jS?_TQii3NjL+KzhV))Yh5=eZhK~M6y+0Jp4gBP5R4W@&uvqspeSEW|*o^|h z(k=xvBxP8N$@uJ<^;sDm0iporyc6eCIc`z+4E~gk)H}7>|M0-BGL231RUscI333^j zB`L$w73=v%*0-HdMghDG<$Wa%hBjb%Qbp&er|!eX44T@mp9OX-!^Ca-F_#ij)Q|Uny&su-{Yg$aGm{W~1Yyiiz;}`|EX8E}8rkhZ z8}x(?3fQ(qk3IVr$jt|E(h}@Wmt;I_?i2}!j};!qX2q_OUi3{ zPu&}LjZNHZ>=3=$`QFRZ^g^404E2c5KU9DPk}@n^A^Tn6Kwl6<0o5_%$JFWizeW|7 z%yB5u>Tb;GwJY{<6AyQAsvs=wO7M%M3`;Q?KMq^?`3qf@LIG{{GwIQ;(=j@K(O-6Z zANcvp+^cryj@Cynw3mqIRaIb-qzp?j89y=@^OIJAG730(gyW+4K-%-G!hamiVXD@i z)phuDSNpv5hOrppwV`UTL{f&On2gV!2RCGnK^X-sKA*age~~rpF2B}D;cLw+Eza!= zPFtQj)raQI5tjCIuuM{hrI?JL-Av!SiQYv=0dxCOw3WU(v6$C9OuxNA;m)r}Rg~!w zs-xx1Ya_@tz(&Sqab%~& z=pjYoOi~N3l9XX7HfH>&*YOqKD3nnETTWbQbh~OTyGWX+X}wHAtz@a>WuIee%R>tL z2xEQ$d`Zf%6qE5IgF8zP&#(;&!o zz>lO1OEDS$mE4#pjP9zS0BfBm#loleFEwR6z3tU{R3CyDn22|$>w!N> 
z8J1!)zE>6h3C=HpJHVtr9`54J)Gq<24 zeH38*^P=GO<_;5&Jwpz>@!jluwQnW;@CVoL*t!wl*KGnpBxP8N$@sSIT6_B?lu^L> zi$$yY73{e;^ev2@A8tx5)_tz^?w;2f52-fd`vc7&n4}C#G5J5+_G7k5D5C(y`*x$s zJU&~NZ|R>@tnG`kzV@<+-`F$ka?G_f!nXYqgpibBDJJ9FcDd<}?@+!21DH%`T@G)I zE;gyXf22s~`2LP-PA4kpq>eOqIfoGBS0I$63`;Q?KN=EbRYPwspa2uzYjnX>*(%-H z^}DFk4+I~RtZtMAC+rn;1Z@d&3kV}A!%|Gfw{4rI@91p}6!6lQcXGYd_=m$9@t>!` zC5qPC$<-oHsJ2y=G+mQii1~)^mspZ84NlK#xsHvzJwZrC+96-i$Cqd#W+tW2Jk_ z8pb67MENziPEv-Yn2evfpPg(-fiemZ-86Jarm=h~O2#?7o`Jubad*P@lcvucbVfur z5!Q1%h#)D$QcV8Ovj>*9(S3Lnpy(z$`gPpcxLCnleN`TJ;ghwQ38f2B+0mXkTM2Rp zxIt2erI?J*+j=K5n4l~I18#L$UaHc}d@iS*W}DE{^W?mx(|OLR^=BLuZxXk#PH>Z? z3`;Q?pGn^BZODc)3JA=4IaSy#kj$y-=$88_UP?r+?^h$0Q;>{@0#WV)ktAhUipltO z&!sJP=r{}oFxDRZw)0HEPL{gdj8+C$aXr5Q(IjP9iplsdhPmfeO(>&) zRSz5eV}seRJD%^@;hZ@F=EX8De+s-};(3nv`Rd!&}hDe@g zVSbl(-FMACnrxB%IJMSZe%36{gD~b^aEqi2OIIxIEtApDp^O5SviC5uwid<*dG>#L zK0*1!bl_lffwp$fK$;Hm7o!i{CMm;GOvay2)CQT}P(}gnhqF?)uDL53?Rou9M`pZ% zykV#?aY>sn&3ilT(-QrE@^jVm> zG8eO&AP<6gk}@pC#*EKlwH~GD*;^Fg@OpMy@TgC?cjPU7g$tRRnhZAWkZ+uGqv_Np z?p5zV0!bN`VzSQfr9J1P+X-b9z#JGJWuLmT>g%EN>;qLm-i) z3`;Q?pGmeHNi>8q3Yh=)K9KwH3@aUVyWVu*lK=4?PdxYQ>9pAKKU_{HOvd*r-=p6SK^X|u4d9F$Rj{_x^Z zYLND{$13SCm92bz4mZan{mS$gyA>^nPj!rfRFX0*T`}fUqWL~hMgiCO)k}HoogVLT zA7S9Dnois@`zEnMC%Tnp-tq%s%wr&pqzp?j8NW6ZASC_@$|yj)r%TwR^YiJg`@6O&`&5b&n2+EANg0-6GX8#_OP~b0vW^1MIwi)(=1aaxpQujil4>eK|tASuJr74pWez6L0xfXMxYfk)KYIkwz-qWVRi|Mh=GAzYp{0LBTa6h_Vg#yMePo_Cps|xIs%DfX~#s%CzZ#>1FFDsQLl~4To z=OlPYQii3Nj34#Z(0a~783lMVSGs>3SEPJfWXoDDX+b@`??yvp-G;eerJKSDTi9oi zMN)>Pn2gUP47D|~P(}ey;);Zi8`4MK7Ian-5g4ER5L=c$=G(nycBJScLH+`=Ny@Mk zlksy5=G~i3po{`&TUrkl%Z?x6=D)_UM<#tujbKGHT?4E0CN`M_f;;u5fHTN?@-NLH-JINy@Mklkw{$E#u{&{TTaNPL=n8ayH?!%|HC&t9d9eRmWEDA>|3 zQ$OYHI;FH~uJ7ryt^U(dy4zPL{qkXZkV;tEGa#R&3`;Q?->cNqS(Sx`VyhNYN{9~pFc3b#TT1*q|aH7h3vU*s#> zo1^1yCz~rRoO!Z3R6TfPIG!NSfkKipEX8E}_c^pURvyYIpsO`LzrL;~bzN$w>Wg~1 zW{Y{B1(sK8d?z_?suSezpopXlOEDQg0?bs?FM~1)5S=(}JixTmbJx>Z_A}c8cE~G_ zJ#9UGgT^G0)|(*z0L3I_Sc=K`uOyHbMPo(*B_dZS-Xn)i;X*jj5Fg4ExJER^N-nxL zQX)!=ApZmmv948f*6v z-?Ezrr6groipls)($AEX3}qBBEnRR!DdN`o)4wj(+x2$qYW!5!YZA@O-zHiYLm2Y{ zC?hGuQcT8wG1h)d`UPbaaH?yM_5Ib>+~aLIx_7A+Bl#-73udj>cJF>s=SPr#fyX3e zSc=K`ww>H(XAWf)z~%Hoa-m6hZ#gf8pG8n1X>(@vuYDJ;6vs5EjT7WW@PwocOEDRr zNzV8&4x^GHacuvzT1<381J=ilK~jdLn2cW=O4^=*o{2*N%|G)a zlPc-Q?Q5<{ux32YG;%Q75-P^f>ig_7@p_mqPbEnimSQr#S5Y_fpMWw7*tvmm)P}lm zuFj)pe)gfL5DR~Q$7jm^ywkOd+=Qj=$5Ta8hNUZ(_On%8?NCMmpFRaB*cdu66|zmy z3>>zRm5^l6=lb#`LE>o-@sBR~^Hh_RVJRl#`*VHmOaPQoz_tLE-NjQ*0;k!!)|m;^ zMTC1-dcKI#OnmQcc#tsW0G{V0Wmt;I_|edp?5`3~Mgbej_MOSN(DjzN!-V~a;ow&l zPq||u#et`WShf+*F#>sNNXoDjli@YM5K4f9hTr%h&+6!@2?JWz@L*N`TPJp~2nnHzZ6rH%Mal^3OTmkHhFXB6w6?PB`5 zP0TlxP(uBJ8j0Bjb zjwB32SHQ**n~NcgeD=k&Z70_jxW+%!KX`}T`@*Gon==wGl$lreji(Y|+Io^O3|#@+ z!N0}{VdTf{_`|D8tL8bI;AQ?390_~JKh01&GMEM8G*UkjV7dm9FbrJ*JGm)rgfQ|y z9FKoPR~RI$9iEYAaPDb@(|Gn&d_4ce-E}SzJkWzAVbFp%?winoWKtM_b{$u7~?+X84TWc3r_-_)DX6B|+yLO4nOUufL zN}I|`iOQRsn~6%FlQcKmc~;uu+|F}TRDbVJIbEVB;j%^pKYF38mE9!aBqyEyzOkD< zn>pwC_!QHqf@rv}d9I2hC zU1+Y*xmvs0p2HTiBz$d^a%FB- zYBP4j+vQVC1ep3MgsJ}Dp(Z%nBoRb)<%zg8f@Zi!WGQDT$ODff9IGUcg&25B&X(1= z#0x!oR4{+6z*@fjiU0wkdA1UwAwaT{lH!>3RQx=WrVYf~DJB#I=5|bT?%MY4#?|UQ mcHSBH=^qU?d7S9jQ5lo_@eo0xEng|o5+oHU$z#+r@&5x1qw(MX literal 0 HcmV?d00001 diff --git a/rust/badmessage/package.json b/rust/badmessage/package.json new file mode 100644 index 00000000..e8d2ad3f --- /dev/null +++ b/rust/badmessage/package.json @@ -0,0 +1,13 @@ +{ + "name": "badmessage", + "version": "1.0.0", + "main": "wasm-text.js", + "license": "MIT", + "scripts": { + "wasm": "0x -D prof automerge-wasm.js", + "js": "0x -D prof 
automerge-js.js" + }, + "devDependencies": { + "0x": "^5.4.1" + } +} diff --git a/rust/badmessage/src/main.rs b/rust/badmessage/src/main.rs new file mode 100644 index 00000000..a67005c7 --- /dev/null +++ b/rust/badmessage/src/main.rs @@ -0,0 +1,25 @@ +use automerge::sync; +use automerge::{Automerge, AutomergeError}; +use std::fs; +use std::time::Instant; + +fn main() -> Result<(), AutomergeError> { + let contents = fs::read("badmessage").expect("cant read badmessage file"); + let mut doc = Automerge::new(); + let mut state = sync::State::new(); + let now = Instant::now(); + // decode and receive happen at the same time in wasm so lets keep it apples to apples + let message = sync::Message::decode(contents.as_slice()).expect("cant decode message"); + doc.receive_sync_message(&mut state, message).unwrap(); + println!("decode/receive in {} ms", now.elapsed().as_millis()); + let now = Instant::now(); + let saved = doc.save(); + println!("save in {} ms", now.elapsed().as_millis()); + let now = Instant::now(); + let _ = Automerge::load(&saved).unwrap(); + println!("load in {} ms", now.elapsed().as_millis()); + let mut doc2 = Automerge::new(); + doc2.load_incremental(saved.as_slice()).unwrap(); + println!("load_incremental in {} ms", now.elapsed().as_millis()); + Ok(()) +} From 1222fc0df130a9883e3a967ba57b2df05d94b7ff Mon Sep 17 00:00:00 2001 From: Orion Henry Date: Sat, 10 Dec 2022 02:36:05 -0800 Subject: [PATCH 03/72] rewrite opnode to store usize instead of Op (#471) --- rust/automerge/src/op_tree.rs | 577 +++--------------- rust/automerge/src/op_tree/iter.rs | 16 +- rust/automerge/src/op_tree/node.rs | 480 +++++++++++++++ rust/automerge/src/query.rs | 9 +- rust/automerge/src/query/elem_id_pos.rs | 4 +- rust/automerge/src/query/insert.rs | 4 +- rust/automerge/src/query/keys.rs | 14 +- rust/automerge/src/query/keys_at.rs | 14 +- rust/automerge/src/query/len.rs | 4 +- rust/automerge/src/query/list_range.rs | 12 +- rust/automerge/src/query/list_range_at.rs | 12 +- rust/automerge/src/query/list_vals.rs | 4 +- rust/automerge/src/query/map_range.rs | 14 +- rust/automerge/src/query/map_range_at.rs | 14 +- rust/automerge/src/query/nth.rs | 4 +- rust/automerge/src/query/opid.rs | 2 +- rust/automerge/src/query/opid_vis.rs | 2 +- rust/automerge/src/query/prop.rs | 3 +- rust/automerge/src/query/prop_at.rs | 5 +- rust/automerge/src/query/seek_op.rs | 11 +- .../automerge/src/query/seek_op_with_patch.rs | 7 +- rust/automerge/src/visualisation.rs | 26 +- rust/edit-trace/automerge-js.js | 12 +- rust/edit-trace/automerge-wasm.js | 10 +- rust/edit-trace/package.json | 4 +- rust/edit-trace/src/main.rs | 16 +- 26 files changed, 682 insertions(+), 598 deletions(-) create mode 100644 rust/automerge/src/op_tree/node.rs diff --git a/rust/automerge/src/op_tree.rs b/rust/automerge/src/op_tree.rs index fae229e2..909a75a7 100644 --- a/rust/automerge/src/op_tree.rs +++ b/rust/automerge/src/op_tree.rs @@ -1,14 +1,9 @@ -use std::{ - cmp::{min, Ordering}, - fmt::Debug, - mem, - ops::RangeBounds, -}; +use std::{fmt::Debug, mem, ops::RangeBounds}; pub(crate) use crate::op_set::OpSetMetadata; use crate::{ clock::Clock, - query::{self, ChangeVisibility, Index, QueryResult, TreeQuery}, + query::{self, ChangeVisibility, QueryResult, TreeQuery}, }; use crate::{ types::{ObjId, Op, OpId}, @@ -16,10 +11,12 @@ use crate::{ }; use std::collections::HashSet; -pub(crate) const B: usize = 16; - mod iter; +mod node; + pub(crate) use iter::OpTreeIter; +#[allow(unused)] +pub(crate) use node::{OpTreeNode, B}; #[derive(Debug, Clone, PartialEq)] 
pub(crate) struct OpTree { @@ -56,20 +53,16 @@ impl OpTree { #[derive(Clone, Debug)] pub(crate) struct OpTreeInternal { pub(crate) root_node: Option, -} - -#[derive(Clone, Debug)] -pub(crate) struct OpTreeNode { - pub(crate) children: Vec, - pub(crate) elements: Vec, - pub(crate) index: Index, - length: usize, + pub(crate) ops: Vec, } impl OpTreeInternal { /// Construct a new, empty, sequence. pub(crate) fn new() -> Self { - Self { root_node: None } + Self { + root_node: None, + ops: vec![], + } } /// Get the length of the sequence. @@ -78,13 +71,19 @@ impl OpTreeInternal { } pub(crate) fn keys(&self) -> Option> { - self.root_node.as_ref().map(query::Keys::new) + if self.root_node.is_some() { + Some(query::Keys::new(self)) + } else { + None + } } pub(crate) fn keys_at(&self, clock: Clock) -> Option> { - self.root_node - .as_ref() - .map(|root| query::KeysAt::new(root, clock)) + if self.root_node.is_some() { + Some(query::KeysAt::new(self, clock)) + } else { + None + } } pub(crate) fn map_range<'a, R: RangeBounds>( @@ -92,9 +91,11 @@ impl OpTreeInternal { range: R, meta: &'a OpSetMetadata, ) -> Option> { - self.root_node - .as_ref() - .map(|node| query::MapRange::new(range, node, meta)) + if self.root_node.is_some() { + Some(query::MapRange::new(range, self, meta)) + } else { + None + } } pub(crate) fn map_range_at<'a, R: RangeBounds>( @@ -103,18 +104,22 @@ impl OpTreeInternal { meta: &'a OpSetMetadata, clock: Clock, ) -> Option> { - self.root_node - .as_ref() - .map(|node| query::MapRangeAt::new(range, node, meta, clock)) + if self.root_node.is_some() { + Some(query::MapRangeAt::new(range, self, meta, clock)) + } else { + None + } } pub(crate) fn list_range>( &self, range: R, ) -> Option> { - self.root_node - .as_ref() - .map(|node| query::ListRange::new(range, node)) + if self.root_node.is_some() { + Some(query::ListRange::new(range, self)) + } else { + None + } } pub(crate) fn list_range_at>( @@ -122,22 +127,24 @@ impl OpTreeInternal { range: R, clock: Clock, ) -> Option> { - self.root_node - .as_ref() - .map(|node| query::ListRangeAt::new(range, clock, node)) + if self.root_node.is_some() { + Some(query::ListRangeAt::new(range, clock, self)) + } else { + None + } } pub(crate) fn search<'a, 'b: 'a, Q>(&'b self, mut query: Q, m: &OpSetMetadata) -> Q where Q: TreeQuery<'a>, { - self.root_node - .as_ref() - .map(|root| match query.query_node_with_metadata(root, m) { - QueryResult::Descend => root.search(&mut query, m, None), - QueryResult::Skip(skip) => root.search(&mut query, m, Some(skip)), + self.root_node.as_ref().map(|root| { + match query.query_node_with_metadata(root, m, &self.ops) { + QueryResult::Descend => root.search(&mut query, m, &self.ops, None), + QueryResult::Skip(skip) => root.search(&mut query, m, &self.ops, Some(skip)), _ => true, - }); + } + }); query } @@ -151,7 +158,7 @@ impl OpTreeInternal { /// # Panics /// /// Panics if `index > len`. 
- pub(crate) fn insert(&mut self, index: usize, element: Op) { + pub(crate) fn insert(&mut self, index: usize, op: Op) { assert!( index <= self.len(), "tried to insert at {} but len is {}", @@ -159,6 +166,9 @@ impl OpTreeInternal { self.len() ); + let element = self.ops.len(); + self.ops.push(op); + let old_len = self.len(); if let Some(root) = self.root_node.as_mut() { #[cfg(debug_assertions)] @@ -174,7 +184,7 @@ impl OpTreeInternal { root.length += old_root.len(); root.index = old_root.index.clone(); root.children.push(old_root); - root.split_child(0); + root.split_child(0, &self.ops); assert_eq!(original_len, root.len()); @@ -187,14 +197,14 @@ impl OpTreeInternal { (&mut root.children[0], index) }; root.length += 1; - root.index.insert(&element); - child.insert_into_non_full_node(insertion_index, element) + root.index.insert(&self.ops[element]); + child.insert_into_non_full_node(insertion_index, element, &self.ops) } else { - root.insert_into_non_full_node(index, element) + root.insert_into_non_full_node(index, element, &self.ops) } } else { let mut root = OpTreeNode::new(); - root.insert_into_non_full_node(index, element); + root.insert_into_non_full_node(index, element, &self.ops); self.root_node = Some(root) } assert_eq!(self.len(), old_len + 1, "{:#?}", self); @@ -202,16 +212,28 @@ impl OpTreeInternal { /// Get the `element` at `index` in the sequence. pub(crate) fn get(&self, index: usize) -> Option<&Op> { - self.root_node.as_ref().and_then(|n| n.get(index)) + self.root_node + .as_ref() + .and_then(|n| n.get(index)) + .map(|n| &self.ops[n]) } // this replaces get_mut() because it allows the indexes to update correctly pub(crate) fn update(&mut self, index: usize, f: F) where - F: FnMut(&mut Op), + F: FnOnce(&mut Op), { if self.len() > index { - self.root_node.as_mut().unwrap().update(index, f); + let n = self.root_node.as_ref().unwrap().get(index).unwrap(); + let new_element = self.ops.get_mut(n).unwrap(); + let old_vis = new_element.visible(); + f(new_element); + let vis = ChangeVisibility { + old_vis, + new_vis: new_element.visible(), + op: new_element, + }; + self.root_node.as_mut().unwrap().update(index, vis); } } @@ -224,7 +246,7 @@ impl OpTreeInternal { if let Some(root) = self.root_node.as_mut() { #[cfg(debug_assertions)] let len = root.check(); - let old = root.remove(index); + let old = root.remove(index, &self.ops); if root.elements.is_empty() { if root.is_leaf() { @@ -236,466 +258,13 @@ impl OpTreeInternal { #[cfg(debug_assertions)] debug_assert_eq!(len, self.root_node.as_ref().map_or(0, |r| r.check()) + 1); - old + self.ops[old].clone() } else { panic!("remove from empty tree") } } } -impl OpTreeNode { - fn new() -> Self { - Self { - elements: Vec::new(), - children: Vec::new(), - index: Default::default(), - length: 0, - } - } - - pub(crate) fn search<'a, 'b: 'a, Q>( - &'b self, - query: &mut Q, - m: &OpSetMetadata, - skip: Option, - ) -> bool - where - Q: TreeQuery<'a>, - { - if self.is_leaf() { - let skip = skip.unwrap_or(0); - for e in self.elements.iter().skip(skip) { - if query.query_element_with_metadata(e, m) == QueryResult::Finish { - return true; - } - } - false - } else { - let mut skip = skip.unwrap_or(0); - for (child_index, child) in self.children.iter().enumerate() { - match skip.cmp(&child.len()) { - Ordering::Greater => { - // not in this child at all - // take off the number of elements in the child as well as the next element - skip -= child.len() + 1; - } - Ordering::Equal => { - // just try the element - skip -= child.len(); - if let Some(e) = 
self.elements.get(child_index) { - if query.query_element_with_metadata(e, m) == QueryResult::Finish { - return true; - } - } - } - Ordering::Less => { - // descend and try find it - match query.query_node_with_metadata(child, m) { - QueryResult::Descend => { - // search in the child node, passing in the number of items left to - // skip - if child.search(query, m, Some(skip)) { - return true; - } - } - QueryResult::Finish => return true, - QueryResult::Next => (), - QueryResult::Skip(_) => panic!("had skip from non-root node"), - } - if let Some(e) = self.elements.get(child_index) { - if query.query_element_with_metadata(e, m) == QueryResult::Finish { - return true; - } - } - // reset the skip to zero so we continue iterating normally - skip = 0; - } - } - } - false - } - } - - pub(crate) fn len(&self) -> usize { - self.length - } - - fn reindex(&mut self) { - let mut index = Index::new(); - for c in &self.children { - index.merge(&c.index); - } - for e in &self.elements { - index.insert(e); - } - self.index = index - } - - fn is_leaf(&self) -> bool { - self.children.is_empty() - } - - fn is_full(&self) -> bool { - self.elements.len() >= 2 * B - 1 - } - - /// Returns the child index and the given index adjusted for the cumulative index before that - /// child. - fn find_child_index(&self, index: usize) -> (usize, usize) { - let mut cumulative_len = 0; - for (child_index, child) in self.children.iter().enumerate() { - if cumulative_len + child.len() >= index { - return (child_index, index - cumulative_len); - } else { - cumulative_len += child.len() + 1; - } - } - panic!("index {} not found in node with len {}", index, self.len()) - } - - fn insert_into_non_full_node(&mut self, index: usize, element: Op) { - assert!(!self.is_full()); - - self.index.insert(&element); - - if self.is_leaf() { - self.length += 1; - self.elements.insert(index, element); - } else { - let (child_index, sub_index) = self.find_child_index(index); - let child = &mut self.children[child_index]; - - if child.is_full() { - self.split_child(child_index); - - // child structure has changed so we need to find the index again - let (child_index, sub_index) = self.find_child_index(index); - let child = &mut self.children[child_index]; - child.insert_into_non_full_node(sub_index, element); - } else { - child.insert_into_non_full_node(sub_index, element); - } - self.length += 1; - } - } - - // A utility function to split the child `full_child_index` of this node - // Note that `full_child_index` must be full when this function is called. - fn split_child(&mut self, full_child_index: usize) { - let original_len_self = self.len(); - - let full_child = &mut self.children[full_child_index]; - - // Create a new node which is going to store (B-1) keys - // of the full child. 
- let mut successor_sibling = OpTreeNode::new(); - - let original_len = full_child.len(); - assert!(full_child.is_full()); - - successor_sibling.elements = full_child.elements.split_off(B); - - if !full_child.is_leaf() { - successor_sibling.children = full_child.children.split_off(B); - } - - let middle = full_child.elements.pop().unwrap(); - - full_child.length = - full_child.elements.len() + full_child.children.iter().map(|c| c.len()).sum::(); - - successor_sibling.length = successor_sibling.elements.len() - + successor_sibling - .children - .iter() - .map(|c| c.len()) - .sum::(); - - let z_len = successor_sibling.len(); - - let full_child_len = full_child.len(); - - full_child.reindex(); - successor_sibling.reindex(); - - self.children - .insert(full_child_index + 1, successor_sibling); - - self.elements.insert(full_child_index, middle); - - assert_eq!(full_child_len + z_len + 1, original_len, "{:#?}", self); - - assert_eq!(original_len_self, self.len()); - } - - fn remove_from_leaf(&mut self, index: usize) -> Op { - self.length -= 1; - self.elements.remove(index) - } - - fn remove_element_from_non_leaf(&mut self, index: usize, element_index: usize) -> Op { - self.length -= 1; - if self.children[element_index].elements.len() >= B { - let total_index = self.cumulative_index(element_index); - // recursively delete index - 1 in predecessor_node - let predecessor = self.children[element_index].remove(index - 1 - total_index); - // replace element with that one - mem::replace(&mut self.elements[element_index], predecessor) - } else if self.children[element_index + 1].elements.len() >= B { - // recursively delete index + 1 in successor_node - let total_index = self.cumulative_index(element_index + 1); - let successor = self.children[element_index + 1].remove(index + 1 - total_index); - // replace element with that one - mem::replace(&mut self.elements[element_index], successor) - } else { - let middle_element = self.elements.remove(element_index); - let successor_child = self.children.remove(element_index + 1); - self.children[element_index].merge(middle_element, successor_child); - - let total_index = self.cumulative_index(element_index); - self.children[element_index].remove(index - total_index) - } - } - - fn cumulative_index(&self, child_index: usize) -> usize { - self.children[0..child_index] - .iter() - .map(|c| c.len() + 1) - .sum() - } - - fn remove_from_internal_child(&mut self, index: usize, mut child_index: usize) -> Op { - if self.children[child_index].elements.len() < B - && if child_index > 0 { - self.children[child_index - 1].elements.len() < B - } else { - true - } - && if child_index + 1 < self.children.len() { - self.children[child_index + 1].elements.len() < B - } else { - true - } - { - // if the child and its immediate siblings have B-1 elements merge the child - // with one sibling, moving an element from this node into the new merged node - // to be the median - - if child_index > 0 { - let middle = self.elements.remove(child_index - 1); - - // use the predessor sibling - let successor = self.children.remove(child_index); - child_index -= 1; - - self.children[child_index].merge(middle, successor); - } else { - let middle = self.elements.remove(child_index); - - // use the sucessor sibling - let successor = self.children.remove(child_index + 1); - - self.children[child_index].merge(middle, successor); - } - } else if self.children[child_index].elements.len() < B { - if child_index > 0 - && self - .children - .get(child_index - 1) - .map_or(false, |c| c.elements.len() >= 
B) - { - let last_element = self.children[child_index - 1].elements.pop().unwrap(); - assert!(!self.children[child_index - 1].elements.is_empty()); - self.children[child_index - 1].length -= 1; - self.children[child_index - 1].index.remove(&last_element); - - let parent_element = - mem::replace(&mut self.elements[child_index - 1], last_element); - - self.children[child_index].index.insert(&parent_element); - self.children[child_index] - .elements - .insert(0, parent_element); - self.children[child_index].length += 1; - - if let Some(last_child) = self.children[child_index - 1].children.pop() { - self.children[child_index - 1].length -= last_child.len(); - self.children[child_index - 1].reindex(); - self.children[child_index].length += last_child.len(); - self.children[child_index].children.insert(0, last_child); - self.children[child_index].reindex(); - } - } else if self - .children - .get(child_index + 1) - .map_or(false, |c| c.elements.len() >= B) - { - let first_element = self.children[child_index + 1].elements.remove(0); - self.children[child_index + 1].index.remove(&first_element); - self.children[child_index + 1].length -= 1; - - assert!(!self.children[child_index + 1].elements.is_empty()); - - let parent_element = mem::replace(&mut self.elements[child_index], first_element); - - self.children[child_index].length += 1; - self.children[child_index].index.insert(&parent_element); - self.children[child_index].elements.push(parent_element); - - if !self.children[child_index + 1].is_leaf() { - let first_child = self.children[child_index + 1].children.remove(0); - self.children[child_index + 1].length -= first_child.len(); - self.children[child_index + 1].reindex(); - self.children[child_index].length += first_child.len(); - - self.children[child_index].children.push(first_child); - self.children[child_index].reindex(); - } - } - } - self.length -= 1; - let total_index = self.cumulative_index(child_index); - self.children[child_index].remove(index - total_index) - } - - fn check(&self) -> usize { - let l = self.elements.len() + self.children.iter().map(|c| c.check()).sum::(); - assert_eq!(self.len(), l, "{:#?}", self); - - l - } - - pub(crate) fn remove(&mut self, index: usize) -> Op { - let original_len = self.len(); - if self.is_leaf() { - let v = self.remove_from_leaf(index); - self.index.remove(&v); - assert_eq!(original_len, self.len() + 1); - debug_assert_eq!(self.check(), self.len()); - v - } else { - let mut total_index = 0; - for (child_index, child) in self.children.iter().enumerate() { - match (total_index + child.len()).cmp(&index) { - Ordering::Less => { - // should be later on in the loop - total_index += child.len() + 1; - continue; - } - Ordering::Equal => { - let v = self.remove_element_from_non_leaf( - index, - min(child_index, self.elements.len() - 1), - ); - self.index.remove(&v); - assert_eq!(original_len, self.len() + 1); - debug_assert_eq!(self.check(), self.len()); - return v; - } - Ordering::Greater => { - let v = self.remove_from_internal_child(index, child_index); - self.index.remove(&v); - assert_eq!(original_len, self.len() + 1); - debug_assert_eq!(self.check(), self.len()); - return v; - } - } - } - panic!( - "index not found to remove {} {} {} {}", - index, - total_index, - self.len(), - self.check() - ); - } - } - - fn merge(&mut self, middle: Op, successor_sibling: OpTreeNode) { - self.index.insert(&middle); - self.index.merge(&successor_sibling.index); - self.elements.push(middle); - self.elements.extend(successor_sibling.elements); - 
self.children.extend(successor_sibling.children); - self.length += successor_sibling.length + 1; - assert!(self.is_full()); - } - - /// Update the operation at the given index using the provided function. - /// - /// This handles updating the indices after the update. - pub(crate) fn update(&mut self, index: usize, f: F) -> ChangeVisibility<'_> - where - F: FnOnce(&mut Op), - { - if self.is_leaf() { - let new_element = self.elements.get_mut(index).unwrap(); - let old_vis = new_element.visible(); - f(new_element); - self.index.change_vis(ChangeVisibility { - old_vis, - new_vis: new_element.visible(), - op: new_element, - }) - } else { - let mut cumulative_len = 0; - let len = self.len(); - for (child_index, child) in self.children.iter_mut().enumerate() { - match (cumulative_len + child.len()).cmp(&index) { - Ordering::Less => { - cumulative_len += child.len() + 1; - } - Ordering::Equal => { - let new_element = self.elements.get_mut(child_index).unwrap(); - let old_vis = new_element.visible(); - f(new_element); - return self.index.change_vis(ChangeVisibility { - old_vis, - new_vis: new_element.visible(), - op: new_element, - }); - } - Ordering::Greater => { - let vis_args = child.update(index - cumulative_len, f); - return self.index.change_vis(vis_args); - } - } - } - panic!("Invalid index to set: {} but len was {}", index, len) - } - } - - pub(crate) fn last(&self) -> &Op { - if self.is_leaf() { - // node is never empty so this is safe - self.elements.last().unwrap() - } else { - // if not a leaf then there is always at least one child - self.children.last().unwrap().last() - } - } - - pub(crate) fn get(&self, index: usize) -> Option<&Op> { - if self.is_leaf() { - return self.elements.get(index); - } else { - let mut cumulative_len = 0; - for (child_index, child) in self.children.iter().enumerate() { - match (cumulative_len + child.len()).cmp(&index) { - Ordering::Less => { - cumulative_len += child.len() + 1; - } - Ordering::Equal => return self.elements.get(child_index), - Ordering::Greater => { - return child.get(index - cumulative_len); - } - } - } - } - None - } -} - impl Default for OpTreeInternal { fn default() -> Self { Self::new() diff --git a/rust/automerge/src/op_tree/iter.rs b/rust/automerge/src/op_tree/iter.rs index 8d070f11..5f2114c8 100644 --- a/rust/automerge/src/op_tree/iter.rs +++ b/rust/automerge/src/op_tree/iter.rs @@ -21,6 +21,7 @@ impl<'a> OpTreeIter<'a> { }, cumulative_index: 0, root_node: root, + ops: &tree.ops, }) .unwrap_or(Inner::Empty), ) @@ -50,6 +51,7 @@ enum Inner<'a> { // How far through the whole optree we are cumulative_index: usize, root_node: &'a OpTreeNode, + ops: &'a [Op], }, } @@ -75,6 +77,7 @@ impl<'a> Iterator for Inner<'a> { Inner::Empty => None, Inner::NonEmpty { ancestors, + ops, current, cumulative_index, .. 
@@ -83,10 +86,10 @@ impl<'a> Iterator for Inner<'a> { // If we're in a leaf node and we haven't exhausted it yet we just return the elements // of the leaf node if current.index < current.node.len() { - let result = ¤t.node.elements[current.index]; + let result = current.node.elements[current.index]; current.index += 1; *cumulative_index += 1; - Some(result) + Some(&ops[result]) } else { // We've exhausted the leaf node, we must find the nearest non-exhausted parent (lol) let node_iter = loop { @@ -113,10 +116,10 @@ impl<'a> Iterator for Inner<'a> { // return the element from the parent node which is one after the index at which we // descended into the child *current = node_iter; - let result = ¤t.node.elements[current.index]; + let result = current.node.elements[current.index]; current.index += 1; *cumulative_index += 1; - Some(result) + Some(&ops[result]) } } else { // If we're in a non-leaf node then the last iteration returned an element from the @@ -147,6 +150,7 @@ impl<'a> Iterator for Inner<'a> { Self::Empty => None, Self::NonEmpty { root_node, + ops, cumulative_index, current, ancestors, @@ -177,7 +181,7 @@ impl<'a> Iterator for Inner<'a> { Ordering::Equal => { *cumulative_index += child.len() + 1; current.index = child_index + 1; - return Some(¤t.node.elements[child_index]); + return Some(&ops[current.node.elements[child_index]]); } Ordering::Greater => { current.index = child_index; @@ -197,7 +201,7 @@ impl<'a> Iterator for Inner<'a> { // we're in a leaf node and we kept track of the cumulative index as we went, let index_in_this_node = n.saturating_sub(*cumulative_index); current.index = index_in_this_node + 1; - Some(¤t.node.elements[index_in_this_node]) + Some(&ops[current.node.elements[index_in_this_node]]) } } } diff --git a/rust/automerge/src/op_tree/node.rs b/rust/automerge/src/op_tree/node.rs new file mode 100644 index 00000000..ea7fbf48 --- /dev/null +++ b/rust/automerge/src/op_tree/node.rs @@ -0,0 +1,480 @@ +use std::{ + cmp::{min, Ordering}, + fmt::Debug, + mem, +}; + +pub(crate) use crate::op_set::OpSetMetadata; +use crate::query::{ChangeVisibility, Index, QueryResult, TreeQuery}; +use crate::types::Op; +pub(crate) const B: usize = 16; + +#[derive(Clone, Debug)] +pub(crate) struct OpTreeNode { + pub(crate) children: Vec, + pub(crate) elements: Vec, + pub(crate) index: Index, + pub(crate) length: usize, +} + +impl OpTreeNode { + pub(crate) fn new() -> Self { + Self { + elements: Vec::new(), + children: Vec::new(), + index: Default::default(), + length: 0, + } + } + + pub(crate) fn search<'a, 'b: 'a, Q>( + &'b self, + query: &mut Q, + m: &OpSetMetadata, + ops: &'a [Op], + skip: Option, + ) -> bool + where + Q: TreeQuery<'a>, + { + if self.is_leaf() { + let skip = skip.unwrap_or(0); + for e in self.elements.iter().skip(skip) { + if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish { + return true; + } + } + false + } else { + let mut skip = skip.unwrap_or(0); + for (child_index, child) in self.children.iter().enumerate() { + match skip.cmp(&child.len()) { + Ordering::Greater => { + // not in this child at all + // take off the number of elements in the child as well as the next element + skip -= child.len() + 1; + } + Ordering::Equal => { + // just try the element + skip -= child.len(); + if let Some(e) = self.elements.get(child_index) { + if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish + { + return true; + } + } + } + Ordering::Less => { + // descend and try find it + match query.query_node_with_metadata(child, m, ops) { + 
QueryResult::Descend => { + // search in the child node, passing in the number of items left to + // skip + if child.search(query, m, ops, Some(skip)) { + return true; + } + } + QueryResult::Finish => return true, + QueryResult::Next => (), + QueryResult::Skip(_) => panic!("had skip from non-root node"), + } + if let Some(e) = self.elements.get(child_index) { + if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish + { + return true; + } + } + // reset the skip to zero so we continue iterating normally + skip = 0; + } + } + } + false + } + } + + pub(crate) fn len(&self) -> usize { + self.length + } + + fn reindex(&mut self, ops: &[Op]) { + let mut index = Index::new(); + for c in &self.children { + index.merge(&c.index); + } + for i in &self.elements { + index.insert(&ops[*i]); + } + self.index = index + } + + pub(crate) fn is_leaf(&self) -> bool { + self.children.is_empty() + } + + pub(crate) fn is_full(&self) -> bool { + self.elements.len() >= 2 * B - 1 + } + + /// Returns the child index and the given index adjusted for the cumulative index before that + /// child. + fn find_child_index(&self, index: usize) -> (usize, usize) { + let mut cumulative_len = 0; + for (child_index, child) in self.children.iter().enumerate() { + if cumulative_len + child.len() >= index { + return (child_index, index - cumulative_len); + } else { + cumulative_len += child.len() + 1; + } + } + panic!("index {} not found in node with len {}", index, self.len()) + } + + pub(crate) fn insert_into_non_full_node(&mut self, index: usize, element: usize, ops: &[Op]) { + assert!(!self.is_full()); + + self.index.insert(&ops[element]); + + if self.is_leaf() { + self.length += 1; + self.elements.insert(index, element); + } else { + let (child_index, sub_index) = self.find_child_index(index); + let child = &mut self.children[child_index]; + + if child.is_full() { + self.split_child(child_index, ops); + + // child structure has changed so we need to find the index again + let (child_index, sub_index) = self.find_child_index(index); + let child = &mut self.children[child_index]; + child.insert_into_non_full_node(sub_index, element, ops); + } else { + child.insert_into_non_full_node(sub_index, element, ops); + } + self.length += 1; + } + } + + // A utility function to split the child `full_child_index` of this node + // Note that `full_child_index` must be full when this function is called. + pub(crate) fn split_child(&mut self, full_child_index: usize, ops: &[Op]) { + let original_len_self = self.len(); + + let full_child = &mut self.children[full_child_index]; + + // Create a new node which is going to store (B-1) keys + // of the full child. 
+ let mut successor_sibling = OpTreeNode::new(); + + let original_len = full_child.len(); + assert!(full_child.is_full()); + + successor_sibling.elements = full_child.elements.split_off(B); + + if !full_child.is_leaf() { + successor_sibling.children = full_child.children.split_off(B); + } + + let middle = full_child.elements.pop().unwrap(); + + full_child.length = + full_child.elements.len() + full_child.children.iter().map(|c| c.len()).sum::(); + + successor_sibling.length = successor_sibling.elements.len() + + successor_sibling + .children + .iter() + .map(|c| c.len()) + .sum::(); + + let z_len = successor_sibling.len(); + + let full_child_len = full_child.len(); + + full_child.reindex(ops); + successor_sibling.reindex(ops); + + self.children + .insert(full_child_index + 1, successor_sibling); + + self.elements.insert(full_child_index, middle); + + assert_eq!(full_child_len + z_len + 1, original_len, "{:#?}", self); + + assert_eq!(original_len_self, self.len()); + } + + fn remove_from_leaf(&mut self, index: usize) -> usize { + self.length -= 1; + self.elements.remove(index) + } + + fn remove_element_from_non_leaf( + &mut self, + index: usize, + element_index: usize, + ops: &[Op], + ) -> usize { + self.length -= 1; + if self.children[element_index].elements.len() >= B { + let total_index = self.cumulative_index(element_index); + // recursively delete index - 1 in predecessor_node + let predecessor = self.children[element_index].remove(index - 1 - total_index, ops); + // replace element with that one + mem::replace(&mut self.elements[element_index], predecessor) + } else if self.children[element_index + 1].elements.len() >= B { + // recursively delete index + 1 in successor_node + let total_index = self.cumulative_index(element_index + 1); + let successor = self.children[element_index + 1].remove(index + 1 - total_index, ops); + // replace element with that one + mem::replace(&mut self.elements[element_index], successor) + } else { + let middle_element = self.elements.remove(element_index); + let successor_child = self.children.remove(element_index + 1); + self.children[element_index].merge(middle_element, successor_child, ops); + + let total_index = self.cumulative_index(element_index); + self.children[element_index].remove(index - total_index, ops) + } + } + + fn cumulative_index(&self, child_index: usize) -> usize { + self.children[0..child_index] + .iter() + .map(|c| c.len() + 1) + .sum() + } + + fn remove_from_internal_child( + &mut self, + index: usize, + mut child_index: usize, + ops: &[Op], + ) -> usize { + if self.children[child_index].elements.len() < B + && if child_index > 0 { + self.children[child_index - 1].elements.len() < B + } else { + true + } + && if child_index + 1 < self.children.len() { + self.children[child_index + 1].elements.len() < B + } else { + true + } + { + // if the child and its immediate siblings have B-1 elements merge the child + // with one sibling, moving an element from this node into the new merged node + // to be the median + + if child_index > 0 { + let middle = self.elements.remove(child_index - 1); + + // use the predessor sibling + let successor = self.children.remove(child_index); + child_index -= 1; + + self.children[child_index].merge(middle, successor, ops); + } else { + let middle = self.elements.remove(child_index); + + // use the sucessor sibling + let successor = self.children.remove(child_index + 1); + + self.children[child_index].merge(middle, successor, ops); + } + } else if self.children[child_index].elements.len() < B { + if 
child_index > 0 + && self + .children + .get(child_index - 1) + .map_or(false, |c| c.elements.len() >= B) + { + let last_element = self.children[child_index - 1].elements.pop().unwrap(); + assert!(!self.children[child_index - 1].elements.is_empty()); + self.children[child_index - 1].length -= 1; + self.children[child_index - 1] + .index + .remove(&ops[last_element]); + + let parent_element = + mem::replace(&mut self.elements[child_index - 1], last_element); + + self.children[child_index] + .index + .insert(&ops[parent_element]); + self.children[child_index] + .elements + .insert(0, parent_element); + self.children[child_index].length += 1; + + if let Some(last_child) = self.children[child_index - 1].children.pop() { + self.children[child_index - 1].length -= last_child.len(); + self.children[child_index - 1].reindex(ops); + self.children[child_index].length += last_child.len(); + self.children[child_index].children.insert(0, last_child); + self.children[child_index].reindex(ops); + } + } else if self + .children + .get(child_index + 1) + .map_or(false, |c| c.elements.len() >= B) + { + let first_element = self.children[child_index + 1].elements.remove(0); + self.children[child_index + 1] + .index + .remove(&ops[first_element]); + self.children[child_index + 1].length -= 1; + + assert!(!self.children[child_index + 1].elements.is_empty()); + + let parent_element = mem::replace(&mut self.elements[child_index], first_element); + + self.children[child_index].length += 1; + self.children[child_index] + .index + .insert(&ops[parent_element]); + self.children[child_index].elements.push(parent_element); + + if !self.children[child_index + 1].is_leaf() { + let first_child = self.children[child_index + 1].children.remove(0); + self.children[child_index + 1].length -= first_child.len(); + self.children[child_index + 1].reindex(ops); + self.children[child_index].length += first_child.len(); + + self.children[child_index].children.push(first_child); + self.children[child_index].reindex(ops); + } + } + } + self.length -= 1; + let total_index = self.cumulative_index(child_index); + self.children[child_index].remove(index - total_index, ops) + } + + pub(crate) fn check(&self) -> usize { + let l = self.elements.len() + self.children.iter().map(|c| c.check()).sum::(); + assert_eq!(self.len(), l, "{:#?}", self); + + l + } + + pub(crate) fn remove(&mut self, index: usize, ops: &[Op]) -> usize { + let original_len = self.len(); + if self.is_leaf() { + let v = self.remove_from_leaf(index); + self.index.remove(&ops[v]); + assert_eq!(original_len, self.len() + 1); + debug_assert_eq!(self.check(), self.len()); + v + } else { + let mut total_index = 0; + for (child_index, child) in self.children.iter().enumerate() { + match (total_index + child.len()).cmp(&index) { + Ordering::Less => { + // should be later on in the loop + total_index += child.len() + 1; + continue; + } + Ordering::Equal => { + let v = self.remove_element_from_non_leaf( + index, + min(child_index, self.elements.len() - 1), + ops, + ); + self.index.remove(&ops[v]); + assert_eq!(original_len, self.len() + 1); + debug_assert_eq!(self.check(), self.len()); + return v; + } + Ordering::Greater => { + let v = self.remove_from_internal_child(index, child_index, ops); + self.index.remove(&ops[v]); + assert_eq!(original_len, self.len() + 1); + debug_assert_eq!(self.check(), self.len()); + return v; + } + } + } + panic!( + "index not found to remove {} {} {} {}", + index, + total_index, + self.len(), + self.check() + ); + } + } + + fn merge(&mut self, middle: 
usize, successor_sibling: OpTreeNode, ops: &[Op]) { + self.index.insert(&ops[middle]); + self.index.merge(&successor_sibling.index); + self.elements.push(middle); + self.elements.extend(successor_sibling.elements); + self.children.extend(successor_sibling.children); + self.length += successor_sibling.length + 1; + assert!(self.is_full()); + } + + /// Update the operation at the given index using the provided function. + /// + /// This handles updating the indices after the update. + pub(crate) fn update<'a>( + &mut self, + index: usize, + vis: ChangeVisibility<'a>, + ) -> ChangeVisibility<'a> { + if self.is_leaf() { + self.index.change_vis(vis) + } else { + let mut cumulative_len = 0; + let len = self.len(); + for (_child_index, child) in self.children.iter_mut().enumerate() { + match (cumulative_len + child.len()).cmp(&index) { + Ordering::Less => { + cumulative_len += child.len() + 1; + } + Ordering::Equal => { + return self.index.change_vis(vis); + } + Ordering::Greater => { + let vis = child.update(index - cumulative_len, vis); + return self.index.change_vis(vis); + } + } + } + panic!("Invalid index to set: {} but len was {}", index, len) + } + } + + pub(crate) fn last(&self) -> usize { + if self.is_leaf() { + // node is never empty so this is safe + *self.elements.last().unwrap() + } else { + // if not a leaf then there is always at least one child + self.children.last().unwrap().last() + } + } + + pub(crate) fn get(&self, index: usize) -> Option { + if self.is_leaf() { + return self.elements.get(index).copied(); + } else { + let mut cumulative_len = 0; + for (child_index, child) in self.children.iter().enumerate() { + match (cumulative_len + child.len()).cmp(&index) { + Ordering::Less => { + cumulative_len += child.len() + 1; + } + Ordering::Equal => return self.elements.get(child_index).copied(), + Ordering::Greater => { + return child.get(index - cumulative_len); + } + } + } + } + None + } +} diff --git a/rust/automerge/src/query.rs b/rust/automerge/src/query.rs index fefac401..9707da33 100644 --- a/rust/automerge/src/query.rs +++ b/rust/automerge/src/query.rs @@ -79,11 +79,12 @@ pub(crate) trait TreeQuery<'a>: Clone + Debug { &mut self, child: &'a OpTreeNode, _m: &OpSetMetadata, + ops: &[Op], ) -> QueryResult { - self.query_node(child) + self.query_node(child, ops) } - fn query_node(&mut self, _child: &'a OpTreeNode) -> QueryResult { + fn query_node(&mut self, _child: &'a OpTreeNode, _ops: &[Op]) -> QueryResult { QueryResult::Descend } @@ -291,7 +292,7 @@ impl VisWindow { } } -pub(crate) fn binary_search_by(node: &OpTreeNode, f: F) -> usize +pub(crate) fn binary_search_by(node: &OpTreeNode, ops: &[Op], f: F) -> usize where F: Fn(&Op) -> Ordering, { @@ -299,7 +300,7 @@ where let mut left = 0; while left < right { let seq = (left + right) / 2; - if f(node.get(seq).unwrap()) == Ordering::Less { + if f(&ops[node.get(seq).unwrap()]) == Ordering::Less { left = seq + 1; } else { right = seq; diff --git a/rust/automerge/src/query/elem_id_pos.rs b/rust/automerge/src/query/elem_id_pos.rs index 250501fe..8eecd7e0 100644 --- a/rust/automerge/src/query/elem_id_pos.rs +++ b/rust/automerge/src/query/elem_id_pos.rs @@ -1,6 +1,6 @@ use crate::{ op_tree::OpTreeNode, - types::{ElemId, Key, ListEncoding}, + types::{ElemId, Key, ListEncoding, Op}, }; use super::{QueryResult, TreeQuery}; @@ -34,7 +34,7 @@ impl ElemIdPos { } impl<'a> TreeQuery<'a> for ElemIdPos { - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, _ops: &[Op]) -> QueryResult { 
// if index has our element then we can continue if child.index.has_visible(&Key::Seq(self.elemid)) { // element is in this node somewhere diff --git a/rust/automerge/src/query/insert.rs b/rust/automerge/src/query/insert.rs index 12fae5b8..0dc0e98d 100644 --- a/rust/automerge/src/query/insert.rs +++ b/rust/automerge/src/query/insert.rs @@ -71,7 +71,7 @@ impl<'a> TreeQuery<'a> for InsertNth { false } - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, ops: &[Op]) -> QueryResult { // if this node has some visible elements then we may find our target within let mut num_vis = child.index.visible_len(self.encoding); if let Some(last_seen) = self.last_seen { @@ -94,7 +94,7 @@ impl<'a> TreeQuery<'a> for InsertNth { // - the insert was at a previous node and this is a long run of overwrites so last_seen should already be set correctly // - the visible op is in this node and the elemid references it so it can be set here // - the visible op is in a future node and so it will be counted as seen there - let last_elemid = child.last().elemid_or_key(); + let last_elemid = ops[child.last()].elemid_or_key(); if child.index.has_visible(&last_elemid) { self.last_seen = Some(last_elemid); } diff --git a/rust/automerge/src/query/keys.rs b/rust/automerge/src/query/keys.rs index 30436f31..edda4fe9 100644 --- a/rust/automerge/src/query/keys.rs +++ b/rust/automerge/src/query/keys.rs @@ -1,4 +1,4 @@ -use crate::op_tree::OpTreeNode; +use crate::op_tree::OpTreeInternal; use crate::types::Key; use std::fmt::Debug; @@ -8,17 +8,17 @@ pub(crate) struct Keys<'a> { last_key: Option, index_back: usize, last_key_back: Option, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, } impl<'a> Keys<'a> { - pub(crate) fn new(root_child: &'a OpTreeNode) -> Self { + pub(crate) fn new(op_tree: &'a OpTreeInternal) -> Self { Self { index: 0, last_key: None, - index_back: root_child.len(), + index_back: op_tree.len(), last_key_back: None, - root_child, + op_tree, } } } @@ -28,7 +28,7 @@ impl<'a> Iterator for Keys<'a> { fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; self.index += 1; if Some(op.elemid_or_key()) != self.last_key && op.visible() { self.last_key = Some(op.elemid_or_key()); @@ -42,7 +42,7 @@ impl<'a> Iterator for Keys<'a> { impl<'a> DoubleEndedIterator for Keys<'a> { fn next_back(&mut self) -> Option { for i in (self.index..self.index_back).rev() { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; self.index_back -= 1; if Some(op.elemid_or_key()) != self.last_key_back && op.visible() { self.last_key_back = Some(op.elemid_or_key()); diff --git a/rust/automerge/src/query/keys_at.rs b/rust/automerge/src/query/keys_at.rs index 71da2927..bf5b5e0e 100644 --- a/rust/automerge/src/query/keys_at.rs +++ b/rust/automerge/src/query/keys_at.rs @@ -1,4 +1,4 @@ -use crate::op_tree::OpTreeNode; +use crate::op_tree::OpTreeInternal; use crate::query::VisWindow; use crate::types::{Clock, Key}; use std::fmt::Debug; @@ -11,19 +11,19 @@ pub(crate) struct KeysAt<'a> { last_key: Option, index_back: usize, last_key_back: Option, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, } impl<'a> KeysAt<'a> { - pub(crate) fn new(root_child: &'a OpTreeNode, clock: Clock) -> Self { + pub(crate) fn new(op_tree: &'a OpTreeInternal, clock: Clock) -> Self { Self { clock, window: VisWindow::default(), index: 0, last_key: None, - index_back: root_child.len(), + index_back: 
op_tree.len(), last_key_back: None, - root_child, + op_tree, } } } @@ -33,7 +33,7 @@ impl<'a> Iterator for KeysAt<'a> { fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; let visible = self.window.visible_at(op, i, &self.clock); self.index += 1; if Some(op.elemid_or_key()) != self.last_key && visible { @@ -48,7 +48,7 @@ impl<'a> Iterator for KeysAt<'a> { impl<'a> DoubleEndedIterator for KeysAt<'a> { fn next_back(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; let visible = self.window.visible_at(op, i, &self.clock); self.index_back -= 1; if Some(op.elemid_or_key()) != self.last_key_back && visible { diff --git a/rust/automerge/src/query/len.rs b/rust/automerge/src/query/len.rs index 0dce4f85..9134b11f 100644 --- a/rust/automerge/src/query/len.rs +++ b/rust/automerge/src/query/len.rs @@ -1,6 +1,6 @@ use crate::op_tree::OpTreeNode; use crate::query::{QueryResult, TreeQuery}; -use crate::types::ListEncoding; +use crate::types::{ListEncoding, Op}; use std::fmt::Debug; #[derive(Debug, Clone, PartialEq)] @@ -16,7 +16,7 @@ impl Len { } impl<'a> TreeQuery<'a> for Len { - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, _ops: &[Op]) -> QueryResult { self.len = child.index.visible_len(self.encoding); QueryResult::Finish } diff --git a/rust/automerge/src/query/list_range.rs b/rust/automerge/src/query/list_range.rs index d3206af3..d01082ab 100644 --- a/rust/automerge/src/query/list_range.rs +++ b/rust/automerge/src/query/list_range.rs @@ -1,5 +1,5 @@ use crate::exid::ExId; -use crate::op_tree::OpTreeNode; +use crate::op_tree::OpTreeInternal; use crate::types::{ElemId, OpId}; use crate::values::ValueIter; use crate::{Automerge, Value}; @@ -14,19 +14,19 @@ pub(crate) struct ListRange<'a, R: RangeBounds> { last_elemid: Option, next_result: Option<(usize, Value<'a>, OpId)>, index_back: usize, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, } impl<'a, R: RangeBounds> ListRange<'a, R> { - pub(crate) fn new(range: R, root_child: &'a OpTreeNode) -> Self { + pub(crate) fn new(range: R, op_tree: &'a OpTreeInternal) -> Self { Self { range, index: 0, // FIXME root_child.seek_to_pos(range.start) pos: 0, // FIXME range.start last_elemid: None, next_result: None, - index_back: root_child.len(), - root_child, + index_back: op_tree.len(), + op_tree, } } } @@ -45,7 +45,7 @@ impl<'a, R: RangeBounds> Iterator for ListRange<'a, R> { // point and stop at the end point and not needless scan all the ops before and after the range fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; self.index += 1; if op.visible() { if op.elemid() != self.last_elemid { diff --git a/rust/automerge/src/query/list_range_at.rs b/rust/automerge/src/query/list_range_at.rs index 5c7257af..33cdf548 100644 --- a/rust/automerge/src/query/list_range_at.rs +++ b/rust/automerge/src/query/list_range_at.rs @@ -1,6 +1,6 @@ use super::VisWindow; use crate::exid::ExId; -use crate::op_tree::OpTreeNode; +use crate::op_tree::OpTreeInternal; use crate::types::{Clock, ElemId, OpId}; use crate::values::ValueIter; use crate::{Automerge, Value}; @@ -15,7 +15,7 @@ pub(crate) struct ListRangeAt<'a, R: RangeBounds> { last_elemid: Option, next_result: Option<(usize, Value<'a>, OpId)>, index_back: usize, - root_child: &'a OpTreeNode, + op_tree: &'a 
OpTreeInternal, clock: Clock, window: VisWindow, } @@ -27,15 +27,15 @@ impl<'a, R: RangeBounds> ValueIter<'a> for ListRangeAt<'a, R> { } impl<'a, R: RangeBounds> ListRangeAt<'a, R> { - pub(crate) fn new(range: R, clock: Clock, root_child: &'a OpTreeNode) -> Self { + pub(crate) fn new(range: R, clock: Clock, op_tree: &'a OpTreeInternal) -> Self { Self { range, index: 0, // FIXME root_child.seek_to_pos(range.start) pos: 0, // FIXME range.start last_elemid: None, next_result: None, - index_back: root_child.len(), - root_child, + index_back: op_tree.len(), + op_tree, clock, window: VisWindow::default(), } @@ -47,7 +47,7 @@ impl<'a, R: RangeBounds> Iterator for ListRangeAt<'a, R> { fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; let visible = self.window.visible_at(op, i, &self.clock); self.index += 1; if visible { diff --git a/rust/automerge/src/query/list_vals.rs b/rust/automerge/src/query/list_vals.rs index 4ad2f47b..6c056621 100644 --- a/rust/automerge/src/query/list_vals.rs +++ b/rust/automerge/src/query/list_vals.rs @@ -19,10 +19,10 @@ impl ListVals { } impl<'a> TreeQuery<'a> for ListVals { - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, ops: &[Op]) -> QueryResult { let start = 0; for pos in start..child.len() { - let op = child.get(pos).unwrap(); + let op = &ops[child.get(pos).unwrap()]; if op.insert { self.last_elem = None; } diff --git a/rust/automerge/src/query/map_range.rs b/rust/automerge/src/query/map_range.rs index 81334ca4..909312db 100644 --- a/rust/automerge/src/query/map_range.rs +++ b/rust/automerge/src/query/map_range.rs @@ -1,5 +1,5 @@ use crate::exid::ExId; -use crate::op_tree::{OpSetMetadata, OpTreeNode}; +use crate::op_tree::{OpSetMetadata, OpTreeInternal}; use crate::types::{Key, OpId}; use crate::values::ValueIter; use crate::{Automerge, Value}; @@ -14,7 +14,7 @@ pub(crate) struct MapRange<'a, R: RangeBounds> { next_result: Option<(&'a str, Value<'a>, OpId)>, index_back: usize, last_key_back: Option, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, meta: &'a OpSetMetadata, } @@ -25,15 +25,15 @@ impl<'a, R: RangeBounds> ValueIter<'a> for MapRange<'a, R> { } impl<'a, R: RangeBounds> MapRange<'a, R> { - pub(crate) fn new(range: R, root_child: &'a OpTreeNode, meta: &'a OpSetMetadata) -> Self { + pub(crate) fn new(range: R, op_tree: &'a OpTreeInternal, meta: &'a OpSetMetadata) -> Self { Self { range, index: 0, last_key: None, next_result: None, - index_back: root_child.len(), + index_back: op_tree.len(), last_key_back: None, - root_child, + op_tree, meta, } } @@ -47,7 +47,7 @@ impl<'a, R: RangeBounds> Iterator for MapRange<'a, R> { // point and stop at the end point and not needless scan all the ops before and after the range fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; self.index += 1; if op.visible() { let prop = match op.key { @@ -72,7 +72,7 @@ impl<'a, R: RangeBounds> Iterator for MapRange<'a, R> { impl<'a, R: RangeBounds> DoubleEndedIterator for MapRange<'a, R> { fn next_back(&mut self) -> Option { for i in (self.index..self.index_back).rev() { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; self.index_back -= 1; if Some(op.key) != self.last_key_back && op.visible() { diff --git a/rust/automerge/src/query/map_range_at.rs b/rust/automerge/src/query/map_range_at.rs index 84453955..c5c5af06 
100644 --- a/rust/automerge/src/query/map_range_at.rs +++ b/rust/automerge/src/query/map_range_at.rs @@ -1,6 +1,6 @@ use crate::clock::Clock; use crate::exid::ExId; -use crate::op_tree::{OpSetMetadata, OpTreeNode}; +use crate::op_tree::{OpSetMetadata, OpTreeInternal}; use crate::types::{Key, OpId}; use crate::values::ValueIter; use crate::{Automerge, Value}; @@ -22,7 +22,7 @@ pub(crate) struct MapRangeAt<'a, R: RangeBounds> { index_back: usize, last_key_back: Option, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, meta: &'a OpSetMetadata, } @@ -35,7 +35,7 @@ impl<'a, R: RangeBounds> ValueIter<'a> for MapRangeAt<'a, R> { impl<'a, R: RangeBounds> MapRangeAt<'a, R> { pub(crate) fn new( range: R, - root_child: &'a OpTreeNode, + op_tree: &'a OpTreeInternal, meta: &'a OpSetMetadata, clock: Clock, ) -> Self { @@ -46,9 +46,9 @@ impl<'a, R: RangeBounds> MapRangeAt<'a, R> { index: 0, last_key: None, next_result: None, - index_back: root_child.len(), + index_back: op_tree.len(), last_key_back: None, - root_child, + op_tree, meta, } } @@ -59,7 +59,7 @@ impl<'a, R: RangeBounds> Iterator for MapRangeAt<'a, R> { fn next(&mut self) -> Option { for i in self.index..self.index_back { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; let visible = self.window.visible_at(op, i, &self.clock); self.index += 1; if visible { @@ -85,7 +85,7 @@ impl<'a, R: RangeBounds> Iterator for MapRangeAt<'a, R> { impl<'a, R: RangeBounds> DoubleEndedIterator for MapRangeAt<'a, R> { fn next_back(&mut self) -> Option { for i in (self.index..self.index_back).rev() { - let op = self.root_child.get(i)?; + let op = self.op_tree.get(i)?; let visible = self.window.visible_at(op, i, &self.clock); self.index_back -= 1; if Some(op.key) != self.last_key_back && visible { diff --git a/rust/automerge/src/query/nth.rs b/rust/automerge/src/query/nth.rs index a286c4e2..ed374b9b 100644 --- a/rust/automerge/src/query/nth.rs +++ b/rust/automerge/src/query/nth.rs @@ -73,7 +73,7 @@ impl<'a> TreeQuery<'a> for Nth<'a> { false } - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, ops: &[Op]) -> QueryResult { let mut num_vis = child.index.visible_len(self.encoding); if let Some(last_seen) = self.last_seen { if child.index.has_visible(&last_seen) { @@ -94,7 +94,7 @@ impl<'a> TreeQuery<'a> for Nth<'a> { // - the insert was at a previous node and this is a long run of overwrites so last_seen should already be set correctly // - the visible op is in this node and the elemid references it so it can be set here // - the visible op is in a future node and so it will be counted as seen there - let last_elemid = child.last().elemid_or_key(); + let last_elemid = ops[child.last()].elemid_or_key(); if child.index.has_visible(&last_elemid) { self.last_seen = Some(last_elemid); } diff --git a/rust/automerge/src/query/opid.rs b/rust/automerge/src/query/opid.rs index aa3a45e6..3d4c8b24 100644 --- a/rust/automerge/src/query/opid.rs +++ b/rust/automerge/src/query/opid.rs @@ -33,7 +33,7 @@ impl OpIdSearch { } impl<'a> TreeQuery<'a> for OpIdSearch { - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, _ops: &[Op]) -> QueryResult { if child.index.ops.contains(&self.target) { QueryResult::Descend } else { diff --git a/rust/automerge/src/query/opid_vis.rs b/rust/automerge/src/query/opid_vis.rs index 8a4b6a10..c0d2cc89 100644 --- a/rust/automerge/src/query/opid_vis.rs +++ b/rust/automerge/src/query/opid_vis.rs @@ -28,7 +28,7 @@ 
impl OpIdVisSearch { } impl<'a> TreeQuery<'a> for OpIdVisSearch { - fn query_node(&mut self, child: &OpTreeNode) -> QueryResult { + fn query_node(&mut self, child: &OpTreeNode, _ops: &[Op]) -> QueryResult { if child.index.ops.contains(&self.target) { QueryResult::Descend } else { diff --git a/rust/automerge/src/query/prop.rs b/rust/automerge/src/query/prop.rs index 89fa18f0..f6062ec6 100644 --- a/rust/automerge/src/query/prop.rs +++ b/rust/automerge/src/query/prop.rs @@ -37,6 +37,7 @@ impl<'a> TreeQuery<'a> for Prop<'a> { &mut self, child: &'a OpTreeNode, m: &OpSetMetadata, + ops: &[Op], ) -> QueryResult { if let Some(Start { idx: start, @@ -62,7 +63,7 @@ impl<'a> TreeQuery<'a> for Prop<'a> { } } else { // in the root node find the first op position for the key - let start = binary_search_by(child, |op| m.key_cmp(&op.key, &self.key)); + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.key)); self.start = Some(Start { idx: start, optree_len: child.len(), diff --git a/rust/automerge/src/query/prop_at.rs b/rust/automerge/src/query/prop_at.rs index 08b1cb59..f0c2eedc 100644 --- a/rust/automerge/src/query/prop_at.rs +++ b/rust/automerge/src/query/prop_at.rs @@ -29,12 +29,13 @@ impl<'a> TreeQuery<'a> for PropAt { &mut self, child: &'a OpTreeNode, m: &OpSetMetadata, + ops: &[Op], ) -> QueryResult { - let start = binary_search_by(child, |op| m.key_cmp(&op.key, &self.key)); + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.key)); let mut window: VisWindow = Default::default(); self.pos = start; for pos in start..child.len() { - let op = child.get(pos).unwrap(); + let op = &ops[child.get(pos).unwrap()]; if op.key != self.key { break; } diff --git a/rust/automerge/src/query/seek_op.rs b/rust/automerge/src/query/seek_op.rs index 70d52d45..7ca3e9d4 100644 --- a/rust/automerge/src/query/seek_op.rs +++ b/rust/automerge/src/query/seek_op.rs @@ -43,14 +43,19 @@ impl<'a> SeekOp<'a> { } impl<'a> TreeQuery<'a> for SeekOp<'a> { - fn query_node_with_metadata(&mut self, child: &OpTreeNode, m: &OpSetMetadata) -> QueryResult { + fn query_node_with_metadata( + &mut self, + child: &OpTreeNode, + m: &OpSetMetadata, + ops: &[Op], + ) -> QueryResult { if self.found { return QueryResult::Descend; } match self.op.key { Key::Seq(HEAD) => { while self.pos < child.len() { - let op = child.get(self.pos).unwrap(); + let op = &ops[child.get(self.pos).unwrap()]; if op.insert && m.lamport_cmp(op.id, self.op.id) == Ordering::Less { break; } @@ -82,7 +87,7 @@ impl<'a> TreeQuery<'a> for SeekOp<'a> { } } else { // in the root node find the first op position for the key - let start = binary_search_by(child, |op| m.key_cmp(&op.key, &self.op.key)); + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); self.start = Some(start); self.pos = start; QueryResult::Skip(start) diff --git a/rust/automerge/src/query/seek_op_with_patch.rs b/rust/automerge/src/query/seek_op_with_patch.rs index f029c5db..0cc48b37 100644 --- a/rust/automerge/src/query/seek_op_with_patch.rs +++ b/rust/automerge/src/query/seek_op_with_patch.rs @@ -72,6 +72,7 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { &mut self, child: &'a OpTreeNode, m: &OpSetMetadata, + ops: &[Op], ) -> QueryResult { if self.found { return QueryResult::Descend; @@ -82,7 +83,7 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { // the opId of the operation being inserted. 
Key::Seq(e) if e == HEAD => { while self.pos < child.len() { - let op = child.get(self.pos).unwrap(); + let op = &ops[child.get(self.pos).unwrap()]; if op.insert && m.lamport_cmp(op.id, self.op.id) == Ordering::Less { break; } @@ -123,7 +124,7 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { // the last operation's elemId regardless of whether it's visible or not. // This will lead to incorrect counting if `last_seen` is not visible: it's // not counted towards `num_vis`, so we shouldn't be subtracting 1. - self.last_seen = Some(child.last().elemid_or_key()); + self.last_seen = Some(ops[child.last()].elemid_or_key()); } QueryResult::Next } @@ -148,7 +149,7 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { // in the root node find the first op position for the key // Search for the place where we need to insert the new operation. First find the // first op with a key >= the key we're updating - let start = binary_search_by(child, |op| m.key_cmp(&op.key, &self.op.key)); + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); self.start = Some(start); self.pos = start; QueryResult::Skip(start) diff --git a/rust/automerge/src/visualisation.rs b/rust/automerge/src/visualisation.rs index 6894f46f..31e9bbdb 100644 --- a/rust/automerge/src/visualisation.rs +++ b/rust/automerge/src/visualisation.rs @@ -1,4 +1,4 @@ -use crate::types::ObjId; +use crate::types::{ObjId, Op}; use fxhash::FxHasher; use std::{borrow::Cow, collections::HashMap, hash::BuildHasherDefault}; @@ -26,7 +26,7 @@ pub(crate) struct Node<'a> { #[derive(Clone)] pub(crate) enum NodeType<'a> { ObjRoot(crate::types::ObjId), - ObjTreeNode(ObjId, &'a crate::op_tree::OpTreeNode), + ObjTreeNode(ObjId, &'a crate::op_tree::OpTreeNode, &'a [Op]), } #[derive(Clone)] @@ -52,7 +52,13 @@ impl<'a> GraphVisualisation<'a> { let mut nodes = HashMap::new(); for (obj_id, tree) in trees { if let Some(root_node) = &tree.internal.root_node { - let tree_id = Self::construct_nodes(root_node, obj_id, &mut nodes, metadata); + let tree_id = Self::construct_nodes( + root_node, + &tree.internal.ops, + obj_id, + &mut nodes, + metadata, + ); let obj_tree_id = NodeId::default(); nodes.insert( obj_tree_id, @@ -77,6 +83,7 @@ impl<'a> GraphVisualisation<'a> { fn construct_nodes( node: &'a crate::op_tree::OpTreeNode, + ops: &'a [Op], objid: &ObjId, nodes: &mut HashMap>, m: &'a crate::op_set::OpSetMetadata, @@ -84,7 +91,7 @@ impl<'a> GraphVisualisation<'a> { let node_id = NodeId::default(); let mut child_ids = Vec::new(); for child in &node.children { - let child_id = Self::construct_nodes(child, objid, nodes, m); + let child_id = Self::construct_nodes(child, ops, objid, nodes, m); child_ids.push(child_id); } nodes.insert( @@ -92,7 +99,7 @@ impl<'a> GraphVisualisation<'a> { Node { id: node_id, children: child_ids, - node_type: NodeType::ObjTreeNode(*objid, node), + node_type: NodeType::ObjTreeNode(*objid, node, ops), metadata: m, }, ); @@ -138,7 +145,7 @@ impl<'a> dot::Labeller<'a, &'a Node<'a>, Edge> for GraphVisualisation<'a> { fn node_shape(&'a self, node: &&'a Node<'a>) -> Option> { let shape = match node.node_type { - NodeType::ObjTreeNode(_, _) => dot::LabelText::label("none"), + NodeType::ObjTreeNode(_, _, _) => dot::LabelText::label("none"), NodeType::ObjRoot(_) => dot::LabelText::label("ellipse"), }; Some(shape) @@ -146,8 +153,8 @@ impl<'a> dot::Labeller<'a, &'a Node<'a>, Edge> for GraphVisualisation<'a> { fn node_label(&'a self, n: &&Node<'a>) -> dot::LabelText<'a> { match n.node_type { - NodeType::ObjTreeNode(objid, tree_node) 
=> dot::LabelText::HtmlStr( - OpTable::create(tree_node, &objid, n.metadata, &self.actor_shorthands) + NodeType::ObjTreeNode(objid, tree_node, ops) => dot::LabelText::HtmlStr( + OpTable::create(tree_node, ops, &objid, n.metadata, &self.actor_shorthands) .to_html() .into(), ), @@ -165,6 +172,7 @@ struct OpTable { impl OpTable { fn create<'a>( node: &'a crate::op_tree::OpTreeNode, + ops: &'a [Op], obj: &ObjId, metadata: &crate::op_set::OpSetMetadata, actor_shorthands: &HashMap, @@ -172,7 +180,7 @@ impl OpTable { let rows = node .elements .iter() - .map(|e| OpTableRow::create(e, obj, metadata, actor_shorthands)) + .map(|e| OpTableRow::create(&ops[*e], obj, metadata, actor_shorthands)) .collect(); OpTable { rows } } diff --git a/rust/edit-trace/automerge-js.js b/rust/edit-trace/automerge-js.js index 6a6d3389..2956d5d5 100644 --- a/rust/edit-trace/automerge-js.js +++ b/rust/edit-trace/automerge-js.js @@ -2,7 +2,7 @@ const { edits, finalText } = require('./editing-trace') const Automerge = require('../../javascript') -const start = new Date() +let start = new Date() let state = Automerge.from({text: ""}) state = Automerge.change(state, doc => { @@ -14,10 +14,16 @@ state = Automerge.change(state, doc => { Automerge.splice(doc, 'text', ... edit) } }) - -let _ = Automerge.save(state) console.log(`Done in ${new Date() - start} ms`) +start = new Date() +let bytes = Automerge.save(state) +console.log(`Save in ${new Date() - start} ms`) + +start = new Date() +let _load = Automerge.load(bytes) +console.log(`Load in ${new Date() - start} ms`) + if (state.text !== finalText) { throw new RangeError('ERROR: final text did not match expectation') } diff --git a/rust/edit-trace/automerge-wasm.js b/rust/edit-trace/automerge-wasm.js index 82786cd9..8f6f51af 100644 --- a/rust/edit-trace/automerge-wasm.js +++ b/rust/edit-trace/automerge-wasm.js @@ -16,11 +16,17 @@ for (let i = 0; i < edits.length; i++) { doc.splice(text, ...edit) } -let _ = doc.save() - console.log(`Done in ${new Date() - start} ms`) let t_time = new Date() +let saved = doc.save() +console.log(`doc.save in ${new Date() - t_time} ms`) + +t_time = new Date() +Automerge.load(saved) +console.log(`doc.load in ${new Date() - t_time} ms`) + +t_time = new Date() let t = doc.text(text); console.log(`doc.text in ${new Date() - t_time} ms`) diff --git a/rust/edit-trace/package.json b/rust/edit-trace/package.json index a9d1e0e0..acd37ac0 100644 --- a/rust/edit-trace/package.json +++ b/rust/edit-trace/package.json @@ -4,9 +4,9 @@ "main": "wasm-text.js", "license": "MIT", "scripts": { - "wasm": "0x -D prof wasm-text.js" + "wasm": "0x -D prof automerge-wasm.js" }, "devDependencies": { - "0x": "^4.11.0" + "0x": "^5.4.1" } } diff --git a/rust/edit-trace/src/main.rs b/rust/edit-trace/src/main.rs index f6924c7d..debe52db 100644 --- a/rust/edit-trace/src/main.rs +++ b/rust/edit-trace/src/main.rs @@ -28,16 +28,18 @@ fn main() -> Result<(), AutomergeError> { tx.splice_text(&text, pos, del, &vals)?; } tx.commit(); + println!("Done in {} ms", now.elapsed().as_millis()); let save = Instant::now(); - let _bytes = doc.save(); + let bytes = doc.save(); println!("Saved in {} ms", save.elapsed().as_millis()); - /* - let load = Instant::now(); - let _ = Automerge::load(&bytes).unwrap(); - println!("Loaded in {} ms", load.elapsed().as_millis()); - */ + let load = Instant::now(); + let _ = Automerge::load(&bytes).unwrap(); + println!("Loaded in {} ms", load.elapsed().as_millis()); + + let get_txt = Instant::now(); + doc.text(&text)?; + println!("Text in {} ms", 
get_txt.elapsed().as_millis()); - println!("Done in {} ms", now.elapsed().as_millis()); Ok(()) } From b78211ca65ae49b0794b004f80ec8350eb39abcf Mon Sep 17 00:00:00 2001 From: Orion Henry Date: Sun, 11 Dec 2022 10:56:20 -0800 Subject: [PATCH 04/72] change opid to (u32,u32) - 10% performance uptick (#473) --- rust/automerge/src/automerge.rs | 15 ++++---- rust/automerge/src/change.rs | 2 +- rust/automerge/src/clock.rs | 20 +++++------ .../src/columnar/column_range/key.rs | 4 +-- .../src/columnar/column_range/obj_id.rs | 2 +- .../src/columnar/column_range/opid.rs | 2 +- .../src/columnar/column_range/opid_list.rs | 2 +- .../src/columnar/encoding/properties.rs | 2 +- rust/automerge/src/op_set.rs | 14 ++++---- rust/automerge/src/op_tree.rs | 2 +- rust/automerge/src/op_tree/iter.rs | 2 +- rust/automerge/src/transaction/inner.rs | 2 +- rust/automerge/src/types.rs | 36 +++++++++++-------- rust/automerge/src/types/opids.rs | 5 +-- 14 files changed, 60 insertions(+), 50 deletions(-) diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 7a5340e6..5502456c 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -487,7 +487,7 @@ impl Automerge { // do a direct get here b/c this could be foriegn and not be within the array // bounds let obj = if self.ops.m.actors.cache.get(*idx) == Some(actor) { - ObjId(OpId(*ctr, *idx)) + ObjId(OpId::new(*ctr, *idx)) } else { // FIXME - make a real error let idx = self @@ -496,7 +496,7 @@ impl Automerge { .actors .lookup(actor) .ok_or(AutomergeError::Fail)?; - ObjId(OpId(*ctr, idx)) + ObjId(OpId::new(*ctr, idx)) }; if let Some(obj_type) = self.ops.object_type(&obj) { Ok((obj, obj_type)) @@ -859,23 +859,26 @@ impl Automerge { .iter_ops() .enumerate() .map(|(i, c)| { - let id = OpId(change.start_op().get() + i as u64, actor); + let id = OpId::new(change.start_op().get() + i as u64, actor); let key = match &c.key { EncodedKey::Prop(n) => Key::Map(self.ops.m.props.cache(n.to_string())), EncodedKey::Elem(e) if e.is_head() => Key::Seq(ElemId::head()), EncodedKey::Elem(ElemId(o)) => { - Key::Seq(ElemId(OpId::new(actors[o.actor()], o.counter()))) + Key::Seq(ElemId(OpId::new(o.counter(), actors[o.actor()]))) } }; let obj = if c.obj.is_root() { ObjId::root() } else { - ObjId(OpId(c.obj.opid().counter(), actors[c.obj.opid().actor()])) + ObjId(OpId::new( + c.obj.opid().counter(), + actors[c.obj.opid().actor()], + )) }; let pred = c .pred .iter() - .map(|p| OpId::new(actors[p.actor()], p.counter())); + .map(|p| OpId::new(p.counter(), actors[p.actor()])); let pred = self.ops.m.sorted_opids(pred); ( obj, diff --git a/rust/automerge/src/change.rs b/rust/automerge/src/change.rs index 198c68fb..b5cae7df 100644 --- a/rust/automerge/src/change.rs +++ b/rust/automerge/src/change.rs @@ -356,7 +356,7 @@ pub(crate) mod gen { (0_u64..10) .prop_map(|num_ops| { (0..num_ops) - .map(|counter| OpId::new(0, counter)) + .map(|counter| OpId::new(counter, 0)) .collect::>() }) .prop_flat_map(move |opids| { diff --git a/rust/automerge/src/clock.rs b/rust/automerge/src/clock.rs index 11890ffb..79125323 100644 --- a/rust/automerge/src/clock.rs +++ b/rust/automerge/src/clock.rs @@ -59,8 +59,8 @@ impl Clock { } pub(crate) fn covers(&self, id: &OpId) -> bool { - if let Some(data) = self.0.get(&id.1) { - data.max_op >= id.0 + if let Some(data) = self.0.get(&id.actor()) { + data.max_op >= id.counter() } else { false } @@ -123,16 +123,16 @@ mod tests { clock.include(1, ClockData { max_op: 20, seq: 1 }); clock.include(2, ClockData { max_op: 10, seq: 2 }); 
- assert!(clock.covers(&OpId(10, 1))); - assert!(clock.covers(&OpId(20, 1))); - assert!(!clock.covers(&OpId(30, 1))); + assert!(clock.covers(&OpId::new(10, 1))); + assert!(clock.covers(&OpId::new(20, 1))); + assert!(!clock.covers(&OpId::new(30, 1))); - assert!(clock.covers(&OpId(5, 2))); - assert!(clock.covers(&OpId(10, 2))); - assert!(!clock.covers(&OpId(15, 2))); + assert!(clock.covers(&OpId::new(5, 2))); + assert!(clock.covers(&OpId::new(10, 2))); + assert!(!clock.covers(&OpId::new(15, 2))); - assert!(!clock.covers(&OpId(1, 3))); - assert!(!clock.covers(&OpId(100, 3))); + assert!(!clock.covers(&OpId::new(1, 3))); + assert!(!clock.covers(&OpId::new(100, 3))); } #[test] diff --git a/rust/automerge/src/columnar/column_range/key.rs b/rust/automerge/src/columnar/column_range/key.rs index 5283fc39..70ea8e1e 100644 --- a/rust/automerge/src/columnar/column_range/key.rs +++ b/rust/automerge/src/columnar/column_range/key.rs @@ -167,11 +167,11 @@ impl<'a> KeyIter<'a> { Ok(Some(Key::Prop(string))) } (Some(None) | None, Some(Some(0)), Some(None) | None) => { - Ok(Some(Key::Elem(ElemId(OpId(0, 0))))) + Ok(Some(Key::Elem(ElemId(OpId::new(0, 0))))) } (Some(Some(actor)), Some(Some(ctr)), Some(None) | None) => match ctr.try_into() { //Ok(ctr) => Some(Ok(Key::Elem(ElemId(OpId(ctr, actor as usize))))), - Ok(ctr) => Ok(Some(Key::Elem(ElemId(OpId::new(actor as usize, ctr))))), + Ok(ctr) => Ok(Some(Key::Elem(ElemId(OpId::new(ctr, actor as usize))))), Err(_) => Err(DecodeColumnError::invalid_value( "counter", "negative value for counter", diff --git a/rust/automerge/src/columnar/column_range/obj_id.rs b/rust/automerge/src/columnar/column_range/obj_id.rs index f6525b44..6a3e2ef0 100644 --- a/rust/automerge/src/columnar/column_range/obj_id.rs +++ b/rust/automerge/src/columnar/column_range/obj_id.rs @@ -133,7 +133,7 @@ impl<'a> ObjIdIter<'a> { .map_err(|e| DecodeColumnError::decode_raw("counter", e))?; match (actor, counter) { (None | Some(None), None | Some(None)) => Ok(Some(ObjId::root())), - (Some(Some(a)), Some(Some(c))) => Ok(Some(ObjId(OpId(c, a as usize)))), + (Some(Some(a)), Some(Some(c))) => Ok(Some(ObjId(OpId::new(c, a as usize)))), (_, Some(Some(0))) => Ok(Some(ObjId::root())), (Some(None) | None, _) => Err(DecodeColumnError::unexpected_null("actor")), (_, Some(None) | None) => Err(DecodeColumnError::unexpected_null("counter")), diff --git a/rust/automerge/src/columnar/column_range/opid.rs b/rust/automerge/src/columnar/column_range/opid.rs index 592f6041..ae95d758 100644 --- a/rust/automerge/src/columnar/column_range/opid.rs +++ b/rust/automerge/src/columnar/column_range/opid.rs @@ -105,7 +105,7 @@ impl<'a> OpIdIter<'a> { .map_err(|e| DecodeColumnError::decode_raw("counter", e))?; match (actor, counter) { (Some(Some(a)), Some(Some(c))) => match c.try_into() { - Ok(c) => Ok(Some(OpId(c, a as usize))), + Ok(c) => Ok(Some(OpId::new(c, a as usize))), Err(_) => Err(DecodeColumnError::invalid_value( "counter", "negative value encountered", diff --git a/rust/automerge/src/columnar/column_range/opid_list.rs b/rust/automerge/src/columnar/column_range/opid_list.rs index 03b92ccf..12279c08 100644 --- a/rust/automerge/src/columnar/column_range/opid_list.rs +++ b/rust/automerge/src/columnar/column_range/opid_list.rs @@ -203,7 +203,7 @@ impl<'a> OpIdListIter<'a> { .map_err(|e| DecodeColumnError::decode_raw("counter", e))?; match (actor, counter) { (Some(Some(a)), Some(Some(ctr))) => match ctr.try_into() { - Ok(ctr) => p.push(OpId(ctr, a as usize)), + Ok(ctr) => p.push(OpId::new(ctr, a as usize)), Err(_e) => { return 
Err(DecodeColumnError::invalid_value( "counter", diff --git a/rust/automerge/src/columnar/encoding/properties.rs b/rust/automerge/src/columnar/encoding/properties.rs index a6345cad..a3bf1ed0 100644 --- a/rust/automerge/src/columnar/encoding/properties.rs +++ b/rust/automerge/src/columnar/encoding/properties.rs @@ -139,7 +139,7 @@ pub(crate) fn option_splice_scenario< } pub(crate) fn opid() -> impl Strategy + Clone { - (0..(i64::MAX as usize), 0..(i64::MAX as u64)).prop_map(|(actor, ctr)| OpId(ctr, actor)) + (0..(i64::MAX as usize), 0..(i64::MAX as u64)).prop_map(|(actor, ctr)| OpId::new(ctr, actor)) } pub(crate) fn elemid() -> impl Strategy + Clone { diff --git a/rust/automerge/src/op_set.rs b/rust/automerge/src/op_set.rs index 09bc256a..1f5a4486 100644 --- a/rust/automerge/src/op_set.rs +++ b/rust/automerge/src/op_set.rs @@ -55,7 +55,11 @@ impl OpSetInternal { if id == types::ROOT { ExId::Root } else { - ExId::Id(id.0, self.m.actors.cache[id.1].clone(), id.1) + ExId::Id( + id.counter(), + self.m.actors.cache[id.actor()].clone(), + id.actor(), + ) } } @@ -355,13 +359,7 @@ impl OpSetMetadata { } pub(crate) fn lamport_cmp(&self, left: OpId, right: OpId) -> Ordering { - match (left, right) { - (OpId(0, _), OpId(0, _)) => Ordering::Equal, - (OpId(0, _), OpId(_, _)) => Ordering::Less, - (OpId(_, _), OpId(0, _)) => Ordering::Greater, - (OpId(a, x), OpId(b, y)) if a == b => self.actors[x].cmp(&self.actors[y]), - (OpId(a, _), OpId(b, _)) => a.cmp(&b), - } + left.lamport_cmp(&right, &self.actors.cache) } pub(crate) fn sorted_opids>(&self, opids: I) -> OpIds { diff --git a/rust/automerge/src/op_tree.rs b/rust/automerge/src/op_tree.rs index 909a75a7..7de00dc3 100644 --- a/rust/automerge/src/op_tree.rs +++ b/rust/automerge/src/op_tree.rs @@ -325,7 +325,7 @@ mod tests { use super::*; fn op() -> Op { - let zero = OpId(0, 0); + let zero = OpId::new(0, 0); Op { id: zero, action: amp::OpType::Put(0.into()), diff --git a/rust/automerge/src/op_tree/iter.rs b/rust/automerge/src/op_tree/iter.rs index 5f2114c8..0b19f359 100644 --- a/rust/automerge/src/op_tree/iter.rs +++ b/rust/automerge/src/op_tree/iter.rs @@ -262,7 +262,7 @@ mod tests { fn op(counter: u64) -> Op { Op { action: OpType::Put(ScalarValue::Uint(counter)), - id: OpId(counter, 0), + id: OpId::new(counter, 0), key: Key::Map(0), succ: Default::default(), pred: Default::default(), diff --git a/rust/automerge/src/transaction/inner.rs b/rust/automerge/src/transaction/inner.rs index c9567b68..2099acef 100644 --- a/rust/automerge/src/transaction/inner.rs +++ b/rust/automerge/src/transaction/inner.rs @@ -240,7 +240,7 @@ impl TransactionInner { } fn next_id(&mut self) -> OpId { - OpId(self.start_op.get() + self.pending_ops() as u64, self.actor) + OpId::new(self.start_op.get() + self.pending_ops() as u64, self.actor) } fn next_insert(&mut self, key: Key, value: ScalarValue) -> Op { diff --git a/rust/automerge/src/types.rs b/rust/automerge/src/types.rs index b5da60d7..7bbf4353 100644 --- a/rust/automerge/src/types.rs +++ b/rust/automerge/src/types.rs @@ -3,10 +3,12 @@ use crate::legacy as amp; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::cmp::Eq; +use std::cmp::Ordering; use std::fmt; use std::fmt::Display; use std::str::FromStr; use tinyvec::{ArrayVec, TinyVec}; +//use crate::indexed_cache::IndexedCache; mod opids; pub(crate) use opids::OpIds; @@ -253,17 +255,6 @@ pub(crate) trait Exportable { fn export(&self) -> Export; } -impl OpId { - #[inline] - pub(crate) fn counter(&self) -> u64 { - self.0 - } - #[inline] - pub(crate) fn 
actor(&self) -> usize { - self.1 - } -} - impl Exportable for ObjId { fn export(&self) -> Export { if self.0 == ROOT { @@ -421,11 +412,28 @@ impl Key { } #[derive(Debug, Clone, PartialOrd, Ord, Eq, PartialEq, Copy, Hash, Default)] -pub(crate) struct OpId(pub(crate) u64, pub(crate) usize); +pub(crate) struct OpId(u32, u32); impl OpId { - pub(crate) fn new(actor: usize, counter: u64) -> Self { - Self(counter, actor) + pub(crate) fn new(counter: u64, actor: usize) -> Self { + Self(counter as u32, actor as u32) + } + + #[inline] + pub(crate) fn counter(&self) -> u64 { + self.0 as u64 + } + + #[inline] + pub(crate) fn actor(&self) -> usize { + self.1 as usize + } + + #[inline] + pub(crate) fn lamport_cmp(&self, other: &OpId, actors: &[ActorId]) -> Ordering { + self.0 + .cmp(&other.0) + .then_with(|| actors[self.1 as usize].cmp(&actors[other.1 as usize])) } } diff --git a/rust/automerge/src/types/opids.rs b/rust/automerge/src/types/opids.rs index 3ebac93c..eaeed471 100644 --- a/rust/automerge/src/types/opids.rs +++ b/rust/automerge/src/types/opids.rs @@ -129,7 +129,8 @@ mod tests { fn gen_opid(actors: Vec) -> impl Strategy { (0..actors.len()).prop_flat_map(|actor_idx| { - (Just(actor_idx), 0..u64::MAX).prop_map(|(actor_idx, counter)| OpId(counter, actor_idx)) + (Just(actor_idx), 0..u64::MAX) + .prop_map(|(actor_idx, counter)| OpId::new(counter, actor_idx)) }) } @@ -190,7 +191,7 @@ mod tests { (OpId(0, _), OpId(0, _)) => Ordering::Equal, (OpId(0, _), OpId(_, _)) => Ordering::Less, (OpId(_, _), OpId(0, _)) => Ordering::Greater, - (OpId(a, x), OpId(b, y)) if a == b => actors[*x].cmp(&actors[*y]), + (OpId(a, x), OpId(b, y)) if a == b => actors[*x as usize].cmp(&actors[*y as usize]), (OpId(a, _), OpId(b, _)) => a.cmp(b), } } From 3229548fc7393bf55a401e328ab677e14694522e Mon Sep 17 00:00:00 2001 From: Orion Henry Date: Sun, 11 Dec 2022 13:26:00 -0800 Subject: [PATCH 05/72] update js dependencies and some lint errors (#474) --- javascript/package.json | 20 +++++----- javascript/src/index.ts | 29 ++++++++------- javascript/src/proxies.ts | 61 ++++--------------------------- rust/automerge-wasm/package.json | 21 +++++------ rust/automerge-wasm/test/apply.ts | 6 +-- rust/automerge-wasm/test/test.ts | 16 ++++---- 6 files changed, 54 insertions(+), 99 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index 0dae9684..5fd2213e 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -45,21 +45,21 @@ }, "devDependencies": { "@types/expect": "^24.3.0", - "@types/mocha": "^9.1.1", - "@types/uuid": "^8.3.4", - "@typescript-eslint/eslint-plugin": "^5.25.0", - "@typescript-eslint/parser": "^5.25.0", - "eslint": "^8.15.0", + "@types/mocha": "^10.0.1", + "@types/uuid": "^9.0.0", + "@typescript-eslint/eslint-plugin": "^5.46.0", + "@typescript-eslint/parser": "^5.46.0", + "eslint": "^8.29.0", "fast-sha256": "^1.3.0", - "mocha": "^10.0.0", - "pako": "^2.0.4", + "mocha": "^10.2.0", + "pako": "^2.1.0", "ts-mocha": "^10.0.0", "ts-node": "^10.9.1", - "typedoc": "^0.23.16", - "typescript": "^4.6.4" + "typedoc": "^0.23.22", + "typescript": "^4.9.4" }, "dependencies": { "@automerge/automerge-wasm": "0.1.19", - "uuid": "^8.3" + "uuid": "^9.0.0" } } diff --git a/javascript/src/index.ts b/javascript/src/index.ts index 50306b4c..581f50d1 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -141,9 +141,9 @@ function importOpts(_actor?: ActorId | InitOptions): InitOptions { * random actor ID */ export function init(_opts?: ActorId | InitOptions): Doc { - let opts = 
importOpts(_opts) - let freeze = !!opts.freeze - let patchCallback = opts.patchCallback + const opts = importOpts(_opts) + const freeze = !!opts.freeze + const patchCallback = opts.patchCallback const handle = ApiHandler.create(opts.actor) handle.enablePatches(true) handle.enableFreeze(!!opts.freeze) @@ -170,7 +170,7 @@ export function init(_opts?: ActorId | InitOptions): Doc { export function view(doc: Doc, heads: Heads): Doc { const state = _state(doc) const handle = state.handle - return state.handle.materialize("/", heads, { ...state, handle, heads }) as any + return state.handle.materialize("/", heads, { ...state, handle, heads }) as Doc } /** @@ -291,9 +291,9 @@ function progressDocument(doc: Doc, heads: Heads | null, callback?: PatchC if (heads == null) { return doc } - let state = _state(doc) - let nextState = {...state, heads: undefined}; - let nextDoc = state.handle.applyPatches(doc, nextState, callback) + const state = _state(doc) + const nextState = {...state, heads: undefined}; + const nextDoc = state.handle.applyPatches(doc, nextState, callback) state.heads = heads return nextDoc } @@ -392,7 +392,7 @@ export function load(data: Uint8Array, _opts?: ActorId | InitOptions): Doc handle.enablePatches(true) handle.enableFreeze(!!opts.freeze) handle.registerDatatype("counter", (n) => new Counter(n)) - const doc: any = handle.materialize("/", undefined, {handle, heads: undefined, patchCallback}) as Doc + const doc = handle.materialize("/", undefined, {handle, heads: undefined, patchCallback}) as Doc return doc } @@ -599,7 +599,7 @@ export function getLastLocalChange(doc: Doc): Change | undefined { * This is useful to determine if something is actually an automerge document, * if `doc` is not an automerge document this will return null. */ -export function getObjectId(doc: any, prop?: Prop): ObjID | null { +export function getObjectId(doc: Doc, prop?: Prop): ObjID | null { if (prop) { const state = _state(doc, false) const objectId = _obj(doc) @@ -619,7 +619,6 @@ export function getObjectId(doc: any, prop?: Prop): ObjID | null { * Note that this will crash if there are changes in `oldState` which are not in `newState`. 
*/ export function getChanges(oldState: Doc, newState: Doc): Change[] { - const o = _state(oldState) const n = _state(newState) return n.handle.getChanges(getHeads(oldState)) } @@ -709,8 +708,8 @@ export function encodeSyncState(state: SyncState): Uint8Array { * @group sync */ export function decodeSyncState(state: Uint8Array): SyncState { - let sync = ApiHandler.decodeSyncState(state) - let result = ApiHandler.exportSyncState(sync) + const sync = ApiHandler.decodeSyncState(state) + const result = ApiHandler.exportSyncState(sync) sync.free() return result } @@ -848,7 +847,11 @@ export function toJS(doc: Doc): T { } export function isAutomerge(doc: unknown): boolean { - return getObjectId(doc) === "_root" && !!Reflect.get(doc as Object, STATE) + if (typeof doc == "object" && doc !== null) { + return getObjectId(doc) === "_root" && !!Reflect.get(doc, STATE) + } else { + return false + } } function isObject(obj: unknown): obj is Record { diff --git a/javascript/src/proxies.ts b/javascript/src/proxies.ts index 6c0035de..ff03be4d 100644 --- a/javascript/src/proxies.ts +++ b/javascript/src/proxies.ts @@ -3,7 +3,7 @@ import { Automerge, Heads, ObjID } from "@automerge/automerge-wasm" import { Prop } from "@automerge/automerge-wasm" import { AutomergeValue, ScalarValue, MapValue, ListValue } from "./types" import { Counter, getWriteableCounter } from "./counter" -import { STATE, TRACE, IS_PROXY, OBJECT_ID, COUNTER, INT, UINT, F64, TEXT } from "./constants" +import { STATE, TRACE, IS_PROXY, OBJECT_ID, COUNTER, INT, UINT, F64 } from "./constants" function parseListIndex(key) { if (typeof key === 'string' && /^[0-9]+$/.test(key)) key = parseInt(key, 10) @@ -95,7 +95,7 @@ function import_value(value) { const MapHandler = { get (target, key) : AutomergeValue { - const { context, objectId, readonly, frozen, heads, cache } = target + const { context, objectId, cache } = target if (key === Symbol.toStringTag) { return target[Symbol.toStringTag] } if (key === OBJECT_ID) return objectId if (key === IS_PROXY) return true @@ -187,7 +187,7 @@ const MapHandler = { const ListHandler = { get (target, index) { - const {context, objectId, readonly, frozen, heads } = target + const {context, objectId, heads } = target index = parseListIndex(index) if (index === Symbol.hasInstance) { return (instance) => { return Array.isArray(instance) } } if (index === Symbol.toStringTag) { return target[Symbol.toStringTag] } @@ -236,11 +236,10 @@ const ListHandler = { break; } case "text": { - let text if (index >= context.length(objectId)) { - text = context.insertObject(objectId, index, value, "text") + context.insertObject(objectId, index, value, "text") } else { - text = context.putObject(objectId, index, value, "text") + context.putObject(objectId, index, value, "text") } break; } @@ -534,7 +533,7 @@ function listMethods(target) { find(f: (AutomergeValue, number) => boolean) : AutomergeValue | undefined { let index = 0 - for (let v of this) { + for (const v of this) { if (f(v, index)) { return v } @@ -544,7 +543,7 @@ function listMethods(target) { findIndex(f: (AutomergeValue, number) => boolean) : number { let index = 0 - for (let v of this) { + for (const v of this) { if (f(v, index)) { return index } @@ -582,7 +581,7 @@ function listMethods(target) { some(f: (AutomergeValue, number) => boolean) : boolean { let index = 0; - for (let v of this) { + for (const v of this) { if (f(v,index)) { return true } @@ -604,47 +603,3 @@ function listMethods(target) { return methods } -function textMethods(target) { - const {context, 
objectId, heads } = target - const methods = { - set (index: number, value) { - return this[index] = value - }, - get (index: number) : AutomergeValue { - return this[index] - }, - toString () : string { - return context.text(objectId, heads).replace(//g,'') - }, - toSpans () : AutomergeValue[] { - const spans : AutomergeValue[] = [] - let chars = '' - const length = context.length(objectId) - for (let i = 0; i < length; i++) { - const value = this[i] - if (typeof value === 'string') { - chars += value - } else { - if (chars.length > 0) { - spans.push(chars) - chars = '' - } - spans.push(value) - } - } - if (chars.length > 0) { - spans.push(chars) - } - return spans - }, - toJSON () : string { - return this.toString() - }, - indexOf(o, start = 0) { - const text = context.text(objectId) - return text.indexOf(o,start) - } - } - return methods -} - diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 45e7950e..7c02d820 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -40,21 +40,18 @@ "test": "ts-mocha -p tsconfig.json --type-check --bail --full-trace test/*.ts" }, "devDependencies": { - "@types/expect": "^24.3.0", - "@types/jest": "^27.4.0", - "@types/mocha": "^9.1.0", - "@types/node": "^17.0.13", - "@types/uuid": "^8.3.4", - "@typescript-eslint/eslint-plugin": "^5.25.0", - "@typescript-eslint/parser": "^5.25.0", + "@types/mocha": "^10.0.1", + "@types/node": "^18.11.13", + "@typescript-eslint/eslint-plugin": "^5.46.0", + "@typescript-eslint/parser": "^5.46.0", "cross-env": "^7.0.3", - "eslint": "^8.16.0", + "eslint": "^8.29.0", "fast-sha256": "^1.3.0", - "mocha": "^9.1.3", - "pako": "^2.0.4", + "mocha": "^10.2.0", + "pako": "^2.1.0", "rimraf": "^3.0.2", - "ts-mocha": "^9.0.2", - "typescript": "^4.6.4" + "ts-mocha": "^10.0.0", + "typescript": "^4.9.4" }, "exports": { "browser": "./bundler/automerge_wasm.js", diff --git a/rust/automerge-wasm/test/apply.ts b/rust/automerge-wasm/test/apply.ts index c96ad75c..d4b8c95e 100644 --- a/rust/automerge-wasm/test/apply.ts +++ b/rust/automerge-wasm/test/apply.ts @@ -164,7 +164,7 @@ describe('Automerge', () => { it('should set the OBJECT_ID property on lists, maps, and text objects and not on scalars', () => { const doc1 = create('aaaa') - let mat: any = doc1.materialize("/") + const mat: any = doc1.materialize("/") doc1.enablePatches(true) doc1.registerDatatype("counter", (n: number) => new Counter(n)) doc1.put("/", "string", "string", "str") @@ -194,11 +194,11 @@ describe('Automerge', () => { it('should set the root OBJECT_ID to "_root"', () => { const doc1 = create('aaaa') - let mat: any = doc1.materialize("/") + const mat: any = doc1.materialize("/") assert.equal(_obj(mat), "_root") doc1.enablePatches(true) doc1.put("/", "key", "value") - let applied = doc1.applyPatches(mat) + const applied = doc1.applyPatches(mat) assert.equal(_obj(applied), "_root") }) diff --git a/rust/automerge-wasm/test/test.ts b/rust/automerge-wasm/test/test.ts index 64690b90..70b56c55 100644 --- a/rust/automerge-wasm/test/test.ts +++ b/rust/automerge-wasm/test/test.ts @@ -1953,7 +1953,7 @@ describe('Automerge', () => { assert.deepEqual(doc.length("/width2"), 12); assert.deepEqual(doc.length("/mixed"), 9); - let heads1 = doc.getHeads(); + const heads1 = doc.getHeads(); mat = doc.applyPatches(mat) @@ -2013,7 +2013,7 @@ describe('Automerge', () => { }) it('can handle non-characters embedded in text', () => { - let change : any = { + const change : any = { ops: [ { action: 'makeText', obj: '_root', key: 
'bad_text', pred: [] }, { action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'A', pred: [] }, @@ -2030,7 +2030,7 @@ describe('Automerge', () => { } const doc = load(encodeChange(change)); doc.enablePatches(true) - let mat : any = doc.materialize("/") + const mat : any = doc.materialize("/") // multi - char strings appear as a span of strings // non strings appear as an object replacement unicode char @@ -2039,27 +2039,27 @@ describe('Automerge', () => { assert.deepEqual(doc.materialize("/bad_text"), 'ABBBBBC') // deleting in the middle of a multi-byte character will delete the whole thing - let doc1 = doc.fork() + const doc1 = doc.fork() doc1.splice("/bad_text", 3, 3, "X"); assert.deepEqual(doc1.text("/bad_text"), 'AXC') // deleting in the middle of a multi-byte character will delete the whole thing // and characters past its end - let doc2 = doc.fork() + const doc2 = doc.fork() doc2.splice("/bad_text", 3, 4, "X"); assert.deepEqual(doc2.text("/bad_text"), 'AXC') - let doc3 = doc.fork() + const doc3 = doc.fork() doc3.splice("/bad_text", 3, 5, "X"); assert.deepEqual(doc3.text("/bad_text"), 'AX') // inserting in the middle of a mutli-bytes span inserts after - let doc4 = doc.fork() + const doc4 = doc.fork() doc4.splice("/bad_text", 3, 0, "X"); assert.deepEqual(doc4.text("/bad_text"), 'ABBBBBXC') // deleting into the middle of a multi-byte span deletes the whole thing - let doc5 = doc.fork() + const doc5 = doc.fork() doc5.splice("/bad_text", 0, 2, "X"); assert.deepEqual(doc5.text("/bad_text"), 'XC') From e75ca2a8342b99b68a12e1471393afd585636c49 Mon Sep 17 00:00:00 2001 From: patryk Date: Wed, 14 Dec 2022 12:41:21 +0100 Subject: [PATCH 06/72] Update README.md (Update Slack invite link) (#475) Slack invite link updated to the one used on the website, as the current one returns "This link is no longer active". --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b2037c13..d11e9d1c 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ If you're familiar with CRDTs and interested in the design of Automerge in particular take a look at https://automerge.org/docs/how-it-works/backend/ Finally, if you want to talk to us about this project please [join the -Slack](https://join.slack.com/t/automerge/shared_invite/zt-1ho1ieas2-DnWZcRR82BRu65vCD4t3Xw) +Slack](https://join.slack.com/t/automerge/shared_invite/zt-e4p3760n-kKh7r3KRH1YwwNfiZM8ktw) ## Status From 6dad2b7df16a31b5f9c02d46b18cd5a89f8e10ea Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 14 Dec 2022 10:34:22 -0700 Subject: [PATCH 07/72] Don't panic on invalid gzip stream (#477) * Don't panic on invalid gzip stream Before this change automerge-rs would panic if the gzip data in a raw column was invalid; after this change the error is propagated to the caller correctly. 
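As a rough sketch of the caller-facing contract this change is after (illustrative only, not part of the diff: the `try_load` helper and the placeholder bytes are invented here, and the real coverage is the `invalid_deflate_stream` test added to `rust/automerge/tests/test.rs` below):

    use automerge::Automerge;

    fn try_load(bytes: &[u8]) -> Option<Automerge> {
        // A corrupt deflate stream inside a raw column is now reported as a
        // load error instead of panicking inside the column decompressor.
        match Automerge::load(bytes) {
            Ok(doc) => Some(doc),
            Err(e) => {
                eprintln!("could not load document: {}", e);
                None
            }
        }
    }

    fn main() {
        // Any malformed input works as a stand-in for a bad document here.
        assert!(try_load(&[0xde, 0xad, 0xbe, 0xef]).is_none());
    }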
--- .../src/storage/columns/raw_column.rs | 18 ++-- rust/automerge/src/storage/document.rs | 3 +- .../src/storage/document/compression.rs | 94 +++++++++++-------- rust/automerge/tests/test.rs | 14 +++ 4 files changed, 84 insertions(+), 45 deletions(-) diff --git a/rust/automerge/src/storage/columns/raw_column.rs b/rust/automerge/src/storage/columns/raw_column.rs index 053c3c75..808b53cf 100644 --- a/rust/automerge/src/storage/columns/raw_column.rs +++ b/rust/automerge/src/storage/columns/raw_column.rs @@ -73,15 +73,19 @@ impl RawColumn { } } - fn decompress(&self, input: &[u8], out: &mut Vec) -> (ColumnSpec, usize) { + fn decompress( + &self, + input: &[u8], + out: &mut Vec, + ) -> Result<(ColumnSpec, usize), ParseError> { let len = if self.spec.deflate() { let mut inflater = flate2::bufread::DeflateDecoder::new(&input[self.data.clone()]); - inflater.read_to_end(out).unwrap() + inflater.read_to_end(out).map_err(ParseError::Deflate)? } else { out.extend(&input[self.data.clone()]); self.data.len() }; - (self.spec.inflated(), len) + Ok((self.spec.inflated(), len)) } } @@ -140,7 +144,7 @@ impl RawColumns { &self, input: &[u8], out: &mut Vec, - ) -> RawColumns { + ) -> Result, ParseError> { let mut result = Vec::with_capacity(self.0.len()); let mut start = 0; for col in &self.0 { @@ -148,7 +152,7 @@ impl RawColumns { out.extend(&input[decomp.data.clone()]); (decomp.spec, decomp.data.len()) } else { - col.decompress(input, out) + col.decompress(input, out)? }; result.push(RawColumn { spec, @@ -157,7 +161,7 @@ impl RawColumns { }); start += len; } - RawColumns(result) + Ok(RawColumns(result)) } } @@ -193,6 +197,8 @@ pub(crate) enum ParseError { NotInNormalOrder, #[error(transparent)] Leb128(#[from] parse::leb128::Error), + #[error(transparent)] + Deflate(#[from] std::io::Error), } impl RawColumns { diff --git a/rust/automerge/src/storage/document.rs b/rust/automerge/src/storage/document.rs index 500fbe85..ecef0bfd 100644 --- a/rust/automerge/src/storage/document.rs +++ b/rust/automerge/src/storage/document.rs @@ -173,7 +173,8 @@ impl<'a> Document<'a> { raw_columns: ops_meta, }, extra_args: (), - }); + }) + .map_err(|e| parse::ParseError::Error(ParseError::RawColumns(e)))?; let ops_layout = Columns::parse(op_bytes.len(), ops.iter()).map_err(|e| { parse::ParseError::Error(ParseError::BadColumnLayout { diff --git a/rust/automerge/src/storage/document/compression.rs b/rust/automerge/src/storage/document/compression.rs index f7daa127..2f0e96ce 100644 --- a/rust/automerge/src/storage/document/compression.rs +++ b/rust/automerge/src/storage/document/compression.rs @@ -1,6 +1,9 @@ -use std::{borrow::Cow, ops::Range}; +use std::{borrow::Cow, convert::Infallible, ops::Range}; -use crate::storage::{columns::compression, shift_range, ChunkType, Header, RawColumns}; +use crate::storage::{ + columns::{compression, raw_column}, + shift_range, ChunkType, Header, RawColumns, +}; pub(super) struct Args<'a, T: compression::ColumnCompression, DirArgs> { /// The original data of the entire document chunk (compressed or uncompressed) @@ -23,40 +26,50 @@ pub(super) struct CompressArgs { } /// Compress a document chunk returning the compressed bytes -pub(super) fn compress<'a>(args: Args<'a, compression::Uncompressed, CompressArgs>) -> Vec { +pub(super) fn compress(args: Args<'_, compression::Uncompressed, CompressArgs>) -> Vec { let header_len = args.extra_args.original_header_len; let threshold = args.extra_args.threshold; - Compression::<'a, Compressing, _>::new( - args, - Compressing { - threshold, - header_len, - 
}, - ) - .changes() - .ops() - .write_data() - .finish() + // Wrap in a closure so we can use `?` in the construction but still force the compiler + // to check that the error type is `Infallible` + let result: Result<_, Infallible> = (|| { + Ok(Compression::::new( + args, + Compressing { + threshold, + header_len, + }, + ) + .changes()? + .ops()? + .write_data() + .finish()) + })(); + // We just checked the error is `Infallible` so unwrap is fine + result.unwrap() } -pub(super) fn decompress<'a>(args: Args<'a, compression::Unknown, ()>) -> Decompressed<'a> { +pub(super) fn decompress<'a>( + args: Args<'a, compression::Unknown, ()>, +) -> Result, raw_column::ParseError> { match ( args.changes.raw_columns.uncompressed(), args.ops.raw_columns.uncompressed(), ) { - (Some(changes), Some(ops)) => Decompressed { + (Some(changes), Some(ops)) => Ok(Decompressed { changes, ops, compressed: None, uncompressed: args.original, change_bytes: args.changes.data, op_bytes: args.ops.data, - }, - _ => Compression::<'a, Decompressing, _>::new(args, Decompressing) - .changes() - .ops() - .write_data() - .finish(), + }), + _ => Ok( + Compression::<'a, Decompressing, _>::new(args, Decompressing) + .changes()? + .ops()? + .write_data() + .finish(), + ), } } @@ -94,6 +107,7 @@ pub(super) struct Cols { trait Direction: std::fmt::Debug { type Out: compression::ColumnCompression; type In: compression::ColumnCompression; + type Error; type Args; /// This method represents the (de)compression process for a direction. The arguments are: @@ -108,7 +122,7 @@ trait Direction: std::fmt::Debug { input: &[u8], out: &mut Vec, meta_out: &mut Vec, - ) -> Cols; + ) -> Result, Self::Error>; } #[derive(Debug)] struct Compressing { @@ -117,6 +131,7 @@ struct Compressing { } impl Direction for Compressing { + type Error = Infallible; type Out = compression::Unknown; type In = compression::Uncompressed; type Args = CompressArgs; @@ -127,16 +142,16 @@ impl Direction for Compressing { input: &[u8], out: &mut Vec, meta_out: &mut Vec, - ) -> Cols { + ) -> Result, Self::Error> { let start = out.len(); let raw_columns = cols .raw_columns .compress(&input[cols.data.clone()], out, self.threshold); raw_columns.write(meta_out); - Cols { + Ok(Cols { data: start..out.len(), raw_columns, - } + }) } } @@ -144,6 +159,7 @@ impl Direction for Compressing { struct Decompressing; impl Direction for Decompressing { + type Error = raw_column::ParseError; type Out = compression::Uncompressed; type In = compression::Unknown; type Args = (); @@ -154,14 +170,16 @@ impl Direction for Decompressing { input: &[u8], out: &mut Vec, meta_out: &mut Vec, - ) -> Cols { + ) -> Result, raw_column::ParseError> { let start = out.len(); - let raw_columns = cols.raw_columns.uncompress(&input[cols.data.clone()], out); + let raw_columns = cols + .raw_columns + .uncompress(&input[cols.data.clone()], out)?; raw_columns.write(meta_out); - Cols { + Ok(Cols { data: start..out.len(), raw_columns, - } + }) } } @@ -233,7 +251,7 @@ impl<'a, D: Direction> Compression<'a, D, Starting> { } impl<'a, D: Direction> Compression<'a, D, Starting> { - fn changes(self) -> Compression<'a, D, Changes> { + fn changes(self) -> Result>, D::Error> { let Starting { mut data_out, mut meta_out, @@ -243,8 +261,8 @@ impl<'a, D: Direction> Compression<'a, D, Starting> { &self.args.original, &mut data_out, &mut meta_out, - ); - Compression { + )?; + Ok(Compression { args: self.args, direction: self.direction, state: Changes { @@ -252,12 +270,12 @@ impl<'a, D: Direction> Compression<'a, D, Starting> { 
meta_out, data_out, }, - } + }) } } impl<'a, D: Direction> Compression<'a, D, Changes> { - fn ops(self) -> Compression<'a, D, ChangesAndOps> { + fn ops(self) -> Result>, D::Error> { let Changes { change_cols, mut meta_out, @@ -268,8 +286,8 @@ impl<'a, D: Direction> Compression<'a, D, Changes> { &self.args.original, &mut data_out, &mut meta_out, - ); - Compression { + )?; + Ok(Compression { args: self.args, direction: self.direction, state: ChangesAndOps { @@ -278,7 +296,7 @@ impl<'a, D: Direction> Compression<'a, D, Changes> { meta_out, data_out, }, - } + }) } } diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index 876acb74..c1b653d3 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -1397,3 +1397,17 @@ fn ops_on_wrong_objets() -> Result<(), AutomergeError> { assert_eq!(e6, Err(AutomergeError::InvalidOp(ObjType::Text))); Ok(()) } + +#[test] +fn invalid_deflate_stream() { + let bytes: [u8; 123] = [ + 133, 111, 74, 131, 48, 48, 48, 48, 0, 113, 1, 16, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 1, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 6, 1, 2, 3, 2, 32, 2, 48, + 2, 49, 2, 49, 2, 8, 32, 4, 33, 2, 48, 2, 49, 1, 49, 2, 57, 2, 87, 3, 128, 1, 2, 127, 0, + 127, 1, 127, 1, 127, 0, 127, 0, 127, 7, 127, 2, 102, 122, 127, 0, 127, 1, 1, 127, 1, 127, + 54, 239, 191, 189, 127, 0, 0, + ]; + + assert!(Automerge::load(&bytes).is_err()); +} From 8aff1296b99b46f9ba08c833f9c77c5e0763a968 Mon Sep 17 00:00:00 2001 From: alexjg Date: Wed, 14 Dec 2022 18:06:19 +0000 Subject: [PATCH 08/72] automerge-cli: remove a bunch of bad dependencies (#478) Automerge CLI depends transitively (via and old version of `clap` and via `colored_json` on `atty` and `ansi_term`. These crates are both marked as unmaintained and this generates irritating `cargo deny` messages. To avoid this, implement colored JSON ourselves using the `termcolor` crate - colored JSON is pretty mechanical. Also update criterion and cbindgen dependencies and ignore the criterion tree in deny.toml as we only ever use it in benchmarks. All that's left now is a warning about atty in cbindgen, we'll just have to wait for cbindgen to fix that, it's a build time dependency anyway so it's not really an issue. 
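To make the "colored JSON is pretty mechanical" claim concrete, here is a rough, self-contained sketch of the idea (not the code added by this patch; the helper names `paint` and `print_value` are invented for the example): walk a `serde_json::Value` and emit object keys in bold blue and string values in green through `termcolor`, leaving everything else unstyled.

use std::io::Write;

use serde_json::Value;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

// Write `text` to `out` with the given style, then reset the style.
fn paint(out: &mut StandardStream, spec: &ColorSpec, text: &str) -> std::io::Result<()> {
    out.set_color(spec)?;
    write!(out, "{}", text)?;
    out.reset()
}

// Pretty-print `value`, coloring object keys and string values.
fn print_value(out: &mut StandardStream, value: &Value, indent: usize) -> std::io::Result<()> {
    let pad = "  ".repeat(indent);
    match value {
        Value::Object(map) => {
            writeln!(out, "{{")?;
            for (i, (key, val)) in map.iter().enumerate() {
                write!(out, "{}  ", pad)?;
                // object keys: bold blue
                paint(
                    out,
                    ColorSpec::new().set_fg(Some(Color::Blue)).set_bold(true),
                    &format!("\"{}\"", key),
                )?;
                write!(out, ": ")?;
                print_value(out, val, indent + 1)?;
                if i + 1 < map.len() {
                    write!(out, ",")?;
                }
                writeln!(out)?;
            }
            write!(out, "{}}}", pad)
        }
        Value::Array(items) => {
            writeln!(out, "[")?;
            for (i, item) in items.iter().enumerate() {
                write!(out, "{}  ", pad)?;
                print_value(out, item, indent + 1)?;
                if i + 1 < items.len() {
                    write!(out, ",")?;
                }
                writeln!(out)?;
            }
            write!(out, "{}]", pad)
        }
        // string values: green
        Value::String(s) => paint(
            out,
            ColorSpec::new().set_fg(Some(Color::Green)),
            &format!("\"{}\"", s),
        ),
        // numbers, booleans and null are left unstyled
        other => write!(out, "{}", other),
    }
}

fn main() -> std::io::Result<()> {
    let doc = serde_json::json!({"birds": {"wrens": 3, "sparrows": 15}});
    let mut out = StandardStream::stdout(ColorChoice::Auto);
    print_value(&mut out, &doc, 0)?;
    writeln!(out)
}

The patch itself goes further and plugs into `serde_json::ser::Formatter` (below) so that the existing pretty-printer drives nesting and escaping, but the coloring mechanism is the same `ColorSpec`/`WriteColor` calls.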
--- rust/automerge-c/Cargo.toml | 2 +- rust/automerge-cli/Cargo.toml | 7 +- rust/automerge-cli/src/color_json.rs | 348 +++++++++++++++++++++++++++ rust/automerge-cli/src/examine.rs | 4 +- rust/automerge-cli/src/export.rs | 4 +- rust/automerge-cli/src/main.rs | 24 +- rust/automerge/Cargo.toml | 2 +- rust/deny.toml | 16 +- rust/edit-trace/Cargo.toml | 2 +- 9 files changed, 375 insertions(+), 34 deletions(-) create mode 100644 rust/automerge-cli/src/color_json.rs diff --git a/rust/automerge-c/Cargo.toml b/rust/automerge-c/Cargo.toml index 851a3470..d039e460 100644 --- a/rust/automerge-c/Cargo.toml +++ b/rust/automerge-c/Cargo.toml @@ -19,4 +19,4 @@ libc = "^0.2" smol_str = "^0.1.21" [build-dependencies] -cbindgen = "^0.20" +cbindgen = "^0.24" diff --git a/rust/automerge-cli/Cargo.toml b/rust/automerge-cli/Cargo.toml index f434bc69..430090a6 100644 --- a/rust/automerge-cli/Cargo.toml +++ b/rust/automerge-cli/Cargo.toml @@ -13,17 +13,18 @@ bench = false doc = false [dependencies] -clap = {version = "~3.1", features = ["derive"]} +clap = {version = "~4", features = ["derive"]} serde_json = "^1.0" anyhow = "1.0" -atty = "^0.2" thiserror = "^1.0" combine = "^4.5" maplit = "^1.0" -colored_json = "^2.1" tracing-subscriber = "~0.3" automerge = { path = "../automerge" } +is-terminal = "0.4.1" +termcolor = "1.1.3" +serde = "1.0.150" [dev-dependencies] duct = "^0.13" diff --git a/rust/automerge-cli/src/color_json.rs b/rust/automerge-cli/src/color_json.rs new file mode 100644 index 00000000..1d175026 --- /dev/null +++ b/rust/automerge-cli/src/color_json.rs @@ -0,0 +1,348 @@ +use std::io::Write; + +use serde::Serialize; +use serde_json::ser::Formatter; +use termcolor::{Buffer, BufferWriter, Color, ColorSpec, WriteColor}; + +struct Style { + /// style of object brackets + object_brackets: ColorSpec, + /// style of array brackets + array_brackets: ColorSpec, + /// style of object + key: ColorSpec, + /// style of string values + string_value: ColorSpec, + /// style of integer values + integer_value: ColorSpec, + /// style of float values + float_value: ColorSpec, + /// style of bool values + bool_value: ColorSpec, + /// style of the `nil` value + nil_value: ColorSpec, + /// should the quotation get the style of the inner string/key? 
+ string_include_quotation: bool, +} + +impl Default for Style { + fn default() -> Self { + Self { + object_brackets: ColorSpec::new().set_bold(true).clone(), + array_brackets: ColorSpec::new().set_bold(true).clone(), + key: ColorSpec::new() + .set_fg(Some(Color::Blue)) + .set_bold(true) + .clone(), + string_value: ColorSpec::new().set_fg(Some(Color::Green)).clone(), + integer_value: ColorSpec::new(), + float_value: ColorSpec::new(), + bool_value: ColorSpec::new(), + nil_value: ColorSpec::new(), + string_include_quotation: true, + } + } +} + +/// Write pretty printed, colored json to stdout +pub(crate) fn print_colored_json(value: &serde_json::Value) -> std::io::Result<()> { + let formatter = ColoredFormatter { + formatter: serde_json::ser::PrettyFormatter::new(), + style: Style::default(), + in_object_key: false, + }; + let mut ignored_writer = Vec::new(); + let mut ser = serde_json::Serializer::with_formatter(&mut ignored_writer, formatter); + value + .serialize(&mut ser) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string())) +} + +struct ColoredFormatter { + formatter: F, + style: Style, + in_object_key: bool, +} + +fn write_colored(color: ColorSpec, handler: H) -> std::io::Result<()> +where + H: FnOnce(&mut Buffer) -> std::io::Result<()>, +{ + let buf = BufferWriter::stdout(termcolor::ColorChoice::Auto); + let mut buffer = buf.buffer(); + buffer.set_color(&color)?; + handler(&mut buffer)?; + buffer.reset()?; + buf.print(&buffer)?; + Ok(()) +} + +impl Formatter for ColoredFormatter { + fn write_null(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.nil_value.clone(), |w| { + self.formatter.write_null(w) + }) + } + + fn write_bool(&mut self, _writer: &mut W, value: bool) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.bool_value.clone(), |w| { + self.formatter.write_bool(w, value) + }) + } + + fn write_i8(&mut self, _writer: &mut W, value: i8) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_i8(w, value) + }) + } + + fn write_i16(&mut self, _writer: &mut W, value: i16) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_i16(w, value) + }) + } + + fn write_i32(&mut self, _writer: &mut W, value: i32) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_i32(w, value) + }) + } + + fn write_i64(&mut self, _writer: &mut W, value: i64) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_i64(w, value) + }) + } + + fn write_u8(&mut self, _writer: &mut W, value: u8) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_u8(w, value) + }) + } + + fn write_u16(&mut self, _writer: &mut W, value: u16) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_u16(w, value) + }) + } + + fn write_u32(&mut self, _writer: &mut W, value: u32) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_u32(w, value) + }) + } + + fn write_u64(&mut self, 
_writer: &mut W, value: u64) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_u64(w, value) + }) + } + + fn write_f32(&mut self, _writer: &mut W, value: f32) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.float_value.clone(), |w| { + self.formatter.write_f32(w, value) + }) + } + + fn write_f64(&mut self, _writer: &mut W, value: f64) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.float_value.clone(), |w| { + self.formatter.write_f64(w, value) + }) + } + + fn write_number_str(&mut self, _writer: &mut W, value: &str) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_number_str(w, value) + }) + } + + fn begin_string(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + if self.style.string_include_quotation { + let style = if self.in_object_key { + &self.style.key + } else { + &self.style.string_value + }; + write_colored(style.clone(), |w| self.formatter.begin_string(w)) + } else { + self.formatter.begin_string(_writer) + } + } + + fn end_string(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + if self.style.string_include_quotation { + let style = if self.in_object_key { + &self.style.key + } else { + &self.style.string_value + }; + write_colored(style.clone(), |w| self.formatter.end_string(w)) + } else { + self.formatter.end_string(_writer) + } + } + + fn write_string_fragment(&mut self, _writer: &mut W, fragment: &str) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + let style = if self.in_object_key { + &self.style.key + } else { + &self.style.string_value + }; + write_colored(style.clone(), |w| w.write_all(fragment.as_bytes())) + } + + fn write_char_escape( + &mut self, + _writer: &mut W, + char_escape: serde_json::ser::CharEscape, + ) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + let style = if self.in_object_key { + &self.style.key + } else { + &self.style.string_value + }; + write_colored(style.clone(), |w| { + self.formatter.write_char_escape(w, char_escape) + }) + } + + fn begin_array(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.array_brackets.clone(), |w| { + self.formatter.begin_array(w) + }) + } + + fn end_array(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.array_brackets.clone(), |w| { + self.formatter.end_array(w) + }) + } + + fn begin_array_value(&mut self, writer: &mut W, first: bool) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.formatter.begin_array_value(writer, first) + } + + fn end_array_value(&mut self, writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.formatter.end_array_value(writer) + } + + fn begin_object(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.object_brackets.clone(), |w| { + self.formatter.begin_object(w) + }) + } + + fn end_object(&mut self, _writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.object_brackets.clone(), |w| { + self.formatter.end_object(w) + }) + } + + fn begin_object_key(&mut self, writer: &mut W, first: 
bool) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.in_object_key = true; + self.formatter.begin_object_key(writer, first) + } + + fn end_object_key(&mut self, writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.in_object_key = false; + self.formatter.end_object_key(writer) + } + + fn begin_object_value(&mut self, writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.in_object_key = false; + self.formatter.begin_object_value(writer) + } + + fn end_object_value(&mut self, writer: &mut W) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.in_object_key = false; + self.formatter.end_object_value(writer) + } + + fn write_raw_fragment(&mut self, writer: &mut W, fragment: &str) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + self.formatter.write_raw_fragment(writer, fragment) + } +} diff --git a/rust/automerge-cli/src/examine.rs b/rust/automerge-cli/src/examine.rs index 847abd4f..0b8946d4 100644 --- a/rust/automerge-cli/src/examine.rs +++ b/rust/automerge-cli/src/examine.rs @@ -1,6 +1,8 @@ use automerge as am; use thiserror::Error; +use crate::color_json::print_colored_json; + #[derive(Error, Debug)] pub enum ExamineError { #[error("Error reading change file: {:?}", source)] @@ -39,7 +41,7 @@ pub fn examine( .collect(); if is_tty { let json_changes = serde_json::to_value(uncompressed_changes).unwrap(); - colored_json::write_colored_json(&json_changes, &mut output).unwrap(); + print_colored_json(&json_changes).unwrap(); writeln!(output).unwrap(); } else { let json_changes = serde_json::to_string_pretty(&uncompressed_changes).unwrap(); diff --git a/rust/automerge-cli/src/export.rs b/rust/automerge-cli/src/export.rs index 49cded8f..1d4d7965 100644 --- a/rust/automerge-cli/src/export.rs +++ b/rust/automerge-cli/src/export.rs @@ -1,6 +1,8 @@ use anyhow::Result; use automerge as am; +use crate::color_json::print_colored_json; + pub(crate) fn map_to_json(doc: &am::Automerge, obj: &am::ObjId) -> serde_json::Value { let keys = doc.keys(obj); let mut map = serde_json::Map::new(); @@ -84,7 +86,7 @@ pub fn export_json( let state_json = get_state_json(input_data)?; if is_tty { - colored_json::write_colored_json(&state_json, &mut writer).unwrap(); + print_colored_json(&state_json).unwrap(); writeln!(writer).unwrap(); } else { writeln!( diff --git a/rust/automerge-cli/src/main.rs b/rust/automerge-cli/src/main.rs index ffc13012..b16d9449 100644 --- a/rust/automerge-cli/src/main.rs +++ b/rust/automerge-cli/src/main.rs @@ -2,8 +2,10 @@ use std::{fs::File, path::PathBuf, str::FromStr}; use anyhow::{anyhow, Result}; use clap::Parser; +use is_terminal::IsTerminal; //mod change; +mod color_json; mod examine; mod export; mod import; @@ -16,7 +18,7 @@ struct Opts { cmd: Command, } -#[derive(Debug)] +#[derive(clap::ValueEnum, Clone, Debug)] enum ExportFormat { Json, Toml, @@ -43,11 +45,10 @@ enum Command { format: ExportFormat, /// Path that contains Automerge changes - #[clap(parse(from_os_str))] changes_file: Option, /// The file to write to. 
If omitted assumes stdout - #[clap(parse(from_os_str), long("out"), short('o'))] + #[clap(long("out"), short('o'))] output_file: Option, }, @@ -56,11 +57,10 @@ enum Command { #[clap(long, short, default_value = "json")] format: ExportFormat, - #[clap(parse(from_os_str))] input_file: Option, /// Path to write Automerge changes to - #[clap(parse(from_os_str), long("out"), short('o'))] + #[clap(long("out"), short('o'))] changes_file: Option, }, @@ -94,11 +94,10 @@ enum Command { script: String, /// The file to change, if omitted will assume stdin - #[clap(parse(from_os_str))] input_file: Option, /// Path to write Automerge changes to, if omitted will write to stdout - #[clap(parse(from_os_str), long("out"), short('o'))] + #[clap(long("out"), short('o'))] output_file: Option, }, @@ -108,15 +107,16 @@ enum Command { /// Read one or more automerge documents and output a merged, compacted version of them Merge { /// The file to write to. If omitted assumes stdout - #[clap(parse(from_os_str), long("out"), short('o'))] + #[clap(long("out"), short('o'))] output_file: Option, + /// The file(s) to compact. If empty assumes stdin input: Vec, }, } fn open_file_or_stdin(maybe_path: Option) -> Result> { - if atty::is(atty::Stream::Stdin) { + if std::io::stdin().is_terminal() { if let Some(path) = maybe_path { Ok(Box::new(File::open(&path).unwrap())) } else { @@ -130,7 +130,7 @@ fn open_file_or_stdin(maybe_path: Option) -> Result) -> Result> { - if atty::is(atty::Stream::Stdout) { + if std::io::stdout().is_terminal() { if let Some(path) = maybe_path { Ok(Box::new(File::create(&path).unwrap())) } else { @@ -158,7 +158,7 @@ fn main() -> Result<()> { match format { ExportFormat::Json => { let mut in_buffer = open_file_or_stdin(changes_file)?; - export::export_json(&mut in_buffer, output, atty::is(atty::Stream::Stdout)) + export::export_json(&mut in_buffer, output, std::io::stdout().is_terminal()) } ExportFormat::Toml => unimplemented!(), } @@ -191,7 +191,7 @@ fn main() -> Result<()> { Command::Examine { input_file } => { let in_buffer = open_file_or_stdin(input_file)?; let out_buffer = std::io::stdout(); - match examine::examine(in_buffer, out_buffer, atty::is(atty::Stream::Stdout)) { + match examine::examine(in_buffer, out_buffer, std::io::stdout().is_terminal()) { Ok(()) => {} Err(e) => { eprintln!("Error: {:?}", e); diff --git a/rust/automerge/Cargo.toml b/rust/automerge/Cargo.toml index 8872dcdc..89b48020 100644 --- a/rust/automerge/Cargo.toml +++ b/rust/automerge/Cargo.toml @@ -42,7 +42,7 @@ pretty_assertions = "1.0.0" proptest = { version = "^1.0.0", default-features = false, features = ["std"] } serde_json = { version = "^1.0.73", features=["float_roundtrip"], default-features=true } maplit = { version = "^1.0" } -criterion = "0.3.5" +criterion = "0.4.0" test-log = { version = "0.2.10", features=["trace"], default-features = false} tracing-subscriber = {version = "0.3.9", features = ["fmt", "env-filter"] } automerge-test = { path = "../automerge-test" } diff --git a/rust/deny.toml b/rust/deny.toml index f6985357..54a68a60 100644 --- a/rust/deny.toml +++ b/rust/deny.toml @@ -46,7 +46,6 @@ notice = "warn" # output a note when they are encountered. ignore = [ #"RUSTSEC-0000-0000", - "RUSTSEC-2021-0127", # serde_cbor is unmaintained, but we only use it in criterion for benchmarks ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. 
Note that ignored advisories @@ -100,10 +99,6 @@ confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ - # this is a LGPL like license in the CLI - # since this is an application not a library people would link to it should be fine - { allow = ["EPL-2.0"], name = "colored_json" }, - # The Unicode-DFS--2016 license is necessary for unicode-ident because they # use data from the unicode tables to generate the tables which are # included in the application. We do not distribute those data files so @@ -177,21 +172,14 @@ deny = [ ] # Certain crates/versions that will be skipped when doing duplicate detection. skip = [ - # These are transitive depdendencies of criterion, which is only included for benchmarking anyway - { name = "itoa", version = "0.4.8" }, - { name = "textwrap", version = "0.11.0" }, - { name = "clap", version = "2.34.0" }, - - # These are transitive depdendencies of cbindgen - { name = "strsim", version = "0.8.0" }, - { name = "heck", version = "0.3.3" }, ] # Similarly to `skip` allows you to skip certain crates during duplicate # detection. Unlike skip, it also includes the entire tree of transitive # dependencies starting at the specified crate, up to a certain depth, which is # by default infinite skip-tree = [ - #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, + # // We only ever use criterion in benchmarks + { name = "criterion", version = "0.4.0", depth=10}, ] # This section is considered when running `cargo deny check sources`. diff --git a/rust/edit-trace/Cargo.toml b/rust/edit-trace/Cargo.toml index 0107502b..eaebde46 100644 --- a/rust/edit-trace/Cargo.toml +++ b/rust/edit-trace/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" [dependencies] automerge = { path = "../automerge" } -criterion = "0.3.5" +criterion = "0.4.0" json = "0.12.4" rand = "^0.8" From 0f90fe4d02095713dbfd5c1767bcfa03087a4b97 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Mon, 19 Dec 2022 10:43:56 +0000 Subject: [PATCH 09/72] Add a method for loading a document without verifying heads This is primarily useful when debugging documents which have been corrupted somehow so you would like to see the ops even if you can't trust them. Note that this is _not_ currently useful for performance reasons as the hash graph is still constructed, just not verified. --- rust/automerge/src/automerge.rs | 15 +++++-- rust/automerge/src/storage.rs | 1 + rust/automerge/src/storage/load.rs | 4 +- .../src/storage/load/reconstruct_document.rs | 41 ++++++++++++++++--- 4 files changed, 49 insertions(+), 12 deletions(-) diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 5502456c..584f761d 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -12,7 +12,7 @@ use crate::keys::Keys; use crate::op_observer::OpObserver; use crate::op_set::OpSet; use crate::parents::Parents; -use crate::storage::{self, load, CompressConfig}; +use crate::storage::{self, load, CompressConfig, VerificationMode}; use crate::transaction::{ self, CommitOptions, Failure, Observed, Success, Transaction, TransactionArgs, UnObserved, }; @@ -650,13 +650,18 @@ impl Automerge { /// Load a document. 
pub fn load(data: &[u8]) -> Result { - Self::load_with::<()>(data, None) + Self::load_with::<()>(data, VerificationMode::Check, None) + } + + pub fn load_unverified_heads(data: &[u8]) -> Result { + Self::load_with::<()>(data, VerificationMode::DontCheck, None) } /// Load a document. #[tracing::instrument(skip(data, observer), err)] pub fn load_with( data: &[u8], + mode: VerificationMode, mut observer: Option<&mut Obs>, ) -> Result { if data.is_empty() { @@ -679,8 +684,10 @@ impl Automerge { changes, heads, } = match &mut observer { - Some(o) => storage::load::reconstruct_document(&d, OpSet::observed_builder(*o)), - None => storage::load::reconstruct_document(&d, OpSet::builder()), + Some(o) => { + storage::load::reconstruct_document(&d, mode, OpSet::observed_builder(*o)) + } + None => storage::load::reconstruct_document(&d, mode, OpSet::builder()), } .map_err(|e| load::Error::InflateDocument(Box::new(e)))?; let mut hashes_by_index = HashMap::new(); diff --git a/rust/automerge/src/storage.rs b/rust/automerge/src/storage.rs index c8a2183d..5b3d03a7 100644 --- a/rust/automerge/src/storage.rs +++ b/rust/automerge/src/storage.rs @@ -14,6 +14,7 @@ pub(crate) use { chunk::{CheckSum, Chunk, ChunkType, Header}, columns::{Columns, MismatchingColumn, RawColumn, RawColumns}, document::{AsChangeMeta, AsDocOp, ChangeMetadata, CompressConfig, DocOp, Document}, + load::VerificationMode, }; fn shift_range(range: Range, by: usize) -> Range { diff --git a/rust/automerge/src/storage/load.rs b/rust/automerge/src/storage/load.rs index fe2e8429..80ab3d82 100644 --- a/rust/automerge/src/storage/load.rs +++ b/rust/automerge/src/storage/load.rs @@ -8,7 +8,7 @@ use crate::{ mod change_collector; mod reconstruct_document; pub(crate) use reconstruct_document::{ - reconstruct_document, DocObserver, LoadedObject, Reconstructed, + reconstruct_document, DocObserver, LoadedObject, Reconstructed, VerificationMode, }; #[derive(Debug, thiserror::Error)] @@ -84,7 +84,7 @@ fn load_next_change<'a>( let Reconstructed { changes: new_changes, .. 
- } = reconstruct_document(&d, NullObserver) + } = reconstruct_document(&d, VerificationMode::DontCheck, NullObserver) .map_err(|e| Error::InflateDocument(Box::new(e)))?; changes.extend(new_changes); } diff --git a/rust/automerge/src/storage/load/reconstruct_document.rs b/rust/automerge/src/storage/load/reconstruct_document.rs index e8221e5c..44ace72a 100644 --- a/rust/automerge/src/storage/load/reconstruct_document.rs +++ b/rust/automerge/src/storage/load/reconstruct_document.rs @@ -6,7 +6,7 @@ use crate::{ change::Change, columnar::Key as DocOpKey, op_tree::OpSetMetadata, - storage::{DocOp, Document}, + storage::{change::Verified, Change as StoredChange, DocOp, Document}, types::{ChangeHash, ElemId, Key, ObjId, ObjType, Op, OpId, OpIds, OpType}, ScalarValue, }; @@ -24,13 +24,29 @@ pub(crate) enum Error { #[error("invalid changes: {0}")] InvalidChanges(#[from] super::change_collector::Error), #[error("mismatching heads")] - MismatchingHeads, + MismatchingHeads(MismatchedHeads), #[error("missing operations")] MissingOps, #[error("succ out of order")] SuccOutOfOrder, } +pub(crate) struct MismatchedHeads { + changes: Vec>, + expected_heads: BTreeSet, + derived_heads: BTreeSet, +} + +impl std::fmt::Debug for MismatchedHeads { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MismatchedHeads") + .field("changes", &self.changes.len()) + .field("expected_heads", &self.expected_heads) + .field("derived_heads", &self.derived_heads) + .finish() + } +} + /// All the operations loaded from an object in the document format pub(crate) struct LoadedObject { /// The id of the object @@ -67,9 +83,16 @@ pub(crate) struct Reconstructed { pub(crate) heads: BTreeSet, } +#[derive(Debug)] +pub enum VerificationMode { + Check, + DontCheck, +} + #[instrument(skip(doc, observer))] pub(crate) fn reconstruct_document<'a, O: DocObserver>( doc: &'a Document<'a>, + mode: VerificationMode, mut observer: O, ) -> Result, Error> { // The document format does not contain the bytes of the changes which are encoded in it @@ -185,10 +208,16 @@ pub(crate) fn reconstruct_document<'a, O: DocObserver>( let super::change_collector::CollectedChanges { history, heads } = collector.finish(&metadata)?; - let expected_heads: BTreeSet<_> = doc.heads().iter().cloned().collect(); - if expected_heads != heads { - tracing::error!(?expected_heads, ?heads, "mismatching heads"); - return Err(Error::MismatchingHeads); + if matches!(mode, VerificationMode::Check) { + let expected_heads: BTreeSet<_> = doc.heads().iter().cloned().collect(); + if expected_heads != heads { + tracing::error!(?expected_heads, ?heads, "mismatching heads"); + return Err(Error::MismatchingHeads(MismatchedHeads { + changes: history, + expected_heads, + derived_heads: heads, + })); + } } let result = observer.finish(metadata); From 6da93b6adc9aca6522b77f8d985a69ce2ebb5cc0 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Mon, 19 Dec 2022 10:52:45 +0000 Subject: [PATCH 10/72] Correctly implement colored json My quickly thrown together implementation had somem mistakes in it which meant that the JSON produced was malformed. 
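Most of the fixes below address one issue: `print_colored_json` hands the serializer a throwaway `Vec<u8>` and does the real printing through termcolor's stdout buffer, so every `Formatter` callback has to route its bytes through that side channel. Callbacks that still delegated to the serializer's own writer (object-key commas, the colon in `begin_object_value`, array separators, raw fragments) were writing into the ignored buffer, so the printed JSON lost its separators and came out malformed. Below is a stripped-down, runnable sketch of the corrected pattern with the coloring removed; the names `SideChannel` and `emit` are invented for the example.

use std::io::Write;

use serde::Serialize;
use serde_json::ser::Formatter;

// All real output goes through this one side channel; in the patch the
// equivalent helper is `write_colored`, which also applies a `ColorSpec`.
struct SideChannel;

impl SideChannel {
    fn emit(&mut self, bytes: &[u8]) -> std::io::Result<()> {
        std::io::stdout().write_all(bytes)
    }
}

impl Formatter for SideChannel {
    // `_ignored` is the Vec<u8> handed to the Serializer. Nothing may be
    // written to it: bytes sent there never reach the terminal.
    fn begin_object<W: ?Sized + Write>(&mut self, _ignored: &mut W) -> std::io::Result<()> {
        self.emit(b"{")
    }
    fn end_object<W: ?Sized + Write>(&mut self, _ignored: &mut W) -> std::io::Result<()> {
        self.emit(b"}")
    }
    fn begin_object_key<W: ?Sized + Write>(&mut self, _ignored: &mut W, first: bool) -> std::io::Result<()> {
        if first { Ok(()) } else { self.emit(b",") }
    }
    fn begin_object_value<W: ?Sized + Write>(&mut self, _ignored: &mut W) -> std::io::Result<()> {
        self.emit(b":")
    }
    fn begin_string<W: ?Sized + Write>(&mut self, _ignored: &mut W) -> std::io::Result<()> {
        self.emit(b"\"")
    }
    fn end_string<W: ?Sized + Write>(&mut self, _ignored: &mut W) -> std::io::Result<()> {
        self.emit(b"\"")
    }
    fn write_string_fragment<W: ?Sized + Write>(&mut self, _ignored: &mut W, fragment: &str) -> std::io::Result<()> {
        self.emit(fragment.as_bytes())
    }
    // The patch below does the same for every callback it overrides, colored or not.
}

fn main() -> std::io::Result<()> {
    let value = serde_json::json!({"sparrows": "fifteen"});
    // The writer given to the serializer is deliberately thrown away,
    // exactly as in `print_colored_json`.
    let mut ignored: Vec<u8> = Vec::new();
    let mut ser = serde_json::Serializer::with_formatter(&mut ignored, SideChannel);
    value
        .serialize(&mut ser)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
    println!();
    Ok(())
}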
--- rust/automerge-cli/src/color_json.rs | 98 +++++++++++++++++----------- 1 file changed, 60 insertions(+), 38 deletions(-) diff --git a/rust/automerge-cli/src/color_json.rs b/rust/automerge-cli/src/color_json.rs index 1d175026..9514da22 100644 --- a/rust/automerge-cli/src/color_json.rs +++ b/rust/automerge-cli/src/color_json.rs @@ -132,6 +132,15 @@ impl Formatter for ColoredFormatter { }) } + fn write_i128(&mut self, _writer: &mut W, value: i128) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_i128(w, value) + }) + } + fn write_u8(&mut self, _writer: &mut W, value: u8) -> std::io::Result<()> where W: ?Sized + std::io::Write, @@ -168,6 +177,15 @@ impl Formatter for ColoredFormatter { }) } + fn write_u128(&mut self, _writer: &mut W, value: u128) -> std::io::Result<()> + where + W: ?Sized + std::io::Write, + { + write_colored(self.style.integer_value.clone(), |w| { + self.formatter.write_u128(w, value) + }) + } + fn write_f32(&mut self, _writer: &mut W, value: f32) -> std::io::Result<()> where W: ?Sized + std::io::Write, @@ -199,32 +217,32 @@ impl Formatter for ColoredFormatter { where W: ?Sized + std::io::Write, { - if self.style.string_include_quotation { - let style = if self.in_object_key { - &self.style.key + let style = if self.style.string_include_quotation { + if self.in_object_key { + self.style.key.clone() } else { - &self.style.string_value - }; - write_colored(style.clone(), |w| self.formatter.begin_string(w)) + self.style.string_value.clone() + } } else { - self.formatter.begin_string(_writer) - } + ColorSpec::new() + }; + write_colored(style, |w| self.formatter.begin_string(w)) } fn end_string(&mut self, _writer: &mut W) -> std::io::Result<()> where W: ?Sized + std::io::Write, { - if self.style.string_include_quotation { - let style = if self.in_object_key { - &self.style.key + let style = if self.style.string_include_quotation { + if self.in_object_key { + self.style.key.clone() } else { - &self.style.string_value - }; - write_colored(style.clone(), |w| self.formatter.end_string(w)) + self.style.string_value.clone() + } } else { - self.formatter.end_string(_writer) - } + ColorSpec::new() + }; + write_colored(style, |w| self.formatter.end_string(w)) } fn write_string_fragment(&mut self, _writer: &mut W, fragment: &str) -> std::io::Result<()> @@ -232,11 +250,11 @@ impl Formatter for ColoredFormatter { W: ?Sized + std::io::Write, { let style = if self.in_object_key { - &self.style.key + self.style.key.clone() } else { - &self.style.string_value + self.style.string_value.clone() }; - write_colored(style.clone(), |w| w.write_all(fragment.as_bytes())) + write_colored(style, |w| w.write_all(fragment.as_bytes())) } fn write_char_escape( @@ -248,13 +266,11 @@ impl Formatter for ColoredFormatter { W: ?Sized + std::io::Write, { let style = if self.in_object_key { - &self.style.key + self.style.key.clone() } else { - &self.style.string_value + self.style.string_value.clone() }; - write_colored(style.clone(), |w| { - self.formatter.write_char_escape(w, char_escape) - }) + write_colored(style, |w| self.formatter.write_char_escape(w, char_escape)) } fn begin_array(&mut self, _writer: &mut W) -> std::io::Result<()> @@ -275,18 +291,20 @@ impl Formatter for ColoredFormatter { }) } - fn begin_array_value(&mut self, writer: &mut W, first: bool) -> std::io::Result<()> + fn begin_array_value(&mut self, _writer: &mut W, first: bool) -> std::io::Result<()> where W: ?Sized + std::io::Write, { - 
self.formatter.begin_array_value(writer, first) + write_colored(ColorSpec::new(), |w| { + self.formatter.begin_array_value(w, first) + }) } - fn end_array_value(&mut self, writer: &mut W) -> std::io::Result<()> + fn end_array_value(&mut self, _writer: &mut W) -> std::io::Result<()> where W: ?Sized + std::io::Write, { - self.formatter.end_array_value(writer) + write_colored(ColorSpec::new(), |w| self.formatter.end_array_value(w)) } fn begin_object(&mut self, _writer: &mut W) -> std::io::Result<()> @@ -307,42 +325,46 @@ impl Formatter for ColoredFormatter { }) } - fn begin_object_key(&mut self, writer: &mut W, first: bool) -> std::io::Result<()> + fn begin_object_key(&mut self, _writer: &mut W, first: bool) -> std::io::Result<()> where W: ?Sized + std::io::Write, { self.in_object_key = true; - self.formatter.begin_object_key(writer, first) + write_colored(ColorSpec::new(), |w| { + self.formatter.begin_object_key(w, first) + }) } - fn end_object_key(&mut self, writer: &mut W) -> std::io::Result<()> + fn end_object_key(&mut self, _writer: &mut W) -> std::io::Result<()> where W: ?Sized + std::io::Write, { self.in_object_key = false; - self.formatter.end_object_key(writer) + write_colored(ColorSpec::new(), |w| self.formatter.end_object_key(w)) } - fn begin_object_value(&mut self, writer: &mut W) -> std::io::Result<()> + fn begin_object_value(&mut self, _writer: &mut W) -> std::io::Result<()> where W: ?Sized + std::io::Write, { self.in_object_key = false; - self.formatter.begin_object_value(writer) + write_colored(ColorSpec::new(), |w| self.formatter.begin_object_value(w)) } - fn end_object_value(&mut self, writer: &mut W) -> std::io::Result<()> + fn end_object_value(&mut self, _writer: &mut W) -> std::io::Result<()> where W: ?Sized + std::io::Write, { self.in_object_key = false; - self.formatter.end_object_value(writer) + write_colored(ColorSpec::new(), |w| self.formatter.end_object_value(w)) } - fn write_raw_fragment(&mut self, writer: &mut W, fragment: &str) -> std::io::Result<()> + fn write_raw_fragment(&mut self, _writer: &mut W, fragment: &str) -> std::io::Result<()> where W: ?Sized + std::io::Write, { - self.formatter.write_raw_fragment(writer, fragment) + write_colored(ColorSpec::new(), |w| { + self.formatter.write_raw_fragment(w, fragment) + }) } } From f682db303914434a7dfa914dcd3bafc8041d312f Mon Sep 17 00:00:00 2001 From: Alex Good Date: Mon, 19 Dec 2022 11:08:02 +0000 Subject: [PATCH 11/72] automerge-cli: Add a flag to skip verifiying heads --- rust/automerge-cli/src/examine.rs | 8 +- rust/automerge-cli/src/export.rs | 20 +++-- rust/automerge-cli/src/main.rs | 121 ++++++++++++++++-------------- 3 files changed, 83 insertions(+), 66 deletions(-) diff --git a/rust/automerge-cli/src/examine.rs b/rust/automerge-cli/src/examine.rs index 0b8946d4..0ee102fb 100644 --- a/rust/automerge-cli/src/examine.rs +++ b/rust/automerge-cli/src/examine.rs @@ -1,7 +1,7 @@ use automerge as am; use thiserror::Error; -use crate::color_json::print_colored_json; +use crate::{color_json::print_colored_json, SkipVerifyFlag}; #[derive(Error, Debug)] pub enum ExamineError { @@ -22,16 +22,18 @@ pub enum ExamineError { }, } -pub fn examine( +pub(crate) fn examine( mut input: impl std::io::Read, mut output: impl std::io::Write, + skip: SkipVerifyFlag, is_tty: bool, ) -> Result<(), ExamineError> { let mut buf: Vec = Vec::new(); input .read_to_end(&mut buf) .map_err(|e| ExamineError::ReadingChanges { source: e })?; - let doc = am::Automerge::load(&buf) + let doc = skip + .load(&buf) .map_err(|e| 
ExamineError::ApplyingInitialChanges { source: e })?; let uncompressed_changes: Vec<_> = doc .get_changes(&[]) diff --git a/rust/automerge-cli/src/export.rs b/rust/automerge-cli/src/export.rs index 1d4d7965..2a7b4130 100644 --- a/rust/automerge-cli/src/export.rs +++ b/rust/automerge-cli/src/export.rs @@ -1,7 +1,7 @@ use anyhow::Result; use automerge as am; -use crate::color_json::print_colored_json; +use crate::{color_json::print_colored_json, SkipVerifyFlag}; pub(crate) fn map_to_json(doc: &am::Automerge, obj: &am::ObjId) -> serde_json::Value { let keys = doc.keys(obj); @@ -71,20 +71,21 @@ fn scalar_to_json(val: &am::ScalarValue) -> serde_json::Value { } } -fn get_state_json(input_data: Vec) -> Result { - let doc = am::Automerge::load(&input_data).unwrap(); // FIXME +fn get_state_json(input_data: Vec, skip: SkipVerifyFlag) -> Result { + let doc = skip.load(&input_data).unwrap(); // FIXME Ok(map_to_json(&doc, &am::ObjId::Root)) } -pub fn export_json( +pub(crate) fn export_json( mut changes_reader: impl std::io::Read, mut writer: impl std::io::Write, + skip: SkipVerifyFlag, is_tty: bool, ) -> Result<()> { let mut input_data = vec![]; changes_reader.read_to_end(&mut input_data)?; - let state_json = get_state_json(input_data)?; + let state_json = get_state_json(input_data, skip)?; if is_tty { print_colored_json(&state_json).unwrap(); writeln!(writer).unwrap(); @@ -105,7 +106,10 @@ mod tests { #[test] fn cli_export_with_empty_input() { - assert_eq!(get_state_json(vec![]).unwrap(), serde_json::json!({})) + assert_eq!( + get_state_json(vec![], Default::default()).unwrap(), + serde_json::json!({}) + ) } #[test] @@ -119,7 +123,7 @@ mod tests { let mut backend = initialize_from_json(&initial_state_json).unwrap(); let change_bytes = backend.save(); assert_eq!( - get_state_json(change_bytes).unwrap(), + get_state_json(change_bytes, Default::default()).unwrap(), serde_json::json!({"sparrows": 15.0}) ) } @@ -146,7 +150,7 @@ mod tests { */ let change_bytes = backend.save(); assert_eq!( - get_state_json(change_bytes).unwrap(), + get_state_json(change_bytes, Default::default()).unwrap(), serde_json::json!({ "birds": { "wrens": 3.0, diff --git a/rust/automerge-cli/src/main.rs b/rust/automerge-cli/src/main.rs index b16d9449..48513a92 100644 --- a/rust/automerge-cli/src/main.rs +++ b/rust/automerge-cli/src/main.rs @@ -1,10 +1,12 @@ use std::{fs::File, path::PathBuf, str::FromStr}; use anyhow::{anyhow, Result}; -use clap::Parser; +use clap::{ + builder::{BoolishValueParser, TypedValueParser, ValueParserFactory}, + Parser, +}; use is_terminal::IsTerminal; -//mod change; mod color_json; mod examine; mod export; @@ -24,6 +26,44 @@ enum ExportFormat { Toml, } +#[derive(Copy, Clone, Default, Debug)] +pub(crate) struct SkipVerifyFlag(bool); + +impl SkipVerifyFlag { + fn load(&self, buf: &[u8]) -> Result { + if self.0 { + automerge::Automerge::load(buf) + } else { + automerge::Automerge::load_unverified_heads(buf) + } + } +} + +#[derive(Clone)] +struct SkipVerifyFlagParser; +impl ValueParserFactory for SkipVerifyFlag { + type Parser = SkipVerifyFlagParser; + + fn value_parser() -> Self::Parser { + SkipVerifyFlagParser + } +} + +impl TypedValueParser for SkipVerifyFlagParser { + type Value = SkipVerifyFlag; + + fn parse_ref( + &self, + cmd: &clap::Command, + arg: Option<&clap::Arg>, + value: &std::ffi::OsStr, + ) -> Result { + BoolishValueParser::new() + .parse_ref(cmd, arg, value) + .map(SkipVerifyFlag) + } +} + impl FromStr for ExportFormat { type Err = anyhow::Error; @@ -50,6 +90,10 @@ enum Command { /// The file 
to write to. If omitted assumes stdout #[clap(long("out"), short('o'))] output_file: Option, + + /// Whether to verify the head hashes of a compressed document + #[clap(long, action = clap::ArgAction::SetFalse)] + skip_verifying_heads: SkipVerifyFlag, }, Import { @@ -64,45 +108,11 @@ enum Command { changes_file: Option, }, - /// Read an automerge document from a file or stdin, perform a change on it and write a new - /// document to stdout or the specified output file. - Change { - /// The change script to perform. Change scripts have the form []. - /// The possible commands are 'set', 'insert', 'delete', and 'increment'. - /// - /// Paths look like this: $["mapkey"][0]. They always lways start with a '$', then each - /// subsequent segment of the path is either a string in double quotes to index a key in a - /// map, or an integer index to address an array element. - /// - /// Examples - /// - /// ## set - /// - /// > automerge change 'set $["someobject"] {"items": []}' somefile - /// - /// ## insert - /// - /// > automerge change 'insert $["someobject"]["items"][0] "item1"' somefile - /// - /// ## increment - /// - /// > automerge change 'increment $["mycounter"]' - /// - /// ## delete - /// - /// > automerge change 'delete $["someobject"]["items"]' somefile - script: String, - - /// The file to change, if omitted will assume stdin - input_file: Option, - - /// Path to write Automerge changes to, if omitted will write to stdout - #[clap(long("out"), short('o'))] - output_file: Option, - }, - /// Read an automerge document and print a JSON representation of the changes in it to stdout - Examine { input_file: Option }, + Examine { + input_file: Option, + skip_verifying_heads: SkipVerifyFlag, + }, /// Read one or more automerge documents and output a merged, compacted version of them Merge { @@ -149,6 +159,7 @@ fn main() -> Result<()> { changes_file, format, output_file, + skip_verifying_heads, } => { let output: Box = if let Some(output_file) = output_file { Box::new(File::create(&output_file)?) @@ -158,7 +169,12 @@ fn main() -> Result<()> { match format { ExportFormat::Json => { let mut in_buffer = open_file_or_stdin(changes_file)?; - export::export_json(&mut in_buffer, output, std::io::stdout().is_terminal()) + export::export_json( + &mut in_buffer, + output, + skip_verifying_heads, + std::io::stdout().is_terminal(), + ) } ExportFormat::Toml => unimplemented!(), } @@ -175,23 +191,18 @@ fn main() -> Result<()> { } ExportFormat::Toml => unimplemented!(), }, - Command::Change { .. 
- //input_file, - //output_file, - //script, + Command::Examine { + input_file, + skip_verifying_heads, } => { - unimplemented!() -/* - let in_buffer = open_file_or_stdin(input_file)?; - let mut out_buffer = create_file_or_stdout(output_file)?; - change::change(in_buffer, &mut out_buffer, script.as_str()) - .map_err(|e| anyhow::format_err!("Unable to make changes: {:?}", e)) -*/ - } - Command::Examine { input_file } => { let in_buffer = open_file_or_stdin(input_file)?; let out_buffer = std::io::stdout(); - match examine::examine(in_buffer, out_buffer, std::io::stdout().is_terminal()) { + match examine::examine( + in_buffer, + out_buffer, + skip_verifying_heads, + std::io::stdout().is_terminal(), + ) { Ok(()) => {} Err(e) => { eprintln!("Error: {:?}", e); From d678280b57a7b03c104c7b8a4ed74930885fd96b Mon Sep 17 00:00:00 2001 From: Alex Good Date: Mon, 19 Dec 2022 11:33:12 +0000 Subject: [PATCH 12/72] automerge-cli: Add an examine-sync command This is useful when receiving sync messages that behave in unexptected ways --- rust/automerge-cli/src/examine_sync.rs | 38 ++++++++++++++++++++++++++ rust/automerge-cli/src/main.rs | 16 +++++++++++ 2 files changed, 54 insertions(+) create mode 100644 rust/automerge-cli/src/examine_sync.rs diff --git a/rust/automerge-cli/src/examine_sync.rs b/rust/automerge-cli/src/examine_sync.rs new file mode 100644 index 00000000..ad6699d4 --- /dev/null +++ b/rust/automerge-cli/src/examine_sync.rs @@ -0,0 +1,38 @@ +use automerge::sync::ReadMessageError; + +use crate::color_json::print_colored_json; + +#[derive(Debug, thiserror::Error)] +pub enum ExamineSyncError { + #[error("Error reading message: {0}")] + ReadMessage(#[source] std::io::Error), + + #[error("error writing message: {0}")] + WriteMessage(#[source] std::io::Error), + + #[error("error writing json to output: {0}")] + WriteJson(#[source] serde_json::Error), + + #[error("Error parsing message: {0}")] + ParseMessage(#[from] ReadMessageError), +} + +pub(crate) fn examine_sync( + mut input: Box, + output: W, + is_tty: bool, +) -> Result<(), ExamineSyncError> { + let mut buf: Vec = Vec::new(); + input + .read_to_end(&mut buf) + .map_err(ExamineSyncError::ReadMessage)?; + + let message = automerge::sync::Message::decode(&buf)?; + let json = serde_json::to_value(&message).unwrap(); + if is_tty { + print_colored_json(&json).map_err(ExamineSyncError::WriteMessage)?; + } else { + serde_json::to_writer(output, &json).map_err(ExamineSyncError::WriteJson)?; + } + Ok(()) +} diff --git a/rust/automerge-cli/src/main.rs b/rust/automerge-cli/src/main.rs index 48513a92..b0b456c8 100644 --- a/rust/automerge-cli/src/main.rs +++ b/rust/automerge-cli/src/main.rs @@ -9,6 +9,7 @@ use is_terminal::IsTerminal; mod color_json; mod examine; +mod examine_sync; mod export; mod import; mod merge; @@ -114,6 +115,9 @@ enum Command { skip_verifying_heads: SkipVerifyFlag, }, + /// Read an automerge sync messaage and print a JSON representation of it + ExamineSync { input_file: Option }, + /// Read one or more automerge documents and output a merged, compacted version of them Merge { /// The file to write to. 
If omitted assumes stdout @@ -210,6 +214,18 @@ fn main() -> Result<()> { } Ok(()) } + Command::ExamineSync { input_file } => { + let in_buffer = open_file_or_stdin(input_file)?; + let out_buffer = std::io::stdout(); + match examine_sync::examine_sync(in_buffer, out_buffer, std::io::stdout().is_terminal()) + { + Ok(()) => {} + Err(e) => { + eprintln!("Error: {:?}", e); + } + } + Ok(()) + } Command::Merge { input, output_file } => { let out_buffer = create_file_or_stdout(output_file)?; match merge::merge(input.into(), out_buffer) { From 4de0756bb482bf214fd5e8ac80302ada4b0d9fe0 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Sun, 18 Dec 2022 20:21:26 +0000 Subject: [PATCH 13/72] Correctly handle ops on optree node boundaries The `SeekOp` query can produce incorrect results when the optree it is searching only has visible ops on the internal nodes. Add some tests to demonstrate the issue as well as a fix. --- rust/automerge/src/query/seek_op.rs | 119 +++++++++++++++++++++++++++- rust/automerge/tests/test.rs | 39 ++++++++- 2 files changed, 155 insertions(+), 3 deletions(-) diff --git a/rust/automerge/src/query/seek_op.rs b/rust/automerge/src/query/seek_op.rs index 7ca3e9d4..4d955f96 100644 --- a/rust/automerge/src/query/seek_op.rs +++ b/rust/automerge/src/query/seek_op.rs @@ -76,8 +76,19 @@ impl<'a> TreeQuery<'a> for SeekOp<'a> { if self.pos + child.len() >= start { // skip empty nodes if child.index.visible_len(ListEncoding::List) == 0 { - self.pos += child.len(); - QueryResult::Next + let child_contains_key = + child.elements.iter().any(|e| ops[*e].key == self.op.key); + if !child_contains_key { + // If we are in a node which has no visible ops, but none of the + // elements of the node match the key of the op, then we must have + // finished processing and so we can just return. + // See https://github.com/automerge/automerge-rs/pull/480 + QueryResult::Finish + } else { + // Otherwise, we need to proceed to the next node + self.pos += child.len(); + QueryResult::Next + } } else { QueryResult::Descend } @@ -148,3 +159,107 @@ impl<'a> TreeQuery<'a> for SeekOp<'a> { } } } + +#[cfg(test)] +mod tests { + use crate::{ + op_set::OpSet, + op_tree::B, + query::SeekOp, + types::{Key, ObjId, Op, OpId}, + ActorId, ScalarValue, + }; + + #[test] + fn seek_on_page_boundary() { + // Create an optree in which the only visible ops are on the boundaries of the nodes, + // i.e. the visible elements are in the internal nodes. Like so + // + // .----------------------. + // | id | key | succ | + // | B | "a" | | + // | 2B | "b" | | + // '----------------------' + // / | \ + // ;------------------------. | `------------------------------------. + // | id | op | succ | | | id | op | succ | + // | 0 |set "a" | 1 | | | 2B + 1 |set "c" | 2B + 2 | + // | 1 |set "a" | 2 | | | 2B + 2 |set "c" | 2B + 3 | + // | 2 |set "a" | 3 | | ... + // ... | | 3B |set "c" | | + // | B - 1 |set "a" | B | | '------------------------------------' + // '--------'--------'------' | + // | + // .-----------------------------. + // | id | key | succ | + // | B + 1 | "b" | B + 2 | + // | B + 2 | "b" | B + 3 | + // .... + // | B + (B - 1 | "b" | 2B | + // '-----------------------------' + // + // The important point here is that the leaf nodes contain no visible ops for keys "a" and + // "b". 
+ let mut set = OpSet::new(); + let actor = set.m.actors.cache(ActorId::random()); + let a = set.m.props.cache("a".to_string()); + let b = set.m.props.cache("b".to_string()); + let c = set.m.props.cache("c".to_string()); + + let mut counter = 0; + // For each key insert `B` operations with the `pred` and `succ` setup such that the final + // operation for each key is the only visible op. + for key in [a, b, c] { + for iteration in 0..B { + // Generate a value to insert + let keystr = set.m.props.get(key); + let val = keystr.repeat(iteration + 1); + + // Only the last op is visible + let pred = if iteration == 0 { + Default::default() + } else { + set.m + .sorted_opids(vec![OpId::new(counter - 1, actor)].into_iter()) + }; + + // only the last op is visible + let succ = if iteration == B - 1 { + Default::default() + } else { + set.m + .sorted_opids(vec![OpId::new(counter, actor)].into_iter()) + }; + + let op = Op { + id: OpId::new(counter, actor), + action: crate::OpType::Put(ScalarValue::Str(val.into())), + key: Key::Map(key), + succ, + pred, + insert: false, + }; + set.insert(counter as usize, &ObjId::root(), op); + counter += 1; + } + } + + // Now try and create an op which inserts at the next index of 'a' + let new_op = Op { + id: OpId::new(counter, actor), + action: crate::OpType::Put(ScalarValue::Str("test".into())), + key: Key::Map(a), + succ: Default::default(), + pred: set + .m + .sorted_opids(std::iter::once(OpId::new(B as u64 - 1, actor))), + insert: false, + }; + + let q = SeekOp::new(&new_op); + let q = set.search(&ObjId::root(), q); + + // we've inserted `B - 1` elements for "a", so the index should be `B` + assert_eq!(q.pos, B); + } +} diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index c1b653d3..069a664d 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -5,7 +5,7 @@ use automerge::{ }; // set up logging for all the tests -use test_log::test; +//use test_log::test; #[allow(unused_imports)] use automerge_test::{ @@ -1411,3 +1411,40 @@ fn invalid_deflate_stream() { assert!(Automerge::load(&bytes).is_err()); } + +#[test] +fn bad_change_on_optree_node_boundary() { + let mut doc = Automerge::new(); + doc.transact::<_, _, AutomergeError>(|d| { + d.put(ROOT, "a", "z")?; + d.put(ROOT, "b", 0)?; + d.put(ROOT, "c", 0)?; + Ok(()) + }) + .unwrap(); + let iterations = 15_u64; + for i in 0_u64..iterations { + doc.transact::<_, _, AutomergeError>(|d| { + let s = "a".repeat(i as usize); + d.put(ROOT, "a", s)?; + d.put(ROOT, "b", i + 1)?; + d.put(ROOT, "c", i + 1)?; + Ok(()) + }) + .unwrap(); + } + let mut doc2 = Automerge::load(doc.save().as_slice()).unwrap(); + doc.transact::<_, _, AutomergeError>(|d| { + let i = iterations + 2; + let s = "a".repeat(i as usize); + d.put(ROOT, "a", s)?; + d.put(ROOT, "b", i)?; + d.put(ROOT, "c", i)?; + Ok(()) + }) + .unwrap(); + let change = doc.get_changes(&doc2.get_heads()).unwrap(); + doc2.apply_changes(change.into_iter().cloned().collect::>()) + .unwrap(); + Automerge::load(doc2.save().as_slice()).unwrap(); +} From 8a645bb1932a504cfd76dc940a8cd0e5b1ad4de2 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 22 Dec 2022 09:59:16 +0000 Subject: [PATCH 14/72] js: Enable typescript for the JS tests The tsconfig.json was setup to not include the JS tests. Update the config to include the tests when checking typescript and fix all the consequent errors. None of this is semantically meaningful _except_ for a few incorrect usages of the API which were leading to flaky tests. Hooray for types! 
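As a concrete illustration of the kind of error this surfaces (a hypothetical snippet, not one of the cases fixed below; `TestDoc` is invented), once the test files are part of the typescript project, calling `Automerge.init()` without a document type no longer lets a test assign arbitrary properties onto the draft, which is why the diff below adds `<any>` (or a concrete type) to the `init` and `load` calls in these tests:

import * as assert from 'assert'
import * as Automerge from '../src'

type TestDoc = { value?: number }

// let doc = Automerge.init()       // without a type argument, `d.value = 1` below is rejected by tsc
let doc = Automerge.init<TestDoc>() // give the document a concrete shape instead
doc = Automerge.change(doc, (d) => {
  d.value = 1
})
assert.deepEqual(doc, { value: 1 })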
--- javascript/src/index.ts | 10 +- javascript/src/low_level.ts | 5 +- javascript/test/basic_test.ts | 48 +++---- javascript/test/columnar_test.ts | 97 -------------- javascript/test/extra_api_tests.ts | 4 +- javascript/test/helpers.ts | 12 +- javascript/test/legacy_tests.ts | 152 ++++++++++++---------- javascript/test/sync_test.ts | 181 +++++++++++++------------- javascript/test/text_test.ts | 201 +---------------------------- javascript/tsconfig.json | 2 +- rust/automerge-wasm/index.d.ts | 9 +- 11 files changed, 231 insertions(+), 490 deletions(-) delete mode 100644 javascript/test/columnar_test.ts diff --git a/javascript/src/index.ts b/javascript/src/index.ts index 581f50d1..df71c648 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -10,7 +10,7 @@ export {AutomergeValue, Counter, Int, Uint, Float64, ScalarValue} from "./types" import {type API, type Patch} from "@automerge/automerge-wasm"; export { type Patch, PutPatch, DelPatch, SplicePatch, IncPatch, SyncMessage, } from "@automerge/automerge-wasm" -import {ApiHandler, UseApi} from "./low_level" +import {ApiHandler, ChangeToEncode, UseApi} from "./low_level" import {Actor as ActorId, Prop, ObjID, Change, DecodedChange, Heads, Automerge, MaterializeValue} from "@automerge/automerge-wasm" import {JsSyncState as SyncState, SyncMessage, DecodedSyncMessage} from "@automerge/automerge-wasm" @@ -56,7 +56,7 @@ export type ChangeFn = (doc: T) => void * @param before - The document before the change was made * @param after - The document after the change was made */ -export type PatchCallback = (patch: Patch, before: Doc, after: Doc) => void +export type PatchCallback = (patches: Array, before: Doc, after: Doc) => void /** @hidden **/ export interface State { @@ -224,8 +224,8 @@ export function free(doc: Doc) { * }) * ``` */ -export function from>(initialState: T | Doc, actor?: ActorId): Doc { - return change(init(actor), (d) => Object.assign(d, initialState)) +export function from>(initialState: T | Doc, _opts?: ActorId | InitOptions): Doc { + return change(init(_opts), (d) => Object.assign(d, initialState)) } /** @@ -779,7 +779,7 @@ export function initSyncState(): SyncState { } /** @hidden */ -export function encodeChange(change: DecodedChange): Change { +export function encodeChange(change: ChangeToEncode): Change { return ApiHandler.encodeChange(change) } diff --git a/javascript/src/low_level.ts b/javascript/src/low_level.ts index 9a5480b3..6eabfa52 100644 --- a/javascript/src/low_level.ts +++ b/javascript/src/low_level.ts @@ -1,5 +1,6 @@ -import { Automerge, Change, DecodedChange, Actor, SyncState, SyncMessage, JsSyncState, DecodedSyncMessage } from "@automerge/automerge-wasm" +import { Automerge, Change, DecodedChange, Actor, SyncState, SyncMessage, JsSyncState, DecodedSyncMessage, ChangeToEncode } from "@automerge/automerge-wasm" +export { ChangeToEncode } from "@automerge/automerge-wasm" import { API } from "@automerge/automerge-wasm" export function UseApi(api: API) { @@ -12,7 +13,7 @@ export function UseApi(api: API) { export const ApiHandler : API = { create(actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called") }, load(data: Uint8Array, actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called (load)") }, - encodeChange(change: DecodedChange): Change { throw new RangeError("Automerge.use() not called (encodeChange)") }, + encodeChange(change: ChangeToEncode): Change { throw new RangeError("Automerge.use() not called (encodeChange)") }, decodeChange(change: Change): 
DecodedChange { throw new RangeError("Automerge.use() not called (decodeChange)") }, initSyncState(): SyncState { throw new RangeError("Automerge.use() not called (initSyncState)") }, encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { throw new RangeError("Automerge.use() not called (encodeSyncMessage)") }, diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index 437af233..e50e8782 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -1,5 +1,4 @@ import * as assert from 'assert' -import {Counter} from 'automerge' import * as Automerge from '../src' import * as WASM from "@automerge/automerge-wasm" @@ -15,7 +14,7 @@ describe('Automerge', () => { }) it('should be able to make a view with specifc heads', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => d.value = 1) let heads2 = Automerge.getHeads(doc2) let doc3 = Automerge.change(doc2, (d) => d.value = 2) @@ -38,7 +37,7 @@ describe('Automerge', () => { }) it('handle basic set and read on root object', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.hello = "world" d.big = "little" @@ -62,8 +61,8 @@ describe('Automerge', () => { }) it('it should recursively freeze the document if requested', () => { - let doc1 = Automerge.init({ freeze: true } ) - let doc2 = Automerge.init() + let doc1 = Automerge.init({ freeze: true } ) + let doc2 = Automerge.init() assert(Object.isFrozen(doc1)) assert(!Object.isFrozen(doc2)) @@ -82,7 +81,7 @@ describe('Automerge', () => { assert(Object.isFrozen(doc3.sub)) // works on load - let doc4 = Automerge.load(Automerge.save(doc3), { freeze: true }) + let doc4 = Automerge.load(Automerge.save(doc3), { freeze: true }) assert(Object.isFrozen(doc4)) assert(Object.isFrozen(doc4.sub)) @@ -97,7 +96,7 @@ describe('Automerge', () => { }) it('handle basic sets over many changes', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let timestamp = new Date(); let counter = new Automerge.Counter(100); let bytes = new Uint8Array([10,11,12]); @@ -135,7 +134,7 @@ describe('Automerge', () => { }) it('handle overwrites to values', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.hello = "world1" }) @@ -152,7 +151,7 @@ describe('Automerge', () => { }) it('handle set with object value', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.subobj = { hello: "world", subsubobj: { zip: "zop" } } }) @@ -160,13 +159,13 @@ describe('Automerge', () => { }) it('handle simple list creation', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => d.list = []) assert.deepEqual(doc2, { list: []}) }) it('handle simple lists', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.list = [ 1, 2, 3 ] }) @@ -188,7 +187,7 @@ describe('Automerge', () => { assert.deepEqual(doc3, { list: [1,"a",3] }) }) it('handle simple lists', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.list = [ 1, 2, 3 ] }) @@ -198,7 +197,7 @@ describe('Automerge', () => { assert.deepEqual(docB2, doc2); }) it('handle text', () => { - let doc1 = Automerge.init() + let doc1 = Automerge.init() let doc2 = Automerge.change(doc1, (d) => { d.list = "hello" Automerge.splice(d, "list", 2, 0, "Z") 
@@ -212,7 +211,7 @@ describe('Automerge', () => { it('handle non-text strings', () => { let doc1 = WASM.create(); doc1.put("_root", "text", "hello world"); - let doc2 = Automerge.load(doc1.save()) + let doc2 = Automerge.load(doc1.save()) assert.throws(() => { Automerge.change(doc2, (d) => { Automerge.splice(d, "text", 1, 0, "Z") }) }, /Cannot splice/) @@ -238,6 +237,7 @@ describe('Automerge', () => { }) assert.deepEqual(doc5, { list: [2,1,9,10,3,11,12] }); let doc6 = Automerge.change(doc5, (d) => { + // @ts-ignore d.list.insertAt(3,100,101) }) assert.deepEqual(doc6, { list: [2,1,9,100,101,10,3,11,12] }); @@ -261,7 +261,7 @@ describe('Automerge', () => { doc = Automerge.change(doc, d => { d.key = "value" }) - let _ = Automerge.save(doc) + Automerge.save(doc) let headsBefore = Automerge.getHeads(doc) headsBefore.sort() doc = Automerge.emptyChange(doc, "empty change") @@ -278,24 +278,24 @@ describe('Automerge', () => { numbers: [20,3,100], repeats: [20,20,3,3,3,3,100,100] }) - let r1 = [] + let r1: Array = [] doc = Automerge.change(doc, (d) => { - assert.deepEqual(d.chars.concat([1,2]), ["a","b","c",1,2]) + assert.deepEqual((d.chars as any[]).concat([1,2]), ["a","b","c",1,2]) assert.deepEqual(d.chars.map((n) => n + "!"), ["a!", "b!", "c!"]) assert.deepEqual(d.numbers.map((n) => n + 10), [30, 13, 110]) assert.deepEqual(d.numbers.toString(), "20,3,100") assert.deepEqual(d.numbers.toLocaleString(), "20,3,100") - assert.deepEqual(d.numbers.forEach((n) => r1.push(n)), undefined) + assert.deepEqual(d.numbers.forEach((n: number) => r1.push(n)), undefined) assert.deepEqual(d.numbers.every((n) => n > 1), true) assert.deepEqual(d.numbers.every((n) => n > 10), false) assert.deepEqual(d.numbers.filter((n) => n > 10), [20,100]) assert.deepEqual(d.repeats.find((n) => n < 10), 3) - assert.deepEqual(d.repeats.toArray().find((n) => n < 10), 3) + assert.deepEqual(d.repeats.find((n) => n < 10), 3) assert.deepEqual(d.repeats.find((n) => n < 0), undefined) assert.deepEqual(d.repeats.findIndex((n) => n < 10), 2) assert.deepEqual(d.repeats.findIndex((n) => n < 0), -1) - assert.deepEqual(d.repeats.toArray().findIndex((n) => n < 10), 2) - assert.deepEqual(d.repeats.toArray().findIndex((n) => n < 0), -1) + assert.deepEqual(d.repeats.findIndex((n) => n < 10), 2) + assert.deepEqual(d.repeats.findIndex((n) => n < 0), -1) assert.deepEqual(d.numbers.includes(3), true) assert.deepEqual(d.numbers.includes(-3), false) assert.deepEqual(d.numbers.join("|"), "20|3|100") @@ -321,8 +321,8 @@ describe('Automerge', () => { }) it('should obtain the same conflicts, regardless of merge order', () => { - let s1 = Automerge.init() - let s2 = Automerge.init() + let s1 = Automerge.init() + let s2 = Automerge.init() s1 = Automerge.change(s1, doc => { doc.x = 1; doc.y = 2 }) s2 = Automerge.change(s2, doc => { doc.x = 3; doc.y = 4 }) const m1 = Automerge.merge(Automerge.clone(s1), Automerge.clone(s2)) @@ -346,7 +346,7 @@ describe('Automerge', () => { it("should return null for scalar values", () => { assert.equal(Automerge.getObjectId(s1.string), null) assert.equal(Automerge.getObjectId(s1.number), null) - assert.equal(Automerge.getObjectId(s1.null), null) + assert.equal(Automerge.getObjectId(s1.null!), null) assert.equal(Automerge.getObjectId(s1.date), null) assert.equal(Automerge.getObjectId(s1.counter), null) assert.equal(Automerge.getObjectId(s1.bytes), null) diff --git a/javascript/test/columnar_test.ts b/javascript/test/columnar_test.ts deleted file mode 100644 index ca670377..00000000 --- a/javascript/test/columnar_test.ts +++ 
/dev/null @@ -1,97 +0,0 @@ -import * as assert from 'assert' -import { checkEncoded } from './helpers' -import * as Automerge from '../src' -import { encodeChange, decodeChange } from '../src' - -describe('change encoding', () => { - it('should encode text edits', () => { - /* - const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: '', deps: [], ops: [ - {action: 'makeText', obj: '_root', key: 'text', insert: false, pred: []}, - {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []}, - {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', insert: false, pred: ['2@aaaa']}, - {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []}, - {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []} - ]} - */ - const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: null, deps: [], ops: [ - {action: 'makeText', obj: '_root', key: 'text', pred: []}, - {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []}, - {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', pred: ['2@aaaa']}, - {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []}, - {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []} - ]} - checkEncoded(encodeChange(change1), [ - 0x85, 0x6f, 0x4a, 0x83, // magic bytes - 0xe2, 0xbd, 0xfb, 0xf5, // checksum - 1, 94, 0, 2, 0xaa, 0xaa, // chunkType: change, length, deps, actor 'aaaa' - 1, 1, 9, 0, 0, // seq, startOp, time, message, actor list - 12, 0x01, 4, 0x02, 4, // column count, objActor, objCtr - 0x11, 8, 0x13, 7, 0x15, 8, // keyActor, keyCtr, keyStr - 0x34, 4, 0x42, 6, // insert, action - 0x56, 6, 0x57, 3, // valLen, valRaw - 0x70, 6, 0x71, 2, 0x73, 2, // predNum, predActor, predCtr - 0, 1, 4, 0, // objActor column: null, 0, 0, 0, 0 - 0, 1, 4, 1, // objCtr column: null, 1, 1, 1, 1 - 0, 2, 0x7f, 0, 0, 1, 0x7f, 0, // keyActor column: null, null, 0, null, 0 - 0, 1, 0x7c, 0, 2, 0x7e, 4, // keyCtr column: null, 0, 2, 0, 4 - 0x7f, 4, 0x74, 0x65, 0x78, 0x74, 0, 4, // keyStr column: 'text', null, null, null, null - 1, 1, 1, 2, // insert column: false, true, false, true, true - 0x7d, 4, 1, 3, 2, 1, // action column: makeText, set, del, set, set - 0x7d, 0, 0x16, 0, 2, 0x16, // valLen column: 0, 0x16, 0, 0x16, 0x16 - 0x68, 0x48, 0x69, // valRaw column: 'h', 'H', 'i' - 2, 0, 0x7f, 1, 2, 0, // predNum column: 0, 0, 1, 0, 0 - 0x7f, 0, // predActor column: 0 - 0x7f, 2 // predCtr column: 2 - ]) - const decoded = decodeChange(encodeChange(change1)) - assert.deepStrictEqual(decoded, Object.assign({hash: decoded.hash}, change1)) - }) - - // FIXME - skipping this b/c it was never implemented in the rust impl and isnt trivial -/* - it.skip('should require strict ordering of preds', () => { - const change = new Uint8Array([ - 133, 111, 74, 131, 31, 229, 112, 44, 1, 105, 1, 58, 30, 190, 100, 253, 180, 180, 66, 49, 126, - 81, 142, 10, 3, 35, 140, 189, 231, 34, 145, 57, 66, 23, 224, 149, 64, 97, 88, 140, 168, 194, - 229, 4, 244, 209, 58, 138, 67, 140, 1, 152, 236, 250, 2, 0, 1, 4, 55, 234, 66, 242, 8, 21, 11, - 52, 1, 66, 2, 86, 3, 87, 10, 112, 2, 113, 3, 115, 4, 127, 9, 99, 111, 109, 109, 111, 110, 86, - 97, 114, 1, 127, 1, 127, 166, 1, 52, 48, 57, 49, 52, 57, 52, 53, 56, 50, 127, 2, 126, 0, 1, - 126, 139, 1, 0 - ]) - assert.throws(() => { decodeChange(change) }, /operation IDs are not in ascending order/) - }) -*/ - - describe('with trailing bytes', () => { - let change = new Uint8Array([ - 0x85, 0x6f, 0x4a, 0x83, // magic 
bytes - 0xb2, 0x98, 0x9e, 0xa9, // checksum - 1, 61, 0, 2, 0x12, 0x34, // chunkType: change, length, deps, actor '1234' - 1, 1, 252, 250, 220, 255, 5, // seq, startOp, time - 14, 73, 110, 105, 116, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, // message: 'Initialization' - 0, 6, // actor list, column count - 0x15, 3, 0x34, 1, 0x42, 2, // keyStr, insert, action - 0x56, 2, 0x57, 1, 0x70, 2, // valLen, valRaw, predNum - 0x7f, 1, 0x78, // keyStr: 'x' - 1, // insert: false - 0x7f, 1, // action: set - 0x7f, 19, // valLen: 1 byte of type uint - 1, // valRaw: 1 - 0x7f, 0, // predNum: 0 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 // 10 trailing bytes - ]) - - it('should allow decoding and re-encoding', () => { - // NOTE: This calls the JavaScript encoding and decoding functions, even when the WebAssembly - // backend is loaded. Should the wasm backend export its own functions for testing? - checkEncoded(change, encodeChange(decodeChange(change))) - }) - - it('should be preserved in document encoding', () => { - const [doc] = Automerge.applyChanges(Automerge.init(), [change]) - const [reconstructed] = Automerge.getAllChanges(Automerge.load(Automerge.save(doc))) - checkEncoded(change, reconstructed) - }) - }) -}) diff --git a/javascript/test/extra_api_tests.ts b/javascript/test/extra_api_tests.ts index ce0438d5..c0c18177 100644 --- a/javascript/test/extra_api_tests.ts +++ b/javascript/test/extra_api_tests.ts @@ -5,8 +5,8 @@ import * as Automerge from '../src' describe('Automerge', () => { describe('basics', () => { it('should allow you to load incrementally', () => { - let doc1 = Automerge.from({ foo: "bar" }) - let doc2 = Automerge.init(); + let doc1 = Automerge.from({ foo: "bar" }) + let doc2 = Automerge.init(); doc2 = Automerge.loadIncremental(doc2, Automerge.save(doc1)) doc1 = Automerge.change(doc1, (d) => d.foo2 = "bar2") doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) diff --git a/javascript/test/helpers.ts b/javascript/test/helpers.ts index d5292130..7799cb84 100644 --- a/javascript/test/helpers.ts +++ b/javascript/test/helpers.ts @@ -3,14 +3,18 @@ import { Encoder } from './legacy/encoding' // Assertion that succeeds if the first argument deepStrictEquals at least one of the // subsequent arguments (but we don't care which one) -function assertEqualsOneOf(actual, ...expected) { +export function assertEqualsOneOf(actual, ...expected) { assert(expected.length > 0) for (let i = 0; i < expected.length; i++) { try { assert.deepStrictEqual(actual, expected[i]) return // if we get here without an exception, that means success } catch (e) { - if (!e.name.match(/^AssertionError/) || i === expected.length - 1) throw e + if (e instanceof assert.AssertionError) { + if (!e.name.match(/^AssertionError/) || i === expected.length - 1) throw e + } else { + throw e + } } } } @@ -19,7 +23,7 @@ function assertEqualsOneOf(actual, ...expected) { * Asserts that the byte array maintained by `encoder` contains the same byte * sequence as the array `bytes`. */ -function checkEncoded(encoder, bytes, detail) { +export function checkEncoded(encoder, bytes, detail?) { const encoded = (encoder instanceof Encoder) ? encoder.buffer : encoder const expected = new Uint8Array(bytes) const message = (detail ? 
`${detail}: ` : '') + `${encoded} expected to equal ${expected}` @@ -28,5 +32,3 @@ function checkEncoded(encoder, bytes, detail) { assert(encoded[i] === expected[i], message) } } - -module.exports = { assertEqualsOneOf, checkEncoded } diff --git a/javascript/test/legacy_tests.ts b/javascript/test/legacy_tests.ts index 2320f909..c5c88275 100644 --- a/javascript/test/legacy_tests.ts +++ b/javascript/test/legacy_tests.ts @@ -50,30 +50,35 @@ describe('Automerge', () => { }) it('accepts an array as initial state, but converts it to an object', () => { + // @ts-ignore const doc = Automerge.from(['a', 'b', 'c']) assert.deepStrictEqual(doc, { '0': 'a', '1': 'b', '2': 'c' }) }) it('accepts strings as initial values, but treats them as an array of characters', () => { + // @ts-ignore const doc = Automerge.from('abc') assert.deepStrictEqual(doc, { '0': 'a', '1': 'b', '2': 'c' }) }) it('ignores numbers provided as initial values', () => { + // @ts-ignore const doc = Automerge.from(123) assert.deepStrictEqual(doc, {}) }) it('ignores booleans provided as initial values', () => { + // @ts-ignore const doc1 = Automerge.from(false) assert.deepStrictEqual(doc1, {}) + // @ts-ignore const doc2 = Automerge.from(true) assert.deepStrictEqual(doc2, {}) }) }) describe('sequential use', () => { - let s1, s2 + let s1: Automerge.Doc, s2: Automerge.Doc beforeEach(() => { s1 = Automerge.init("aabbcc") }) @@ -89,12 +94,12 @@ describe('Automerge', () => { s2 = Automerge.change(s1, doc => doc.foo = 'bar') const change2 = Automerge.getLastLocalChange(s2) assert.strictEqual(change1, undefined) - const change = decodeChange(change2) + const change = Automerge.decodeChange(change2!) assert.deepStrictEqual(change, { actor: change.actor, deps: [], seq: 1, startOp: 1, - hash: change.hash, message: '', time: change.time, + hash: change.hash, message: null, time: change.time, ops: [ - {obj: '_root', key: 'foo', action: 'makeText', insert: false, pred: []}, + {obj: '_root', key: 'foo', action: 'makeText', pred: []}, {action: 'set', elemId: '_head', insert: true, obj: '1@aabbcc', pred: [], value: 'b' }, {action: 'set', elemId: '2@aabbcc', insert: true, obj: '1@aabbcc', pred: [], value: 'a' }, {action: 'set', elemId: '3@aabbcc', insert: true, obj: '1@aabbcc', pred: [], value: 'r' }] @@ -127,12 +132,14 @@ describe('Automerge', () => { s1 = Automerge.init({freeze: true}) s2 = Automerge.change(s1, doc => doc.foo = 'bar') try { + // @ts-ignore s2.foo = 'lemon' } catch (e) { } assert.strictEqual(s2.foo, 'bar') let deleted = false try { + // @ts-ignore deleted = delete s2.foo } catch (e) { } assert.strictEqual(s2.foo, 'bar') @@ -140,6 +147,7 @@ describe('Automerge', () => { Automerge.change(s2, () => { try { + // @ts-ignore s2.foo = 'lemon' } catch (e) { } assert.strictEqual(s2.foo, 'bar') @@ -187,7 +195,7 @@ describe('Automerge', () => { s1 = Automerge.change(s1, doc => doc.field = 123) s2 = Automerge.change(s2, doc => doc.field = 321) s1 = Automerge.merge(s1, s2) - assert.strictEqual(Object.keys(Automerge.getConflicts(s1, 'field')).length, 2) + assert.strictEqual(Object.keys(Automerge.getConflicts(s1, 'field')!).length, 2) const resolved = Automerge.change(s1, doc => doc.field = s1.field) assert.notStrictEqual(resolved, s1) assert.deepStrictEqual(resolved, {field: s1.field}) @@ -218,7 +226,9 @@ describe('Automerge', () => { it('should sanity-check arguments', () => { s1 = Automerge.change(s1, doc => doc.nested = {}) + // @ts-ignore assert.throws(() => { Automerge.change({}, doc => doc.foo = 'bar') }, /must be the document root/) + // 
@ts-ignore assert.throws(() => { Automerge.change(s1.nested, doc => doc.foo = 'bar') }, /must be the document root/) }) @@ -226,6 +236,7 @@ describe('Automerge', () => { assert.throws(() => { Automerge.change(s1, doc1 => { Automerge.change(doc1, doc2 => { + // @ts-ignore doc2.foo = 'bar' }) }) @@ -285,32 +296,31 @@ describe('Automerge', () => { }) it('should call patchCallback if supplied', () => { - const callbacks = [], actor = Automerge.getActorId(s1) + const callbacks: Array<{patches: Array, before: Automerge.Doc, after: Automerge.Doc}> = [] const s2 = Automerge.change(s1, { - patchCallback: (patch, before, after) => callbacks.push({patch, before, after}) + patchCallback: (patches, before, after) => callbacks.push({patches, before, after}) }, doc => { doc.birds = ['Goldfinch'] }) assert.strictEqual(callbacks.length, 1) - assert.deepStrictEqual(callbacks[0].patch[0], { action: "put", path: ["birds"], value: [] }) - assert.deepStrictEqual(callbacks[0].patch[1], { action: "insert", path: ["birds",0], values: [""] }) - assert.deepStrictEqual(callbacks[0].patch[2], { action: "splice", path: ["birds",0, 0], value: "Goldfinch" }) + assert.deepStrictEqual(callbacks[0].patches[0], { action: "put", path: ["birds"], value: [] }) + assert.deepStrictEqual(callbacks[0].patches[1], { action: "insert", path: ["birds",0], values: [""] }) + assert.deepStrictEqual(callbacks[0].patches[2], { action: "splice", path: ["birds",0, 0], value: "Goldfinch" }) assert.strictEqual(callbacks[0].before, s1) assert.strictEqual(callbacks[0].after, s2) }) it('should call a patchCallback set up on document initialisation', () => { - const callbacks = [] + const callbacks: Array<{patches: Array, before: Automerge.Doc, after: Automerge.Doc}> = [] s1 = Automerge.init({ - patchCallback: (patch, before, after) => callbacks.push({patch, before, after }) + patchCallback: (patches, before, after) => callbacks.push({patches, before, after }) }) const s2 = Automerge.change(s1, doc => doc.bird = 'Goldfinch') - const actor = Automerge.getActorId(s1) assert.strictEqual(callbacks.length, 1) - assert.deepStrictEqual(callbacks[0].patch[0], { + assert.deepStrictEqual(callbacks[0].patches[0], { action: "put", path: ["bird"], value: "" }) - assert.deepStrictEqual(callbacks[0].patch[1], { + assert.deepStrictEqual(callbacks[0].patches[1], { action: "splice", path: ["bird", 0], value: "Goldfinch" }) assert.strictEqual(callbacks[0].before, s1) @@ -417,7 +427,7 @@ describe('Automerge', () => { it('should assign an objectId to nested maps', () => { s1 = Automerge.change(s1, doc => { doc.nested = {} }) let id = Automerge.getObjectId(s1.nested) - assert.strictEqual(OPID_PATTERN.test(Automerge.getObjectId(s1.nested)), true) + assert.strictEqual(OPID_PATTERN.test(Automerge.getObjectId(s1.nested)!), true) assert.notEqual(Automerge.getObjectId(s1.nested), '_root') }) @@ -472,7 +482,7 @@ describe('Automerge', () => { s1 = Automerge.change(s1, 'change 1', doc => { doc.myPet = {species: 'dog', legs: 4, breed: 'dachshund'} }) - s2 = Automerge.change(s1, 'change 2', doc => { + let s2 = Automerge.change(s1, 'change 2', doc => { doc.myPet = {species: 'koi', variety: '紅白', colors: {red: true, white: true, black: false}} }) assert.deepStrictEqual(s1.myPet, { @@ -483,6 +493,7 @@ describe('Automerge', () => { species: 'koi', variety: '紅白', colors: {red: true, white: true, black: false} }) + // @ts-ignore assert.strictEqual(s2.myPet.breed, undefined) assert.strictEqual(s2.myPet.variety, '紅白') }) @@ -743,15 +754,18 @@ describe('Automerge', () => { }) it('should 
allow adding and removing list elements in the same change callback', () => { - s1 = Automerge.change(Automerge.init(), doc => doc.noodles = []) + let s1 = Automerge.change(Automerge.init<{noodles: Array}>(), doc => doc.noodles = []) s1 = Automerge.change(s1, doc => { doc.noodles.push('udon') + // @ts-ignore doc.noodles.deleteAt(0) }) assert.deepStrictEqual(s1, {noodles: []}) // do the add-remove cycle twice, test for #151 (https://github.com/automerge/automerge/issues/151) s1 = Automerge.change(s1, doc => { + // @ts-ignore doc.noodles.push('soba') + // @ts-ignore doc.noodles.deleteAt(0) }) assert.deepStrictEqual(s1, {noodles: []}) @@ -783,7 +797,7 @@ describe('Automerge', () => { describe('counters', () => { // counter it('should allow deleting counters from maps', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = {wrens: new Automerge.Counter(1)}) + const s1 = Automerge.change(Automerge.init(), doc => doc.birds = {wrens: new Automerge.Counter(1)}) const s2 = Automerge.change(s1, doc => doc.birds.wrens.increment(2)) const s3 = Automerge.change(s2, doc => delete doc.birds.wrens) assert.deepStrictEqual(s2, {birds: {wrens: new Automerge.Counter(3)}}) @@ -803,12 +817,12 @@ describe('Automerge', () => { }) describe('concurrent use', () => { - let s1, s2, s3, s4 + let s1: Automerge.Doc, s2: Automerge.Doc, s3: Automerge.Doc, s4: Automerge.Doc beforeEach(() => { - s1 = Automerge.init() - s2 = Automerge.init() - s3 = Automerge.init() - s4 = Automerge.init() + s1 = Automerge.init() + s2 = Automerge.init() + s3 = Automerge.init() + s4 = Automerge.init() }) it('should merge concurrent updates of different properties', () => { @@ -927,7 +941,7 @@ describe('Automerge', () => { } else { assert.deepStrictEqual(s3.list, [{map2: true, key: 2}]) } - assert.deepStrictEqual(Automerge.getConflicts(s3.list, 0), { + assert.deepStrictEqual(Automerge.getConflicts(s3.list, 0), { [`8@${Automerge.getActorId(s1)}`]: {map1: true, key: 1}, [`8@${Automerge.getActorId(s2)}`]: {map2: true, key: 2} }) @@ -1130,22 +1144,22 @@ describe('Automerge', () => { }) it('should reconstitute complex datatypes', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.todos = [{title: 'water plants', done: false}]) + let s1 = Automerge.change(Automerge.init(), doc => doc.todos = [{title: 'water plants', done: false}]) let s2 = Automerge.load(Automerge.save(s1)) assert.deepStrictEqual(s2, {todos: [{title: 'water plants', done: false}]}) }) it('should save and load maps with @ symbols in the keys', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc["123@4567"] = "hello") + let s1 = Automerge.change(Automerge.init(), doc => doc["123@4567"] = "hello") let s2 = Automerge.load(Automerge.save(s1)) assert.deepStrictEqual(s2, { "123@4567": "hello" }) }) it('should reconstitute conflicts', () => { - let s1 = Automerge.change(Automerge.init('111111'), doc => doc.x = 3) - let s2 = Automerge.change(Automerge.init('222222'), doc => doc.x = 5) + let s1 = Automerge.change(Automerge.init('111111'), doc => doc.x = 3) + let s2 = Automerge.change(Automerge.init('222222'), doc => doc.x = 5) s1 = Automerge.merge(s1, s2) - let s3 = Automerge.load(Automerge.save(s1)) + let s3 = Automerge.load(Automerge.save(s1)) assert.strictEqual(s1.x, 5) assert.strictEqual(s3.x, 5) assert.deepStrictEqual(Automerge.getConflicts(s1, 'x'), {'1@111111': 3, '1@222222': 5}) @@ -1153,26 +1167,26 @@ describe('Automerge', () => { }) it('should reconstitute element ID counters', () => { - const s1 = Automerge.init('01234567') + const s1 
= Automerge.init('01234567') const s2 = Automerge.change(s1, doc => doc.list = ['a']) const listId = Automerge.getObjectId(s2.list) - const changes12 = Automerge.getAllChanges(s2).map(decodeChange) + const changes12 = Automerge.getAllChanges(s2).map(Automerge.decodeChange) assert.deepStrictEqual(changes12, [{ hash: changes12[0].hash, actor: '01234567', seq: 1, startOp: 1, - time: changes12[0].time, message: '', deps: [], ops: [ - {obj: '_root', action: 'makeList', key: 'list', insert: false, pred: []}, + time: changes12[0].time, message: null, deps: [], ops: [ + {obj: '_root', action: 'makeList', key: 'list', pred: []}, {obj: listId, action: 'makeText', elemId: '_head', insert: true, pred: []}, {obj: "2@01234567", action: 'set', elemId: '_head', insert: true, value: 'a', pred: []} ] }]) const s3 = Automerge.change(s2, doc => doc.list.deleteAt(0)) - const s4 = Automerge.load(Automerge.save(s3), '01234567') + const s4 = Automerge.load(Automerge.save(s3), '01234567') const s5 = Automerge.change(s4, doc => doc.list.push('b')) - const changes45 = Automerge.getAllChanges(s5).map(decodeChange) + const changes45 = Automerge.getAllChanges(s5).map(Automerge.decodeChange) assert.deepStrictEqual(s5, {list: ['b']}) assert.deepStrictEqual(changes45[2], { hash: changes45[2].hash, actor: '01234567', seq: 3, startOp: 5, - time: changes45[2].time, message: '', deps: [changes45[1].hash], ops: [ + time: changes45[2].time, message: null, deps: [changes45[1].hash], ops: [ {obj: listId, action: 'makeText', elemId: '_head', insert: true, pred: []}, {obj: "5@01234567", action: 'set', elemId: '_head', insert: true, value: 'b', pred: []} ] @@ -1180,7 +1194,7 @@ describe('Automerge', () => { }) it('should allow a reloaded list to be mutated', () => { - let doc = Automerge.change(Automerge.init(), doc => doc.foo = []) + let doc = Automerge.change(Automerge.init(), doc => doc.foo = []) doc = Automerge.load(Automerge.save(doc)) doc = Automerge.change(doc, 'add', doc => doc.foo.push(1)) doc = Automerge.load(Automerge.save(doc)) @@ -1191,23 +1205,23 @@ describe('Automerge', () => { // In this test, the keyCtr column is long enough for deflate compression to kick in, but the // keyStr column is short. Thus, the deflate bit gets set for keyCtr but not for keyStr. // When checking whether the columns appear in ascending order, we must ignore the deflate bit. 
- let doc = Automerge.change(Automerge.init(), doc => { + let doc = Automerge.change(Automerge.init(), doc => { doc.list = [] for (let i = 0; i < 200; i++) doc.list.insertAt(Math.floor(Math.random() * i), 'a') }) - Automerge.load(Automerge.save(doc)) - let expected = [] + Automerge.load(Automerge.save(doc)) + let expected: Array = [] for (let i = 0; i < 200; i++) expected.push('a') assert.deepStrictEqual(doc, {list: expected}) }) it.skip('should call patchCallback if supplied to load', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) + const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) const s2 = Automerge.change(s1, doc => doc.birds.push('Chaffinch')) - const callbacks = [], actor = Automerge.getActorId(s1) - const reloaded = Automerge.load(Automerge.save(s2), { - patchCallback(patch, before, after, local) { - callbacks.push({patch, before, after, local}) + const callbacks: Array = [], actor = Automerge.getActorId(s1) + const reloaded = Automerge.load(Automerge.save(s2), { + patchCallback(patch, before, after) { + callbacks.push({patch, before, after}) } }) assert.strictEqual(callbacks.length, 1) @@ -1231,7 +1245,7 @@ describe('Automerge', () => { }) it('should make past document states accessible', () => { - let s = Automerge.init() + let s = Automerge.init() s = Automerge.change(s, doc => doc.config = {background: 'blue'}) s = Automerge.change(s, doc => doc.birds = ['mallard']) s = Automerge.change(s, doc => doc.birds.unshift('oystercatcher')) @@ -1243,7 +1257,7 @@ describe('Automerge', () => { }) it('should make change messages accessible', () => { - let s = Automerge.init() + let s = Automerge.init() s = Automerge.change(s, 'Empty Bookshelf', doc => doc.books = []) s = Automerge.change(s, 'Add Orwell', doc => doc.books.push('Nineteen Eighty-Four')) s = Automerge.change(s, 'Add Huxley', doc => doc.books.push('Brave New World')) @@ -1260,32 +1274,32 @@ describe('Automerge', () => { }) it('should return an empty list when nothing changed', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) assert.deepStrictEqual(Automerge.getChanges(s1, s1), []) }) it('should do nothing when applying an empty list of changes', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) assert.deepStrictEqual(Automerge.applyChanges(s1, [])[0], s1) }) it('should return all changes when compared to an empty document', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) let changes = Automerge.getChanges(Automerge.init(), s2) assert.strictEqual(changes.length, 2) }) it('should allow a document copy to be reconstructed from scratch', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) let changes = Automerge.getAllChanges(s2) - let [s3] = Automerge.applyChanges(Automerge.init(), changes) + let [s3] = Automerge.applyChanges(Automerge.init(), changes) 
assert.deepStrictEqual(s3.birds, ['Chaffinch', 'Bullfinch']) }) it('should return changes since the last given version', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) let changes1 = Automerge.getAllChanges(s1) let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) let changes2 = Automerge.getChanges(s1, s2) @@ -1294,29 +1308,29 @@ describe('Automerge', () => { }) it('should incrementally apply changes since the last given version', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) let changes1 = Automerge.getAllChanges(s1) let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) let changes2 = Automerge.getChanges(s1, s2) - let [s3] = Automerge.applyChanges(Automerge.init(), changes1) + let [s3] = Automerge.applyChanges(Automerge.init(), changes1) let [s4] = Automerge.applyChanges(s3, changes2) assert.deepStrictEqual(s3.birds, ['Chaffinch']) assert.deepStrictEqual(s4.birds, ['Chaffinch', 'Bullfinch']) }) it('should handle updates to a list element', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch', 'Bullfinch']) + let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch', 'Bullfinch']) let s2 = Automerge.change(s1, doc => doc.birds[0] = 'Goldfinch') - let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) + let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) assert.deepStrictEqual(s3.birds, ['Goldfinch', 'Bullfinch']) assert.strictEqual(Automerge.getConflicts(s3.birds, 0), undefined) }) // TEXT it('should handle updates to a text object', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.text = 'ab') + let s1 = Automerge.change(Automerge.init(), doc => doc.text = 'ab') let s2 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 1, "A")) - let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) + let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) assert.deepStrictEqual([...s3.text], ['A', 'b']) }) @@ -1339,7 +1353,7 @@ describe('Automerge', () => { */ it('should report missing dependencies with out-of-order applyChanges', () => { - let s0 = Automerge.init() + let s0 = Automerge.init() let s1 = Automerge.change(s0, doc => doc.test = ['a']) let changes01 = Automerge.getAllChanges(s1) let s2 = Automerge.change(s1, doc => doc.test = ['b']) @@ -1349,14 +1363,14 @@ describe('Automerge', () => { let s4 = Automerge.init() let [s5] = Automerge.applyChanges(s4, changes23) let [s6] = Automerge.applyChanges(s5, changes12) - assert.deepStrictEqual(Automerge.getMissingDeps(s6), [decodeChange(changes01[0]).hash]) + assert.deepStrictEqual(Automerge.getMissingDeps(s6, []), [decodeChange(changes01[0]).hash]) }) it('should call patchCallback if supplied when applying changes', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) - const callbacks = [], actor = Automerge.getActorId(s1) + const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) + const callbacks: Array = [] const before = Automerge.init() - const [after, patch] = Automerge.applyChanges(before, Automerge.getAllChanges(s1), { + const [after] = 
Automerge.applyChanges(before, Automerge.getAllChanges(s1), { patchCallback(patch, before, after) { callbacks.push({patch, before, after}) } @@ -1370,9 +1384,9 @@ describe('Automerge', () => { }) it('should merge multiple applied changes into one patch', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) + const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) const s2 = Automerge.change(s1, doc => doc.birds.push('Chaffinch')) - const patches = [], actor = Automerge.getActorId(s2) + const patches: Array = [] Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2), {patchCallback: p => patches.push(... p)}) assert.deepStrictEqual(patches, [ @@ -1385,8 +1399,8 @@ describe('Automerge', () => { }) it('should call a patchCallback registered on doc initialisation', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.bird = 'Goldfinch') - const patches = [], actor = Automerge.getActorId(s1) + const s1 = Automerge.change(Automerge.init(), doc => doc.bird = 'Goldfinch') + const patches: Array = [] const before = Automerge.init({patchCallback: p => patches.push(... p)}) Automerge.applyChanges(before, Automerge.getAllChanges(s1)) assert.deepStrictEqual(patches, [ diff --git a/javascript/test/sync_test.ts b/javascript/test/sync_test.ts index 56b4bd87..8e03c18a 100644 --- a/javascript/test/sync_test.ts +++ b/javascript/test/sync_test.ts @@ -1,25 +1,19 @@ import * as assert from 'assert' import * as Automerge from '../src' import { BloomFilter } from './legacy/sync' -import { decodeChangeMeta } from './legacy/columnar' import { decodeSyncMessage, encodeSyncMessage, decodeSyncState, encodeSyncState, initSyncState } from "../src" -function inspect(a) { - const util = require("util"); - return util.inspect(a,false,null,true) -} - function getHeads(doc) { return Automerge.getHeads(doc) } function getMissingDeps(doc) { - return Automerge.getMissingDeps(doc) + return Automerge.getMissingDeps(doc, []) } function sync(a, b, aSyncState = initSyncState(), bSyncState = initSyncState()) { const MAX_ITER = 10 - let aToBmsg = null, bToAmsg = null, i = 0 + let aToBmsg: Automerge.SyncMessage | null = null, bToAmsg: Automerge.SyncMessage | null = null, i = 0 do { [aSyncState, aToBmsg] = Automerge.generateSyncMessage(a, aSyncState) ;[bSyncState, bToAmsg] = Automerge.generateSyncMessage(b, bSyncState) @@ -59,9 +53,11 @@ describe('Data sync protocol', () => { it('should not reply if we have no data as well', () => { let n1 = Automerge.init(), n2 = Automerge.init() let s1 = initSyncState(), s2 = initSyncState() - let m1 = null, m2 = null + let m1: Automerge.SyncMessage | null = null, m2: Automerge.SyncMessage | null = null ;[s1, m1] = Automerge.generateSyncMessage(n1, s1) - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + if (m1 != null) { + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + } ;[s2, m2] = Automerge.generateSyncMessage(n2, s2) assert.deepStrictEqual(m2, null) }) @@ -69,9 +65,9 @@ describe('Data sync protocol', () => { describe('documents with data', () => { it('repos with equal heads do not need a reply message', () => { - let n1 = Automerge.init(), n2 = Automerge.init() + let n1 = Automerge.init(), n2 = Automerge.init() let s1 = initSyncState(), s2 = initSyncState() - let m1 = null, m2 = null + let m1: Automerge.SyncMessage | null = null, m2: Automerge.SyncMessage | null = null // make two nodes with the same changes n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) @@ -84,13 +80,15 @@ describe('Data 
sync protocol', () => { assert.deepStrictEqual(s1.lastSentHeads, getHeads(n1)) // heads are equal so this message should be null - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + if (m1 != null) { + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + } ;[s2, m2] = Automerge.generateSyncMessage(n2, s2) assert.strictEqual(m2, null) }) it('n1 should offer all changes to n2 when starting from nothing', () => { - let n1 = Automerge.init(), n2 = Automerge.init() + let n1 = Automerge.init(), n2 = Automerge.init() // make changes for n1 that n2 should request n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) @@ -102,7 +100,7 @@ describe('Data sync protocol', () => { }) it('should sync peers where one has commits the other does not', () => { - let n1 = Automerge.init(), n2 = Automerge.init() + let n1 = Automerge.init(), n2 = Automerge.init() // make changes for n1 that n2 should request n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) @@ -115,7 +113,7 @@ describe('Data sync protocol', () => { it('should work with prior sync state', () => { // create & synchronize two nodes - let n1 = Automerge.init(), n2 = Automerge.init() + let n1 = Automerge.init(), n2 = Automerge.init() let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) @@ -131,35 +129,35 @@ describe('Data sync protocol', () => { it('should not generate messages once synced', () => { // create & synchronize two nodes - let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') + let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') let s1 = initSyncState(), s2 = initSyncState() - let message, patch + let message for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) for (let i = 0; i < 5; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.y = i) // n1 reports what it has - ;[s1, message] = Automerge.generateSyncMessage(n1, s1, n1) + ;[s1, message] = Automerge.generateSyncMessage(n1, s1) // n2 receives that message and sends changes along with what it has - ;[n2, s2, patch] = Automerge.receiveSyncMessage(n2, s2, message) + ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, message) ;[s2, message] = Automerge.generateSyncMessage(n2, s2) assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 5) //assert.deepStrictEqual(patch, null) // no changes arrived // n1 receives the changes and replies with the changes it now knows n2 needs - ;[n1, s1, patch] = Automerge.receiveSyncMessage(n1, s1, message) + ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, message) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 5) //assert.deepStrictEqual(patch.diffs.props, {y: {'5@def456': {type: 'value', value: 4, datatype: 'int'}}}) // changes arrived // n2 applies the changes and sends confirmation ending the exchange - ;[n2, s2, patch] = Automerge.receiveSyncMessage(n2, s2, message) + ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, message) ;[s2, message] = Automerge.generateSyncMessage(n2, s2) //assert.deepStrictEqual(patch.diffs.props, {x: {'5@abc123': {type: 'value', value: 4, datatype: 'int'}}}) // changes arrived // n1 receives the message and has nothing more to say - ;[n1, s1, patch] = Automerge.receiveSyncMessage(n1, s1, message) + ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, message) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) assert.deepStrictEqual(message, null) //assert.deepStrictEqual(patch, null) // no 
changes arrived @@ -171,7 +169,7 @@ describe('Data sync protocol', () => { it('should allow simultaneous messages during synchronization', () => { // create & synchronize two nodes - let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') + let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) for (let i = 0; i < 5; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.y = i) @@ -187,10 +185,9 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(decodeSyncMessage(msg2to1).have[0].lastSync.length, 0) // n1 and n2 receives that message and update sync state but make no patch - let patch1, patch2 - ;[n1, s1, patch1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) //assert.deepStrictEqual(patch1, null) // no changes arrived, so no patch - ;[n2, s2, patch2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) //assert.deepStrictEqual(patch2, null) // no changes arrived, so no patch // now both reply with their local changes the other lacks @@ -201,12 +198,12 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 5) // both should now apply the changes and update the frontend - ;[n1, s1, patch1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) assert.deepStrictEqual(getMissingDeps(n1), []) //assert.notDeepStrictEqual(patch1, null) assert.deepStrictEqual(n1, {x: 4, y: 4}) - ;[n2, s2, patch2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) assert.deepStrictEqual(getMissingDeps(n2), []) //assert.notDeepStrictEqual(patch2, null) assert.deepStrictEqual(n2, {x: 4, y: 4}) @@ -218,8 +215,8 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 0) // After receiving acknowledgements, their shared heads should be equal - ;[n1, s1, patch1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) - ;[n2, s2, patch2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) assert.deepStrictEqual(s1.sharedHeads, [head1, head2].sort()) assert.deepStrictEqual(s2.sharedHeads, [head1, head2].sort()) //assert.deepStrictEqual(patch1, null) @@ -238,29 +235,34 @@ describe('Data sync protocol', () => { }) it('should assume sent changes were recieved until we hear otherwise', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), message = null - let s2 + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let s1 = initSyncState(), message: Automerge.SyncMessage | null = null n1 = Automerge.change(n1, {time: 0}, doc => doc.items = []) - ;[n1, n2, s1, s2 ] = sync(n1, n2) + ;[n1, n2, s1, ] = sync(n1, n2) n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('x')) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) - assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + if (message != null) { + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + } n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('y')) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) - 
assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + if (message != null) { + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + } n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('z')) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) - assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + if (message != null) { + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + } }) it('should work regardless of who initiates the exchange', () => { // create & synchronize two nodes - let n1 = Automerge.init(), n2 = Automerge.init() + let n1 = Automerge.init(), n2 = Automerge.init() let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) @@ -284,7 +286,7 @@ describe('Data sync protocol', () => { // lastSync is undefined. // create two peers both with divergent commits - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) ;[n1, n2] = sync(n1, n2) @@ -305,7 +307,7 @@ describe('Data sync protocol', () => { // lastSync is c9. // create two peers both with divergent commits - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) @@ -323,7 +325,7 @@ describe('Data sync protocol', () => { }) it('should ensure non-empty state after sync', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) @@ -338,7 +340,7 @@ describe('Data sync protocol', () => { // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 // n2 has changes {c0, c1, c2}, n1's lastSync is c5, and n2's lastSync is c2. // we want to successfully sync (n1) with (r), even though (n1) believes it's talking to (n2) - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() // n1 makes three changes, which we sync to n2 @@ -371,7 +373,7 @@ describe('Data sync protocol', () => { }) it('should resync after one node experiences data loss without disconnecting', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() // n1 makes three changes, which we sync to n2 @@ -391,7 +393,7 @@ describe('Data sync protocol', () => { }) it('should handle changes concurrent to the last sync heads', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') let s12 = initSyncState(), s21 = initSyncState(), s23 = initSyncState(), s32 = initSyncState() // Change 1 is known to all three nodes @@ -411,8 +413,8 @@ describe('Data sync protocol', () => { // Apply n3's latest change to n2. 
If running in Node, turn the Uint8Array into a Buffer, to // simulate transmission over a network (see https://github.com/automerge/automerge/pull/362) let change = Automerge.getLastLocalChange(n3) - if (typeof Buffer === 'function') change = Buffer.from(change) - ;[n2] = Automerge.applyChanges(n2, [change]) + if (typeof Buffer === 'function' && change != null) change = Buffer.from(change) + ;[n2] = change && Automerge.applyChanges(n2, [change]) || [n2] // Now sync n1 and n2. n3's change is concurrent to n1 and n2's last sync heads ;[n1, n2, s12, s21] = sync(n1, n2, s12, s21) @@ -421,10 +423,10 @@ describe('Data sync protocol', () => { }) it('should handle histories with lots of branching and merging', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 0) - ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n1)]) - ;[n3] = Automerge.applyChanges(n3, [Automerge.getLastLocalChange(n1)]) + ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n1)!]) + ;[n3] = Automerge.applyChanges(n3, [Automerge.getLastLocalChange(n1)!]) n3 = Automerge.change(n3, {time: 0}, doc => doc.x = 1) // - n1c1 <------ n1c2 <------ n1c3 <-- etc. <-- n1c20 <------ n1c21 @@ -438,15 +440,15 @@ describe('Data sync protocol', () => { n2 = Automerge.change(n2, {time: 0}, doc => doc.n2 = i) const change1 = Automerge.getLastLocalChange(n1) const change2 = Automerge.getLastLocalChange(n2) - ;[n1] = Automerge.applyChanges(n1, [change2]) - ;[n2] = Automerge.applyChanges(n2, [change1]) + ;[n1] = Automerge.applyChanges(n1, [change2!]) + ;[n2] = Automerge.applyChanges(n2, [change1!]) } let s1 = initSyncState(), s2 = initSyncState() ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) // Having n3's last change concurrent to the last sync heads forces us into the slower code path - ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n3)]) + ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n3)!]) n1 = Automerge.change(n1, {time: 0}, doc => doc.n1 = 'final') n2 = Automerge.change(n2, {time: 0}, doc => doc.n2 = 'final') @@ -471,14 +473,14 @@ describe('Data sync protocol', () => { // `-- n2 // where n2 is a false positive in the Bloom filter containing {n1}. // lastSync is c9. - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) ;[n1, n2, s1, s2] = sync(n1, n2) for (let i = 1; ; i++) { // search for false positive; see comment above - const n1up = Automerge.change(Automerge.clone(n1, {actorId: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2up = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + const n1up = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) + const n2up = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) if (new BloomFilter(getHeads(n1up)).containsHash(getHeads(n2up)[0])) { n1 = n1up; n2 = n2up; break } @@ -500,20 +502,20 @@ describe('Data sync protocol', () => { // `-- n2c1 <-- n2c2 // where n2c1 is a false positive in the Bloom filter containing {n1c1, n1c2}. // lastSync is c9. 
- n1 = Automerge.init('01234567') - n2 = Automerge.init('89abcdef') + n1 = Automerge.init('01234567') + n2 = Automerge.init('89abcdef') s1 = initSyncState() s2 = initSyncState() - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, (doc: any) => doc.x = i) ;[n1, n2, s1, s2] = sync(n1, n2) let n1hash1, n2hash1 for (let i = 29; ; i++) { // search for false positive; see comment above - const n1us1 = Automerge.change(Automerge.clone(n1, {actorId: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2us1 = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + const n1us1 = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, (doc: any) => doc.x = `${i} @ n1`) + const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, (doc: any) => doc.x = `${i} @ n2`) n1hash1 = getHeads(n1us1)[0]; n2hash1 = getHeads(n2us1)[0] - const n1us2 = Automerge.change(n1us1, {time: 0}, doc => doc.x = 'final @ n1') - const n2us2 = Automerge.change(n2us1, {time: 0}, doc => doc.x = 'final @ n2') + const n1us2 = Automerge.change(n1us1, {time: 0}, (doc: any) => doc.x = 'final @ n1') + const n2us2 = Automerge.change(n2us1, {time: 0}, (doc: any) => doc.x = 'final @ n2') n1hash2 = getHeads(n1us2)[0]; n2hash2 = getHeads(n2us2)[0] if (new BloomFilter([n1hash1, n1hash2]).containsHash(n2hash1)) { n1 = n1us2; n2 = n2us2; break @@ -569,15 +571,15 @@ describe('Data sync protocol', () => { // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c2 is a false positive in the Bloom filter containing {n1c1, n1c2, n1c3}. // lastSync is c4. - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() let n1hash3, n2hash3 for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) ;[n1, n2, s1, s2] = sync(n1, n2) for (let i = 86; ; i++) { // search for false positive; see comment above - const n1us1 = Automerge.change(Automerge.clone(n1, {actorId: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2us1 = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + const n1us1 = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) + const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) const n1hash1 = getHeads(n1us1)[0] const n1us2 = Automerge.change(n1us1, {time: 0}, doc => doc.x = `${i + 1} @ n1`) const n2us2 = Automerge.change(n2us1, {time: 0}, doc => doc.x = `${i + 1} @ n2`) @@ -603,20 +605,20 @@ describe('Data sync protocol', () => { // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c1 and n2c2 are both false positives in the Bloom filter containing {c5}. // lastSync is c4. 
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 5) for (let i = 2; ; i++) { // search for false positive; see comment above - const n2us1 = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) if (new BloomFilter(getHeads(n1)).containsHash(getHeads(n2us1)[0])) { n2 = n2us1; break } } for (let i = 141; ; i++) { // search for false positive; see comment above - const n2us2 = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} again`) + const n2us2 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} again`) if (new BloomFilter(getHeads(n1)).containsHash(getHeads(n2us2)[0])) { n2 = n2us2; break } @@ -636,7 +638,7 @@ describe('Data sync protocol', () => { // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- n2 // where n2 causes a false positive in the Bloom filter containing {n1}. - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() let message @@ -646,8 +648,8 @@ describe('Data sync protocol', () => { s2 = decodeSyncState(encodeSyncState(s2)) for (let i = 1; ; i++) { // brute-force search for false positive; see comment above - const n1up = Automerge.change(Automerge.clone(n1, {actorId: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2up = Automerge.change(Automerge.clone(n2, {actorId: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + const n1up = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) + const n2up = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) // check if the bloom filter on n2 will believe n1 already has a particular hash // this will mean n2 won't offer that data to n2 by receiving a sync message from n1 if (new BloomFilter(getHeads(n1up)).containsHash(getHeads(n2up)[0])) { @@ -688,14 +690,14 @@ describe('Data sync protocol', () => { // n1 has {c0, c1, c2, n1c1, n1c2, n1c3, n2c1, n2c2}; // n2 has {c0, c1, c2, n1c1, n1c2, n2c1, n2c2, n2c3}; // n3 has {c0, c1, c2, n3c1, n3c2, n3c3}. 
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') - let s13 = initSyncState(), s12 = initSyncState(), s21 = initSyncState() + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') + let s13 = initSyncState() let s32 = initSyncState(), s31 = initSyncState(), s23 = initSyncState() let message1, message2, message3 for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) // sync all 3 nodes - ;[n1, n2, s12, s21] = sync(n1, n2) // eslint-disable-line no-unused-vars -- kept for consistency + ;[n1, n2, , ] = sync(n1, n2) // eslint-disable-line no-unused-vars -- kept for consistency ;[n1, n3, s13, s31] = sync(n1, n3) ;[n3, n2, s32, s23] = sync(n3, n2) for (let i = 0; i < 2; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = `${i} @ n1`) @@ -742,9 +744,9 @@ describe('Data sync protocol', () => { }) it('should allow any change to be requested', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() - let message = null + let message: Automerge.SyncMessage | null = null for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) const lastSync = getHeads(n1) @@ -753,24 +755,26 @@ describe('Data sync protocol', () => { ;[n1, n2, s1, s2] = sync(n1, n2) s1.lastSentHeads = [] // force generateSyncMessage to return a message even though nothing changed ;[s1, message] = Automerge.generateSyncMessage(n1, s1) - const modMsg = decodeSyncMessage(message) + const modMsg = decodeSyncMessage(message!) modMsg.need = lastSync // re-request change 2 ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, encodeSyncMessage(modMsg)) ;[s1, message] = Automerge.generateSyncMessage(n2, s2) - assert.strictEqual(decodeSyncMessage(message).changes.length, 1) - assert.strictEqual(Automerge.decodeChange(decodeSyncMessage(message).changes[0]).hash, lastSync[0]) + assert.strictEqual(decodeSyncMessage(message!).changes.length, 1) + assert.strictEqual(Automerge.decodeChange(decodeSyncMessage(message!).changes[0]).hash, lastSync[0]) }) it('should ignore requests for a nonexistent change', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') let s1 = initSyncState(), s2 = initSyncState() - let message = null + let message: Automerge.SyncMessage | null = null for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) ;[n2] = Automerge.applyChanges(n2, Automerge.getAllChanges(n1)) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) - message.need = ['0000000000000000000000000000000000000000000000000000000000000000'] - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, message) + const decoded = Automerge.decodeSyncMessage(message!) + decoded.need = ['0000000000000000000000000000000000000000000000000000000000000000'] + message = Automerge.encodeSyncMessage(decoded) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, message!) 
;[s2, message] = Automerge.generateSyncMessage(n2, s2) assert.strictEqual(message, null) }) @@ -779,7 +783,7 @@ describe('Data sync protocol', () => { // ,-- c1 <-- c2 // c0 <-+ // `-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') + let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') let s1 = initSyncState(), s2 = initSyncState() let msg, decodedMsg @@ -813,9 +817,10 @@ describe('Data sync protocol', () => { decodedMsg = decodeSyncMessage(msg) decodedMsg.changes = [change5, change6] msg = encodeSyncMessage(decodedMsg) - const sentHashes = {} - sentHashes[decodeChangeMeta(change5, true).hash] = true - sentHashes[decodeChangeMeta(change6, true).hash] = true + const sentHashes = [ + Automerge.decodeChange(change5!).hash, + Automerge.decodeChange(change6!).hash, + ] s2.sentHashes = sentHashes ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, msg) assert.deepStrictEqual(s1.sharedHeads, [c2, c6].sort()) diff --git a/javascript/test/text_test.ts b/javascript/test/text_test.ts index 59890470..dd66e108 100644 --- a/javascript/test/text_test.ts +++ b/javascript/test/text_test.ts @@ -2,203 +2,16 @@ import * as assert from 'assert' import * as Automerge from '../src' import { assertEqualsOneOf } from './helpers' -function attributeStateToAttributes(accumulatedAttributes) { - const attributes = {} - Object.entries(accumulatedAttributes).forEach(([key, values]) => { - if (values.length && values[0] !== null) { - attributes[key] = values[0] - } - }) - return attributes -} - -function isEquivalent(a, b) { - const aProps = Object.getOwnPropertyNames(a) - const bProps = Object.getOwnPropertyNames(b) - - if (aProps.length != bProps.length) { - return false - } - - for (let i = 0; i < aProps.length; i++) { - const propName = aProps[i] - if (a[propName] !== b[propName]) { - return false - } - } - - return true -} - -function isControlMarker(pseudoCharacter) { - return typeof pseudoCharacter === 'object' && pseudoCharacter.attributes -} - -function opFrom(text, attributes) { - let op = { insert: text } - if (Object.keys(attributes).length > 0) { - op.attributes = attributes - } - return op -} - -function accumulateAttributes(span, accumulatedAttributes) { - Object.entries(span).forEach(([key, value]) => { - if (!accumulatedAttributes[key]) { - accumulatedAttributes[key] = [] - } - if (value === null) { - if (accumulatedAttributes[key].length === 0 || accumulatedAttributes[key] === null) { - accumulatedAttributes[key].unshift(null) - } else { - accumulatedAttributes[key].shift() - } - } else { - if (accumulatedAttributes[key][0] === null) { - accumulatedAttributes[key].shift() - } else { - accumulatedAttributes[key].unshift(value) - } - } - }) - return accumulatedAttributes -} - -function automergeTextToDeltaDoc(text) { - let ops = [] - let controlState = {} - let currentString = "" - let attributes = {} - text.toSpans().forEach((span) => { - if (isControlMarker(span)) { - controlState = accumulateAttributes(span.attributes, controlState) - } else { - let next = attributeStateToAttributes(controlState) - - // if the next span has the same calculated attributes as the current span - // don't bother outputting it as a separate span, just let it ride - if (typeof span === 'string' && isEquivalent(next, attributes)) { - currentString = currentString + span - return - } - - if (currentString) { - ops.push(opFrom(currentString, attributes)) - } - - // If we've got a string, 
we might be able to concatenate it to another - // same-attributed-string, so remember it and go to the next iteration. - if (typeof span === 'string') { - currentString = span - attributes = next - } else { - // otherwise we have an embed "character" and should output it immediately. - // embeds are always one-"character" in length. - ops.push(opFrom(span, next)) - currentString = '' - attributes = {} - } - } - }) - - // at the end, flush any accumulated string out - if (currentString) { - ops.push(opFrom(currentString, attributes)) - } - - return ops -} - -function inverseAttributes(attributes) { - let invertedAttributes = {} - Object.keys(attributes).forEach((key) => { - invertedAttributes[key] = null - }) - return invertedAttributes -} - -function applyDeleteOp(text, offset, op) { - let length = op.delete - while (length > 0) { - if (isControlMarker(text.get(offset))) { - offset += 1 - } else { - // we need to not delete control characters, but we do delete embed characters - text.deleteAt(offset, 1) - length -= 1 - } - } - return [text, offset] -} - -function applyRetainOp(text, offset, op) { - let length = op.retain - - if (op.attributes) { - text.insertAt(offset, { attributes: op.attributes }) - offset += 1 - } - - while (length > 0) { - const char = text.get(offset) - offset += 1 - if (!isControlMarker(char)) { - length -= 1 - } - } - - if (op.attributes) { - text.insertAt(offset, { attributes: inverseAttributes(op.attributes) }) - offset += 1 - } - - return [text, offset] -} - - -function applyInsertOp(text, offset, op) { - let originalOffset = offset - - if (typeof op.insert === 'string') { - text.insertAt(offset, ...op.insert.split('')) - offset += op.insert.length - } else { - // we have an embed or something similar - text.insertAt(offset, op.insert) - offset += 1 - } - - if (op.attributes) { - text.insertAt(originalOffset, { attributes: op.attributes }) - offset += 1 - } - if (op.attributes) { - text.insertAt(offset, { attributes: inverseAttributes(op.attributes) }) - offset += 1 - } - return [text, offset] -} - -// XXX: uhhhhh, why can't I pass in text? 
-function applyDeltaDocToAutomergeText(delta, doc) { - let offset = 0 - - delta.forEach(op => { - if (op.retain) { - [, offset] = applyRetainOp(doc.text, offset, op) - } else if (op.delete) { - [, offset] = applyDeleteOp(doc.text, offset, op) - } else if (op.insert) { - [, offset] = applyInsertOp(doc.text, offset, op) - } - }) +type DocType = { + text: string + [key: string]: any } describe('Automerge.Text', () => { - let s1, s2 + let s1: Automerge.Doc, s2: Automerge.Doc beforeEach(() => { - s1 = Automerge.change(Automerge.init(), doc => doc.text = "") - s2 = Automerge.merge(Automerge.init(), s1) + s1 = Automerge.change(Automerge.init(), doc => doc.text = "") + s2 = Automerge.merge(Automerge.init(), s1) }) it('should support insertion', () => { @@ -281,7 +94,7 @@ describe('Automerge.Text', () => { const s1 = Automerge.from({text: 'init'}) const changes = Automerge.getAllChanges(s1) assert.strictEqual(changes.length, 1) - const [s2] = Automerge.applyChanges(Automerge.init(), changes) + const [s2] = Automerge.applyChanges(Automerge.init(), changes) assert.strictEqual(s2.text, 'init') assert.strictEqual(s2.text, 'init') }) diff --git a/javascript/tsconfig.json b/javascript/tsconfig.json index 80dd7c76..8e934416 100644 --- a/javascript/tsconfig.json +++ b/javascript/tsconfig.json @@ -14,7 +14,7 @@ "skipLibCheck": true, "outDir": "./dist" }, - "include": [ "src/**/*" ], + "include": [ "src/**/*", "test/**/*" ], "exclude": [ "./dist/**/*", "./node_modules" diff --git a/rust/automerge-wasm/index.d.ts b/rust/automerge-wasm/index.d.ts index 90b7854a..0e0c38e6 100644 --- a/rust/automerge-wasm/index.d.ts +++ b/rust/automerge-wasm/index.d.ts @@ -82,6 +82,9 @@ export type DecodedChange = { ops: Op[] } +type PartialBy = Omit & Partial> +export type ChangeToEncode = PartialBy + export type Op = { action: string, obj: ObjID, @@ -120,7 +123,7 @@ export type SplicePatch = { export function create(actor?: Actor): Automerge; export function load(data: Uint8Array, actor?: Actor): Automerge; -export function encodeChange(change: DecodedChange): Change; +export function encodeChange(change: ChangeToEncode): Change; export function decodeChange(change: Change): DecodedChange; export function initSyncState(): SyncState; export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage; @@ -133,7 +136,7 @@ export function importSyncState(state: JsSyncState): SyncState; export interface API { create(actor?: Actor): Automerge; load(data: Uint8Array, actor?: Actor): Automerge; - encodeChange(change: DecodedChange): Change; + encodeChange(change: ChangeToEncode): Change; decodeChange(change: Change): DecodedChange; initSyncState(): SyncState; encodeSyncMessage(message: DecodedSyncMessage): SyncMessage; @@ -208,7 +211,7 @@ export class Automerge { dump(): void; // experimental api can go here - applyPatches(obj: Doc, meta?: unknown, callback?: (patch: Patch, before: Doc, after: Doc) => void): Doc; + applyPatches(obj: Doc, meta?: unknown, callback?: (patch: Array, before: Doc, after: Doc) => void): Doc; } export interface JsSyncState { From 1e7dcdedec03b1d6cfcb5ff3efacf0e4879f5afc Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 22 Dec 2022 12:03:49 +0000 Subject: [PATCH 15/72] automerge-js: Add prettier It's christmas, everyone is on holiday, it's time to change every single file in the repository! 
--- .github/workflows/ci.yaml | 10 + javascript/.eslintrc.cjs | 13 +- javascript/.prettierignore | 2 + javascript/.prettierrc | 4 + javascript/HACKING.md | 3 +- javascript/README.md | 41 +- javascript/config/cjs.json | 8 +- javascript/config/mjs.json | 12 +- javascript/e2e/README.md | 3 +- javascript/e2e/index.ts | 718 ++++--- javascript/e2e/tsconfig.json | 8 +- javascript/e2e/verdaccio.yaml | 24 +- .../examples/create-react-app/README.md | 2 +- .../examples/create-react-app/craco.config.js | 2 +- .../examples/create-react-app/src/App.js | 11 +- .../examples/create-react-app/src/App.test.js | 14 +- .../examples/create-react-app/src/index.css | 6 +- .../examples/create-react-app/src/index.js | 16 +- .../create-react-app/src/reportWebVitals.js | 18 +- .../create-react-app/src/setupTests.js | 2 +- javascript/examples/vite/README.md | 32 +- javascript/examples/vite/main.ts | 22 +- javascript/examples/vite/src/counter.ts | 2 +- javascript/examples/vite/src/main.ts | 17 +- javascript/examples/vite/vite.config.js | 28 +- javascript/examples/webpack/README.md | 26 +- javascript/examples/webpack/src/index.js | 9 +- javascript/examples/webpack/webpack.config.js | 37 +- javascript/package.json | 1 + javascript/src/constants.ts | 19 +- javascript/src/counter.ts | 36 +- javascript/src/index.ts | 853 ++++---- javascript/src/low_level.ts | 59 +- javascript/src/numbers.ts | 25 +- javascript/src/proxies.ts | 462 +++-- javascript/src/types.ts | 23 +- javascript/src/uuid.ts | 19 +- javascript/test/basic_test.ts | 815 ++++---- javascript/test/extra_api_tests.ts | 42 +- javascript/test/helpers.ts | 12 +- javascript/test/legacy/columnar.js | 661 ++++-- javascript/test/legacy/common.js | 14 +- javascript/test/legacy/encoding.js | 432 ++-- javascript/test/legacy/sync.js | 186 +- javascript/test/legacy_tests.ts | 1832 +++++++++++------ javascript/test/sync_test.ts | 693 ++++--- javascript/test/text_test.ts | 91 +- javascript/test/uuid_test.ts | 20 +- javascript/tsconfig.json | 37 +- javascript/typedoc-readme.md | 58 +- scripts/ci/fmt_js | 5 + scripts/ci/run | 1 + 52 files changed, 4564 insertions(+), 2922 deletions(-) create mode 100644 javascript/.prettierignore create mode 100644 javascript/.prettierrc create mode 100755 scripts/ci/fmt_js diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0550619e..361320a0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -90,6 +90,16 @@ jobs: run: rustup target add wasm32-unknown-unknown - name: run tests run: ./scripts/ci/deno_tests + + js_fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: install + run: yarn global add prettier + - name: format + run: prettier -c javascript/.prettierrc javascript + js_tests: runs-on: ubuntu-latest steps: diff --git a/javascript/.eslintrc.cjs b/javascript/.eslintrc.cjs index 80e08d55..5d11eb94 100644 --- a/javascript/.eslintrc.cjs +++ b/javascript/.eslintrc.cjs @@ -1,11 +1,6 @@ module.exports = { root: true, - parser: '@typescript-eslint/parser', - plugins: [ - '@typescript-eslint', - ], - extends: [ - 'eslint:recommended', - 'plugin:@typescript-eslint/recommended', - ], -}; + parser: "@typescript-eslint/parser", + plugins: ["@typescript-eslint"], + extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended"], +} diff --git a/javascript/.prettierignore b/javascript/.prettierignore new file mode 100644 index 00000000..8116ea24 --- /dev/null +++ b/javascript/.prettierignore @@ -0,0 +1,2 @@ +e2e/verdacciodb +dist diff --git a/javascript/.prettierrc 
b/javascript/.prettierrc new file mode 100644 index 00000000..18b9c97f --- /dev/null +++ b/javascript/.prettierrc @@ -0,0 +1,4 @@ +{ + "semi": false, + "arrowParens": "avoid" +} diff --git a/javascript/HACKING.md b/javascript/HACKING.md index c3203775..b7e92eef 100644 --- a/javascript/HACKING.md +++ b/javascript/HACKING.md @@ -8,7 +8,7 @@ Rust codebase and can be found in `~/automerge-wasm`). I.e. the responsibility of this codebase is - To map from the javascript data model to the underlying `set`, `make`, - `insert`, and `delete` operations of Automerge. + `insert`, and `delete` operations of Automerge. - To expose a more convenient interface to functions in `automerge-wasm` which generate messages to send over the network or compressed file formats to store on disk @@ -37,4 +37,3 @@ yarn test If you make changes to the `automerge-wasm` package you will need to re-run `yarn e2e buildjs` - diff --git a/javascript/README.md b/javascript/README.md index ffd2b38e..af8306ac 100644 --- a/javascript/README.md +++ b/javascript/README.md @@ -19,7 +19,6 @@ data](#make-some-data). If you're in a browser you need a bundler ### Bundler setup - `@automerge/automerge` is a wrapper around a core library which is written in rust, compiled to WebAssembly and distributed as a separate package called `@automerge/automerge-wasm`. Browsers don't currently support WebAssembly @@ -54,28 +53,28 @@ import * as automerge from "@automerge/automerge" import * as assert from "assert" let doc1 = automerge.from({ - tasks: [ - {description: "feed fish", done: false}, - {description: "water plants", done: false}, - ] + tasks: [ + { description: "feed fish", done: false }, + { description: "water plants", done: false }, + ], }) -// Create a new thread of execution +// Create a new thread of execution let doc2 = automerge.clone(doc1) // Now we concurrently make changes to doc1 and doc2 // Complete a task in doc2 doc2 = automerge.change(doc2, d => { - d.tasks[0].done = true + d.tasks[0].done = true }) // Add a task in doc1 doc1 = automerge.change(doc1, d => { - d.tasks.push({ - description: "water fish", - done: false - }) + d.tasks.push({ + description: "water fish", + done: false, + }) }) // Merge changes from both docs @@ -84,19 +83,19 @@ doc2 = automerge.merge(doc2, doc1) // Both docs are merged and identical assert.deepEqual(doc1, { - tasks: [ - {description: "feed fish", done: true}, - {description: "water plants", done: false}, - {description: "water fish", done: false}, - ] + tasks: [ + { description: "feed fish", done: true }, + { description: "water plants", done: false }, + { description: "water fish", done: false }, + ], }) assert.deepEqual(doc2, { - tasks: [ - {description: "feed fish", done: true}, - {description: "water plants", done: false}, - {description: "water fish", done: false}, - ] + tasks: [ + { description: "feed fish", done: true }, + { description: "water plants", done: false }, + { description: "water fish", done: false }, + ], }) ``` diff --git a/javascript/config/cjs.json b/javascript/config/cjs.json index d7f8c63f..9cfceed5 100644 --- a/javascript/config/cjs.json +++ b/javascript/config/cjs.json @@ -1,6 +1,6 @@ { - "extends": "../tsconfig.json", - "compilerOptions": { - "outDir": "../dist/cjs" - } + "extends": "../tsconfig.json", + "compilerOptions": { + "outDir": "../dist/cjs" + } } diff --git a/javascript/config/mjs.json b/javascript/config/mjs.json index 8f964400..5b02ee0e 100644 --- a/javascript/config/mjs.json +++ b/javascript/config/mjs.json @@ -1,8 +1,8 @@ { - "extends": 
"../tsconfig.json", - "compilerOptions": { - "target": "es6", - "module": "es6", - "outDir": "../dist/mjs" - } + "extends": "../tsconfig.json", + "compilerOptions": { + "target": "es6", + "module": "es6", + "outDir": "../dist/mjs" + } } diff --git a/javascript/e2e/README.md b/javascript/e2e/README.md index ff87bd60..9dcee471 100644 --- a/javascript/e2e/README.md +++ b/javascript/e2e/README.md @@ -54,7 +54,7 @@ yarn e2e buildexamples -e webpack If you're experimenting with a project which is not in the `examples` folder you'll need a running registry. `run-registry` builds and publishes `automerge-js` and `automerge-wasm` and then runs the registry at -`localhost:4873`. +`localhost:4873`. ``` yarn e2e run-registry @@ -63,7 +63,6 @@ yarn e2e run-registry You can now run `yarn install --registry http://localhost:4873` to experiment with the built packages. - ## Using the `dev` build of `automerge-wasm` All the commands above take a `-p` flag which can be either `release` or diff --git a/javascript/e2e/index.ts b/javascript/e2e/index.ts index 828c0635..fb0b1599 100644 --- a/javascript/e2e/index.ts +++ b/javascript/e2e/index.ts @@ -1,15 +1,25 @@ -import {once} from "events" -import {setTimeout} from "timers/promises" -import {spawn, ChildProcess} from "child_process" +import { once } from "events" +import { setTimeout } from "timers/promises" +import { spawn, ChildProcess } from "child_process" import * as child_process from "child_process" -import {command, subcommands, run, array, multioption, option, Type} from "cmd-ts" +import { + command, + subcommands, + run, + array, + multioption, + option, + Type, +} from "cmd-ts" import * as path from "path" import * as fsPromises from "fs/promises" import fetch from "node-fetch" const VERDACCIO_DB_PATH = path.normalize(`${__dirname}/verdacciodb`) const VERDACCIO_CONFIG_PATH = path.normalize(`${__dirname}/verdaccio.yaml`) -const AUTOMERGE_WASM_PATH = path.normalize(`${__dirname}/../../rust/automerge-wasm`) +const AUTOMERGE_WASM_PATH = path.normalize( + `${__dirname}/../../rust/automerge-wasm` +) const AUTOMERGE_JS_PATH = path.normalize(`${__dirname}/..`) const EXAMPLES_DIR = path.normalize(path.join(__dirname, "../", "examples")) @@ -18,217 +28,286 @@ type Example = "webpack" | "vite" | "create-react-app" // Type to parse strings to `Example` so the types line up for the `buildExamples` commmand const ReadExample: Type = { - async from(str) { - if (str === "webpack") { - return "webpack" - } else if (str === "vite") { - return "vite" - } else if (str === "create-react-app") { - return "create-react-app" - } else { - throw new Error(`Unknown example type ${str}`) - } + async from(str) { + if (str === "webpack") { + return "webpack" + } else if (str === "vite") { + return "vite" + } else if (str === "create-react-app") { + return "create-react-app" + } else { + throw new Error(`Unknown example type ${str}`) } + }, } type Profile = "dev" | "release" const ReadProfile: Type = { - async from(str) { - if (str === "dev") { - return "dev" - } else if (str === "release") { - return "release" - } else { - throw new Error(`Unknown profile ${str}`) - } + async from(str) { + if (str === "dev") { + return "dev" + } else if (str === "release") { + return "release" + } else { + throw new Error(`Unknown profile ${str}`) } + }, } const buildjs = command({ - name: "buildjs", - args: { - profile: option({ - type: ReadProfile, - long: "profile", - short: "p", - defaultValue: () => "dev" as Profile - }) - }, - handler: ({profile}) => { - console.log("building js") - 
withPublishedWasm(profile, async (registryUrl: string) => { - await buildAndPublishAutomergeJs(registryUrl) - }) - } + name: "buildjs", + args: { + profile: option({ + type: ReadProfile, + long: "profile", + short: "p", + defaultValue: () => "dev" as Profile, + }), + }, + handler: ({ profile }) => { + console.log("building js") + withPublishedWasm(profile, async (registryUrl: string) => { + await buildAndPublishAutomergeJs(registryUrl) + }) + }, }) const buildWasm = command({ - name: "buildwasm", - args: { - profile: option({ - type: ReadProfile, - long: "profile", - short: "p", - defaultValue: () => "dev" as Profile - }) - }, - handler: ({profile}) => { - console.log("building automerge-wasm") - withRegistry( - buildAutomergeWasm(profile), - ) - } + name: "buildwasm", + args: { + profile: option({ + type: ReadProfile, + long: "profile", + short: "p", + defaultValue: () => "dev" as Profile, + }), + }, + handler: ({ profile }) => { + console.log("building automerge-wasm") + withRegistry(buildAutomergeWasm(profile)) + }, }) const buildexamples = command({ - name: "buildexamples", - args: { - examples: multioption({ - long: "example", - short: "e", - type: array(ReadExample), - }), - profile: option({ - type: ReadProfile, - long: "profile", - short: "p", - defaultValue: () => "dev" as Profile - }) - }, - handler: ({examples, profile}) => { - if (examples.length === 0) { - examples = ["webpack", "vite", "create-react-app"] - } - buildExamples(examples, profile) + name: "buildexamples", + args: { + examples: multioption({ + long: "example", + short: "e", + type: array(ReadExample), + }), + profile: option({ + type: ReadProfile, + long: "profile", + short: "p", + defaultValue: () => "dev" as Profile, + }), + }, + handler: ({ examples, profile }) => { + if (examples.length === 0) { + examples = ["webpack", "vite", "create-react-app"] } + buildExamples(examples, profile) + }, }) - const runRegistry = command({ - name: "run-registry", - args: { - profile: option({ - type: ReadProfile, - long: "profile", - short: "p", - defaultValue: () => "dev" as Profile - }) - }, - handler: ({profile}) => { - withPublishedWasm(profile, async (registryUrl: string) => { - await buildAndPublishAutomergeJs(registryUrl) - console.log("\n************************") - console.log(` Verdaccio NPM registry is running at ${registryUrl}`) - console.log(" press CTRL-C to exit ") - console.log("************************") - await once(process, "SIGINT") - }).catch(e => { - console.error(`Failed: ${e}`) - }) - } + name: "run-registry", + args: { + profile: option({ + type: ReadProfile, + long: "profile", + short: "p", + defaultValue: () => "dev" as Profile, + }), + }, + handler: ({ profile }) => { + withPublishedWasm(profile, async (registryUrl: string) => { + await buildAndPublishAutomergeJs(registryUrl) + console.log("\n************************") + console.log(` Verdaccio NPM registry is running at ${registryUrl}`) + console.log(" press CTRL-C to exit ") + console.log("************************") + await once(process, "SIGINT") + }).catch(e => { + console.error(`Failed: ${e}`) + }) + }, }) - const app = subcommands({ - name: "e2e", - cmds: {buildjs, buildexamples, buildwasm: buildWasm, "run-registry": runRegistry} + name: "e2e", + cmds: { + buildjs, + buildexamples, + buildwasm: buildWasm, + "run-registry": runRegistry, + }, }) run(app, process.argv.slice(2)) async function buildExamples(examples: Array, profile: Profile) { - await withPublishedWasm(profile, async (registryUrl) => { - printHeader("building and publishing 
automerge") - await buildAndPublishAutomergeJs(registryUrl) - for (const example of examples) { - printHeader(`building ${example} example`) - if (example === "webpack") { - const projectPath = path.join(EXAMPLES_DIR, example) - await removeExistingAutomerge(projectPath) - await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) - await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) - await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) - } else if (example === "vite") { - const projectPath = path.join(EXAMPLES_DIR, example) - await removeExistingAutomerge(projectPath) - await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) - await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) - await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) - } else if (example === "create-react-app") { - const projectPath = path.join(EXAMPLES_DIR, example) - await removeExistingAutomerge(projectPath) - await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) - await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) - await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) - } - } - }) + await withPublishedWasm(profile, async registryUrl => { + printHeader("building and publishing automerge") + await buildAndPublishAutomergeJs(registryUrl) + for (const example of examples) { + printHeader(`building ${example} example`) + if (example === "webpack") { + const projectPath = path.join(EXAMPLES_DIR, example) + await removeExistingAutomerge(projectPath) + await fsPromises.rm(path.join(projectPath, "yarn.lock"), { + force: true, + }) + await spawnAndWait( + "yarn", + [ + "--cwd", + projectPath, + "install", + "--registry", + registryUrl, + "--check-files", + ], + { stdio: "inherit" } + ) + await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { + stdio: "inherit", + }) + } else if (example === "vite") { + const projectPath = path.join(EXAMPLES_DIR, example) + await removeExistingAutomerge(projectPath) + await fsPromises.rm(path.join(projectPath, "yarn.lock"), { + force: true, + }) + await spawnAndWait( + "yarn", + [ + "--cwd", + projectPath, + "install", + "--registry", + registryUrl, + "--check-files", + ], + { stdio: "inherit" } + ) + await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { + stdio: "inherit", + }) + } else if (example === "create-react-app") { + const projectPath = path.join(EXAMPLES_DIR, example) + await removeExistingAutomerge(projectPath) + await fsPromises.rm(path.join(projectPath, "yarn.lock"), { + force: true, + }) + await spawnAndWait( + "yarn", + [ + "--cwd", + projectPath, + "install", + "--registry", + registryUrl, + "--check-files", + ], + { stdio: "inherit" } + ) + await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { + stdio: "inherit", + }) + } + } + }) } type WithRegistryAction = (registryUrl: string) => Promise -async function withRegistry(action: WithRegistryAction, ...actions: Array) { - // First, start verdaccio - printHeader("Starting verdaccio NPM server") - const verd = await VerdaccioProcess.start() - actions.unshift(action) +async function withRegistry( + action: WithRegistryAction, + ...actions: Array +) { + // First, start verdaccio + printHeader("Starting verdaccio NPM server") + const verd = await 
VerdaccioProcess.start() + actions.unshift(action) - for (const action of actions) { - try { - type Step = "verd-died" | "action-completed" - const verdDied: () => Promise = async () => { - await verd.died() - return "verd-died" - } - const actionComplete: () => Promise = async () => { - await action("http://localhost:4873") - return "action-completed" - } - const result = await Promise.race([verdDied(), actionComplete()]) - if (result === "verd-died") { - throw new Error("verdaccio unexpectedly exited") - } - } catch(e) { - await verd.kill() - throw e - } + for (const action of actions) { + try { + type Step = "verd-died" | "action-completed" + const verdDied: () => Promise = async () => { + await verd.died() + return "verd-died" + } + const actionComplete: () => Promise = async () => { + await action("http://localhost:4873") + return "action-completed" + } + const result = await Promise.race([verdDied(), actionComplete()]) + if (result === "verd-died") { + throw new Error("verdaccio unexpectedly exited") + } + } catch (e) { + await verd.kill() + throw e } - await verd.kill() + } + await verd.kill() } async function withPublishedWasm(profile: Profile, action: WithRegistryAction) { - await withRegistry( - buildAutomergeWasm(profile), - publishAutomergeWasm, - action - ) + await withRegistry(buildAutomergeWasm(profile), publishAutomergeWasm, action) } function buildAutomergeWasm(profile: Profile): WithRegistryAction { - return async (registryUrl: string) => { - printHeader("building automerge-wasm") - await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, "--registry", registryUrl, "install"], {stdio: "inherit"}) - const cmd = profile === "release" ? "release" : "debug" - await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, cmd], {stdio: "inherit"}) - } + return async (registryUrl: string) => { + printHeader("building automerge-wasm") + await spawnAndWait( + "yarn", + ["--cwd", AUTOMERGE_WASM_PATH, "--registry", registryUrl, "install"], + { stdio: "inherit" } + ) + const cmd = profile === "release" ? 
"release" : "debug" + await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, cmd], { + stdio: "inherit", + }) + } } async function publishAutomergeWasm(registryUrl: string) { - printHeader("Publishing automerge-wasm to verdaccio") - await fsPromises.rm(path.join(VERDACCIO_DB_PATH, "@automerge/automerge-wasm"), { recursive: true, force: true} ) - await yarnPublish(registryUrl, AUTOMERGE_WASM_PATH) + printHeader("Publishing automerge-wasm to verdaccio") + await fsPromises.rm( + path.join(VERDACCIO_DB_PATH, "@automerge/automerge-wasm"), + { recursive: true, force: true } + ) + await yarnPublish(registryUrl, AUTOMERGE_WASM_PATH) } async function buildAndPublishAutomergeJs(registryUrl: string) { - // Build the js package - printHeader("Building automerge") - await removeExistingAutomerge(AUTOMERGE_JS_PATH) - await removeFromVerdaccio("@automerge/automerge") - await fsPromises.rm(path.join(AUTOMERGE_JS_PATH, "yarn.lock"), {force: true}) - await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) - await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "build"], {stdio: "inherit"}) - await yarnPublish(registryUrl, AUTOMERGE_JS_PATH) + // Build the js package + printHeader("Building automerge") + await removeExistingAutomerge(AUTOMERGE_JS_PATH) + await removeFromVerdaccio("@automerge/automerge") + await fsPromises.rm(path.join(AUTOMERGE_JS_PATH, "yarn.lock"), { + force: true, + }) + await spawnAndWait( + "yarn", + [ + "--cwd", + AUTOMERGE_JS_PATH, + "install", + "--registry", + registryUrl, + "--check-files", + ], + { stdio: "inherit" } + ) + await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "build"], { + stdio: "inherit", + }) + await yarnPublish(registryUrl, AUTOMERGE_JS_PATH) } /** @@ -236,104 +315,110 @@ async function buildAndPublishAutomergeJs(registryUrl: string) { * */ class VerdaccioProcess { - child: ChildProcess - stdout: Array - stderr: Array + child: ChildProcess + stdout: Array + stderr: Array - constructor(child: ChildProcess) { - this.child = child + constructor(child: ChildProcess) { + this.child = child - // Collect stdout/stderr otherwise the subprocess gets blocked writing - this.stdout = [] - this.stderr = [] - this.child.stdout && this.child.stdout.on("data", (data) => this.stdout.push(data)) - this.child.stderr && this.child.stderr.on("data", (data) => this.stderr.push(data)) + // Collect stdout/stderr otherwise the subprocess gets blocked writing + this.stdout = [] + this.stderr = [] + this.child.stdout && + this.child.stdout.on("data", data => this.stdout.push(data)) + this.child.stderr && + this.child.stderr.on("data", data => this.stderr.push(data)) - const errCallback = (e: any) => { - console.error("!!!!!!!!!ERROR IN VERDACCIO PROCESS!!!!!!!!!") - console.error(" ", e) - if (this.stdout.length > 0) { - console.log("\n**Verdaccio stdout**") - const stdout = Buffer.concat(this.stdout) - process.stdout.write(stdout) - } + const errCallback = (e: any) => { + console.error("!!!!!!!!!ERROR IN VERDACCIO PROCESS!!!!!!!!!") + console.error(" ", e) + if (this.stdout.length > 0) { + console.log("\n**Verdaccio stdout**") + const stdout = Buffer.concat(this.stdout) + process.stdout.write(stdout) + } - if (this.stderr.length > 0) { - console.log("\n**Verdaccio stderr**") - const stdout = Buffer.concat(this.stderr) - process.stdout.write(stdout) - } - process.exit(-1) - } - this.child.on("error", errCallback) + if (this.stderr.length > 0) { + console.log("\n**Verdaccio stderr**") + const stdout = 
Buffer.concat(this.stderr) + process.stdout.write(stdout) + } + process.exit(-1) } + this.child.on("error", errCallback) + } - /** - * Spawn a verdaccio process and wait for it to respond succesfully to http requests - * - * The returned `VerdaccioProcess` can be used to control the subprocess - */ - static async start() { - const child = spawn("yarn", ["verdaccio", "--config", VERDACCIO_CONFIG_PATH], {env: { ...process.env, FORCE_COLOR: "true"}}) + /** + * Spawn a verdaccio process and wait for it to respond succesfully to http requests + * + * The returned `VerdaccioProcess` can be used to control the subprocess + */ + static async start() { + const child = spawn( + "yarn", + ["verdaccio", "--config", VERDACCIO_CONFIG_PATH], + { env: { ...process.env, FORCE_COLOR: "true" } } + ) - // Forward stdout and stderr whilst waiting for startup to complete - const stdoutCallback = (data: Buffer) => process.stdout.write(data) - const stderrCallback = (data: Buffer) => process.stderr.write(data) - child.stdout && child.stdout.on("data", stdoutCallback) - child.stderr && child.stderr.on("data", stderrCallback) + // Forward stdout and stderr whilst waiting for startup to complete + const stdoutCallback = (data: Buffer) => process.stdout.write(data) + const stderrCallback = (data: Buffer) => process.stderr.write(data) + child.stdout && child.stdout.on("data", stdoutCallback) + child.stderr && child.stderr.on("data", stderrCallback) - const healthCheck = async () => { - while (true) { - try { - const resp = await fetch("http://localhost:4873") - if (resp.status === 200) { - return - } else { - console.log(`Healthcheck failed: bad status ${resp.status}`) - } - } catch (e) { - console.error(`Healthcheck failed: ${e}`) - } - await setTimeout(500) - } - } - await withTimeout(healthCheck(), 10000) - - // Stop forwarding stdout/stderr - child.stdout && child.stdout.off("data", stdoutCallback) - child.stderr && child.stderr.off("data", stderrCallback) - return new VerdaccioProcess(child) - } - - /** - * Send a SIGKILL to the process and wait for it to stop - */ - async kill() { - this.child.stdout && this.child.stdout.destroy() - this.child.stderr && this.child.stderr.destroy() - this.child.kill(); + const healthCheck = async () => { + while (true) { try { - await withTimeout(once(this.child, "close"), 500) + const resp = await fetch("http://localhost:4873") + if (resp.status === 200) { + return + } else { + console.log(`Healthcheck failed: bad status ${resp.status}`) + } } catch (e) { - console.error("unable to kill verdaccio subprocess, trying -9") - this.child.kill(9) - await withTimeout(once(this.child, "close"), 500) + console.error(`Healthcheck failed: ${e}`) } + await setTimeout(500) + } } + await withTimeout(healthCheck(), 10000) - /** - * A promise which resolves if the subprocess exits for some reason - */ - async died(): Promise { - const [exit, _signal] = await once(this.child, "exit") - return exit + // Stop forwarding stdout/stderr + child.stdout && child.stdout.off("data", stdoutCallback) + child.stderr && child.stderr.off("data", stderrCallback) + return new VerdaccioProcess(child) + } + + /** + * Send a SIGKILL to the process and wait for it to stop + */ + async kill() { + this.child.stdout && this.child.stdout.destroy() + this.child.stderr && this.child.stderr.destroy() + this.child.kill() + try { + await withTimeout(once(this.child, "close"), 500) + } catch (e) { + console.error("unable to kill verdaccio subprocess, trying -9") + this.child.kill(9) + await withTimeout(once(this.child, 
"close"), 500) } + } + + /** + * A promise which resolves if the subprocess exits for some reason + */ + async died(): Promise { + const [exit, _signal] = await once(this.child, "exit") + return exit + } } function printHeader(header: string) { - console.log("\n===============================") - console.log(` ${header}`) - console.log("===============================") + console.log("\n===============================") + console.log(` ${header}`) + console.log("===============================") } /** @@ -347,36 +432,46 @@ function printHeader(header: string) { * @param packageDir - The directory containing the package.json of the target project */ async function removeExistingAutomerge(packageDir: string) { - await fsPromises.rm(path.join(packageDir, "node_modules", "@automerge"), {recursive: true, force: true}) - await fsPromises.rm(path.join(packageDir, "node_modules", "automerge"), {recursive: true, force: true}) + await fsPromises.rm(path.join(packageDir, "node_modules", "@automerge"), { + recursive: true, + force: true, + }) + await fsPromises.rm(path.join(packageDir, "node_modules", "automerge"), { + recursive: true, + force: true, + }) } type SpawnResult = { - stdout?: Buffer, - stderr?: Buffer, + stdout?: Buffer + stderr?: Buffer } -async function spawnAndWait(cmd: string, args: Array, options: child_process.SpawnOptions): Promise { - const child = spawn(cmd, args, options) - let stdout = null - let stderr = null - if (child.stdout) { - stdout = [] - child.stdout.on("data", data => stdout.push(data)) - } - if (child.stderr) { - stderr = [] - child.stderr.on("data", data => stderr.push(data)) - } +async function spawnAndWait( + cmd: string, + args: Array, + options: child_process.SpawnOptions +): Promise { + const child = spawn(cmd, args, options) + let stdout = null + let stderr = null + if (child.stdout) { + stdout = [] + child.stdout.on("data", data => stdout.push(data)) + } + if (child.stderr) { + stderr = [] + child.stderr.on("data", data => stderr.push(data)) + } - const [exit, _signal] = await once(child, "exit") - if (exit && exit !== 0) { - throw new Error("nonzero exit code") - } - return { - stderr: stderr? Buffer.concat(stderr) : null, - stdout: stdout ? Buffer.concat(stdout) : null - } + const [exit, _signal] = await once(child, "exit") + if (exit && exit !== 0) { + throw new Error("nonzero exit code") + } + return { + stderr: stderr ? Buffer.concat(stderr) : null, + stdout: stdout ? Buffer.concat(stdout) : null, + } } /** @@ -387,29 +482,27 @@ async function spawnAndWait(cmd: string, args: Array, options: child_pro * okay I Promise. 
*/ async function removeFromVerdaccio(packageName: string) { - await fsPromises.rm(path.join(VERDACCIO_DB_PATH, packageName), {force: true, recursive: true}) + await fsPromises.rm(path.join(VERDACCIO_DB_PATH, packageName), { + force: true, + recursive: true, + }) } async function yarnPublish(registryUrl: string, cwd: string) { - await spawnAndWait( - "yarn", - [ - "--registry", - registryUrl, - "--cwd", - cwd, - "publish", - "--non-interactive", - ], - { - stdio: "inherit", - env: { - ...process.env, - FORCE_COLOR: "true", - // This is a fake token, it just has to be the right format - npm_config__auth: "//localhost:4873/:_authToken=Gp2Mgxm4faa/7wp0dMSuRA==" - } - }) + await spawnAndWait( + "yarn", + ["--registry", registryUrl, "--cwd", cwd, "publish", "--non-interactive"], + { + stdio: "inherit", + env: { + ...process.env, + FORCE_COLOR: "true", + // This is a fake token, it just has to be the right format + npm_config__auth: + "//localhost:4873/:_authToken=Gp2Mgxm4faa/7wp0dMSuRA==", + }, + } + ) } /** @@ -419,20 +512,23 @@ async function yarnPublish(registryUrl: string, cwd: string) { * @param promise - the promise to wait for @param timeout - the delay in * milliseconds to wait before throwing */ -async function withTimeout(promise: Promise, timeout: number): Promise { - type Step = "timed-out" | {result: T} - const timedOut: () => Promise = async () => { - await setTimeout(timeout) - return "timed-out" - } - const succeeded: () => Promise = async () => { - const result = await promise - return {result} - } - const result = await Promise.race([timedOut(), succeeded()]) - if (result === "timed-out") { - throw new Error("timed out") - } else { - return result.result - } +async function withTimeout( + promise: Promise, + timeout: number +): Promise { + type Step = "timed-out" | { result: T } + const timedOut: () => Promise = async () => { + await setTimeout(timeout) + return "timed-out" + } + const succeeded: () => Promise = async () => { + const result = await promise + return { result } + } + const result = await Promise.race([timedOut(), succeeded()]) + if (result === "timed-out") { + throw new Error("timed out") + } else { + return result.result + } } diff --git a/javascript/e2e/tsconfig.json b/javascript/e2e/tsconfig.json index 9f0e2e76..a2109873 100644 --- a/javascript/e2e/tsconfig.json +++ b/javascript/e2e/tsconfig.json @@ -1,6 +1,6 @@ { - "compilerOptions": { - "types": ["node"] - }, - "module": "nodenext" + "compilerOptions": { + "types": ["node"] + }, + "module": "nodenext" } diff --git a/javascript/e2e/verdaccio.yaml b/javascript/e2e/verdaccio.yaml index 45920a16..865f5f05 100644 --- a/javascript/e2e/verdaccio.yaml +++ b/javascript/e2e/verdaccio.yaml @@ -4,22 +4,22 @@ auth: file: ./htpasswd publish: allow_offline: true -logs: {type: stdout, format: pretty, level: info} -packages: +logs: { type: stdout, format: pretty, level: info } +packages: "@automerge/automerge-wasm": - access: "$all" - publish: "$all" + access: "$all" + publish: "$all" "@automerge/automerge": - access: "$all" - publish: "$all" + access: "$all" + publish: "$all" "*": - access: "$all" - publish: "$all" - proxy: npmjs + access: "$all" + publish: "$all" + proxy: npmjs "@*/*": - access: "$all" - publish: "$all" - proxy: npmjs + access: "$all" + publish: "$all" + proxy: npmjs uplinks: npmjs: url: https://registry.npmjs.org/ diff --git a/javascript/examples/create-react-app/README.md b/javascript/examples/create-react-app/README.md index dc894080..baa135ac 100644 --- a/javascript/examples/create-react-app/README.md 
+++ b/javascript/examples/create-react-app/README.md @@ -54,6 +54,6 @@ In the root of the project add the following contents to `craco.config.js` const cracoWasm = require("craco-wasm") module.exports = { - plugins: [cracoWasm()] + plugins: [cracoWasm()], } ``` diff --git a/javascript/examples/create-react-app/craco.config.js b/javascript/examples/create-react-app/craco.config.js index ad806e67..489dad8f 100644 --- a/javascript/examples/create-react-app/craco.config.js +++ b/javascript/examples/create-react-app/craco.config.js @@ -1,5 +1,5 @@ const cracoWasm = require("craco-wasm") module.exports = { - plugins: [cracoWasm()] + plugins: [cracoWasm()], } diff --git a/javascript/examples/create-react-app/src/App.js b/javascript/examples/create-react-app/src/App.js index fc4805b4..7cfe997b 100644 --- a/javascript/examples/create-react-app/src/App.js +++ b/javascript/examples/create-react-app/src/App.js @@ -1,12 +1,11 @@ import * as Automerge from "@automerge/automerge" -import logo from './logo.svg'; -import './App.css'; +import logo from "./logo.svg" +import "./App.css" let doc = Automerge.init() -doc = Automerge.change(doc, (d) => d.hello = "from automerge") +doc = Automerge.change(doc, d => (d.hello = "from automerge")) const result = JSON.stringify(doc) - function App() { return (

@@ -15,7 +14,7 @@ function App() {

{result}

- ); + ) } -export default App; +export default App diff --git a/javascript/examples/create-react-app/src/App.test.js b/javascript/examples/create-react-app/src/App.test.js index 1f03afee..ea796120 100644 --- a/javascript/examples/create-react-app/src/App.test.js +++ b/javascript/examples/create-react-app/src/App.test.js @@ -1,8 +1,8 @@ -import { render, screen } from '@testing-library/react'; -import App from './App'; +import { render, screen } from "@testing-library/react" +import App from "./App" -test('renders learn react link', () => { - render(); - const linkElement = screen.getByText(/learn react/i); - expect(linkElement).toBeInTheDocument(); -}); +test("renders learn react link", () => { + render() + const linkElement = screen.getByText(/learn react/i) + expect(linkElement).toBeInTheDocument() +}) diff --git a/javascript/examples/create-react-app/src/index.css b/javascript/examples/create-react-app/src/index.css index ec2585e8..4a1df4db 100644 --- a/javascript/examples/create-react-app/src/index.css +++ b/javascript/examples/create-react-app/src/index.css @@ -1,13 +1,13 @@ body { margin: 0; - font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', - 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", + "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } code { - font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', + font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace; } diff --git a/javascript/examples/create-react-app/src/index.js b/javascript/examples/create-react-app/src/index.js index d563c0fb..58c21edc 100644 --- a/javascript/examples/create-react-app/src/index.js +++ b/javascript/examples/create-react-app/src/index.js @@ -1,17 +1,17 @@ -import React from 'react'; -import ReactDOM from 'react-dom/client'; -import './index.css'; -import App from './App'; -import reportWebVitals from './reportWebVitals'; +import React from "react" +import ReactDOM from "react-dom/client" +import "./index.css" +import App from "./App" +import reportWebVitals from "./reportWebVitals" -const root = ReactDOM.createRoot(document.getElementById('root')); +const root = ReactDOM.createRoot(document.getElementById("root")) root.render( -); +) // If you want to start measuring performance in your app, pass a function // to log results (for example: reportWebVitals(console.log)) // or send to an analytics endpoint. 
Learn more: https://bit.ly/CRA-vitals -reportWebVitals(); +reportWebVitals() diff --git a/javascript/examples/create-react-app/src/reportWebVitals.js b/javascript/examples/create-react-app/src/reportWebVitals.js index 5253d3ad..eee308db 100644 --- a/javascript/examples/create-react-app/src/reportWebVitals.js +++ b/javascript/examples/create-react-app/src/reportWebVitals.js @@ -1,13 +1,13 @@ const reportWebVitals = onPerfEntry => { if (onPerfEntry && onPerfEntry instanceof Function) { - import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { - getCLS(onPerfEntry); - getFID(onPerfEntry); - getFCP(onPerfEntry); - getLCP(onPerfEntry); - getTTFB(onPerfEntry); - }); + import("web-vitals").then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { + getCLS(onPerfEntry) + getFID(onPerfEntry) + getFCP(onPerfEntry) + getLCP(onPerfEntry) + getTTFB(onPerfEntry) + }) } -}; +} -export default reportWebVitals; +export default reportWebVitals diff --git a/javascript/examples/create-react-app/src/setupTests.js b/javascript/examples/create-react-app/src/setupTests.js index 8f2609b7..6a0fd123 100644 --- a/javascript/examples/create-react-app/src/setupTests.js +++ b/javascript/examples/create-react-app/src/setupTests.js @@ -2,4 +2,4 @@ // allows you to do things like: // expect(element).toHaveTextContent(/react/i) // learn more: https://github.com/testing-library/jest-dom -import '@testing-library/jest-dom'; +import "@testing-library/jest-dom" diff --git a/javascript/examples/vite/README.md b/javascript/examples/vite/README.md index efe44479..c84594f5 100644 --- a/javascript/examples/vite/README.md +++ b/javascript/examples/vite/README.md @@ -7,6 +7,7 @@ There are three things you need to do to get WASM packaging working with vite: 3. Exclude `automerge-wasm` from the optimizer First, install the packages we need: + ```bash yarn add vite-plugin-top-level-await yarn add vite-plugin-wasm @@ -20,22 +21,22 @@ import wasm from "vite-plugin-wasm" import topLevelAwait from "vite-plugin-top-level-await" export default defineConfig({ - plugins: [topLevelAwait(), wasm()], - - // This is only necessary if you are using `SharedWorker` or `WebWorker`, as - // documented in https://vitejs.dev/guide/features.html#import-with-constructors - worker: { - format: "es", - plugins: [topLevelAwait(), wasm()] - }, + plugins: [topLevelAwait(), wasm()], - optimizeDeps: { - // This is necessary because otherwise `vite dev` includes two separate - // versions of the JS wrapper. This causes problems because the JS - // wrapper has a module level variable to track JS side heap - // allocations, initializing this twice causes horrible breakage - exclude: ["@automerge/automerge-wasm"] - } + // This is only necessary if you are using `SharedWorker` or `WebWorker`, as + // documented in https://vitejs.dev/guide/features.html#import-with-constructors + worker: { + format: "es", + plugins: [topLevelAwait(), wasm()], + }, + + optimizeDeps: { + // This is necessary because otherwise `vite dev` includes two separate + // versions of the JS wrapper. 
This causes problems because the JS + // wrapper has a module level variable to track JS side heap + // allocations, initializing this twice causes horrible breakage + exclude: ["@automerge/automerge-wasm"], + }, }) ``` @@ -51,4 +52,3 @@ yarn vite yarn install yarn dev ``` - diff --git a/javascript/examples/vite/main.ts b/javascript/examples/vite/main.ts index 157c8e48..0ba18f48 100644 --- a/javascript/examples/vite/main.ts +++ b/javascript/examples/vite/main.ts @@ -1,15 +1,15 @@ -import * as Automerge from "/node_modules/.vite/deps/automerge-js.js?v=6e973f28"; -console.log(Automerge); -let doc = Automerge.init(); -doc = Automerge.change(doc, (d) => d.hello = "from automerge-js"); -console.log(doc); -const result = JSON.stringify(doc); +import * as Automerge from "/node_modules/.vite/deps/automerge-js.js?v=6e973f28" +console.log(Automerge) +let doc = Automerge.init() +doc = Automerge.change(doc, d => (d.hello = "from automerge-js")) +console.log(doc) +const result = JSON.stringify(doc) if (typeof document !== "undefined") { - const element = document.createElement("div"); - element.innerHTML = JSON.stringify(result); - document.body.appendChild(element); + const element = document.createElement("div") + element.innerHTML = JSON.stringify(result) + document.body.appendChild(element) } else { - console.log("node:", result); + console.log("node:", result) } -//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi9ob21lL2FsZXgvUHJvamVjdHMvYXV0b21lcmdlL2F1dG9tZXJnZS1ycy9hdXRvbWVyZ2UtanMvZXhhbXBsZXMvdml0ZS9zcmMvbWFpbi50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgKiBhcyBBdXRvbWVyZ2UgZnJvbSBcImF1dG9tZXJnZS1qc1wiXG5cbi8vIGhlbGxvIHdvcmxkIGNvZGUgdGhhdCB3aWxsIHJ1biBjb3JyZWN0bHkgb24gd2ViIG9yIG5vZGVcblxuY29uc29sZS5sb2coQXV0b21lcmdlKVxubGV0IGRvYyA9IEF1dG9tZXJnZS5pbml0KClcbmRvYyA9IEF1dG9tZXJnZS5jaGFuZ2UoZG9jLCAoZDogYW55KSA9PiBkLmhlbGxvID0gXCJmcm9tIGF1dG9tZXJnZS1qc1wiKVxuY29uc29sZS5sb2coZG9jKVxuY29uc3QgcmVzdWx0ID0gSlNPTi5zdHJpbmdpZnkoZG9jKVxuXG5pZiAodHlwZW9mIGRvY3VtZW50ICE9PSAndW5kZWZpbmVkJykge1xuICAgIC8vIGJyb3dzZXJcbiAgICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnZGl2Jyk7XG4gICAgZWxlbWVudC5pbm5lckhUTUwgPSBKU09OLnN0cmluZ2lmeShyZXN1bHQpXG4gICAgZG9jdW1lbnQuYm9keS5hcHBlbmRDaGlsZChlbGVtZW50KTtcbn0gZWxzZSB7XG4gICAgLy8gc2VydmVyXG4gICAgY29uc29sZS5sb2coXCJub2RlOlwiLCByZXN1bHQpXG59XG5cbiJdLCJtYXBwaW5ncyI6IkFBQUEsWUFBWSxlQUFlO0FBSTNCLFFBQVEsSUFBSSxTQUFTO0FBQ3JCLElBQUksTUFBTSxVQUFVLEtBQUs7QUFDekIsTUFBTSxVQUFVLE9BQU8sS0FBSyxDQUFDLE1BQVcsRUFBRSxRQUFRLG1CQUFtQjtBQUNyRSxRQUFRLElBQUksR0FBRztBQUNmLE1BQU0sU0FBUyxLQUFLLFVBQVUsR0FBRztBQUVqQyxJQUFJLE9BQU8sYUFBYSxhQUFhO0FBRWpDLFFBQU0sVUFBVSxTQUFTLGNBQWMsS0FBSztBQUM1QyxVQUFRLFlBQVksS0FBSyxVQUFVLE1BQU07QUFDekMsV0FBUyxLQUFLLFlBQVksT0FBTztBQUNyQyxPQUFPO0FBRUgsVUFBUSxJQUFJLFNBQVMsTUFBTTtBQUMvQjsiLCJuYW1lcyI6W119 \ No newline at end of file +//# 
sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi9ob21lL2FsZXgvUHJvamVjdHMvYXV0b21lcmdlL2F1dG9tZXJnZS1ycy9hdXRvbWVyZ2UtanMvZXhhbXBsZXMvdml0ZS9zcmMvbWFpbi50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgKiBhcyBBdXRvbWVyZ2UgZnJvbSBcImF1dG9tZXJnZS1qc1wiXG5cbi8vIGhlbGxvIHdvcmxkIGNvZGUgdGhhdCB3aWxsIHJ1biBjb3JyZWN0bHkgb24gd2ViIG9yIG5vZGVcblxuY29uc29sZS5sb2coQXV0b21lcmdlKVxubGV0IGRvYyA9IEF1dG9tZXJnZS5pbml0KClcbmRvYyA9IEF1dG9tZXJnZS5jaGFuZ2UoZG9jLCAoZDogYW55KSA9PiBkLmhlbGxvID0gXCJmcm9tIGF1dG9tZXJnZS1qc1wiKVxuY29uc29sZS5sb2coZG9jKVxuY29uc3QgcmVzdWx0ID0gSlNPTi5zdHJpbmdpZnkoZG9jKVxuXG5pZiAodHlwZW9mIGRvY3VtZW50ICE9PSAndW5kZWZpbmVkJykge1xuICAgIC8vIGJyb3dzZXJcbiAgICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnZGl2Jyk7XG4gICAgZWxlbWVudC5pbm5lckhUTUwgPSBKU09OLnN0cmluZ2lmeShyZXN1bHQpXG4gICAgZG9jdW1lbnQuYm9keS5hcHBlbmRDaGlsZChlbGVtZW50KTtcbn0gZWxzZSB7XG4gICAgLy8gc2VydmVyXG4gICAgY29uc29sZS5sb2coXCJub2RlOlwiLCByZXN1bHQpXG59XG5cbiJdLCJtYXBwaW5ncyI6IkFBQUEsWUFBWSxlQUFlO0FBSTNCLFFBQVEsSUFBSSxTQUFTO0FBQ3JCLElBQUksTUFBTSxVQUFVLEtBQUs7QUFDekIsTUFBTSxVQUFVLE9BQU8sS0FBSyxDQUFDLE1BQVcsRUFBRSxRQUFRLG1CQUFtQjtBQUNyRSxRQUFRLElBQUksR0FBRztBQUNmLE1BQU0sU0FBUyxLQUFLLFVBQVUsR0FBRztBQUVqQyxJQUFJLE9BQU8sYUFBYSxhQUFhO0FBRWpDLFFBQU0sVUFBVSxTQUFTLGNBQWMsS0FBSztBQUM1QyxVQUFRLFlBQVksS0FBSyxVQUFVLE1BQU07QUFDekMsV0FBUyxLQUFLLFlBQVksT0FBTztBQUNyQyxPQUFPO0FBRUgsVUFBUSxJQUFJLFNBQVMsTUFBTTtBQUMvQjsiLCJuYW1lcyI6W119 diff --git a/javascript/examples/vite/src/counter.ts b/javascript/examples/vite/src/counter.ts index a3529e1f..3e516b6d 100644 --- a/javascript/examples/vite/src/counter.ts +++ b/javascript/examples/vite/src/counter.ts @@ -4,6 +4,6 @@ export function setupCounter(element: HTMLButtonElement) { counter = count element.innerHTML = `count is ${counter}` } - element.addEventListener('click', () => setCounter(++counter)) + element.addEventListener("click", () => setCounter(++counter)) setCounter(0) } diff --git a/javascript/examples/vite/src/main.ts b/javascript/examples/vite/src/main.ts index 8f7551d5..8dc8f92c 100644 --- a/javascript/examples/vite/src/main.ts +++ b/javascript/examples/vite/src/main.ts @@ -3,16 +3,15 @@ import * as Automerge from "@automerge/automerge" // hello world code that will run correctly on web or node let doc = Automerge.init() -doc = Automerge.change(doc, (d: any) => d.hello = "from automerge") +doc = Automerge.change(doc, (d: any) => (d.hello = "from automerge")) const result = JSON.stringify(doc) -if (typeof document !== 'undefined') { - // browser - const element = document.createElement('div'); - element.innerHTML = JSON.stringify(result) - document.body.appendChild(element); +if (typeof document !== "undefined") { + // browser + const element = document.createElement("div") + element.innerHTML = JSON.stringify(result) + document.body.appendChild(element) } else { - // server - console.log("node:", result) + // server + console.log("node:", result) } - diff --git a/javascript/examples/vite/vite.config.js b/javascript/examples/vite/vite.config.js index 9716d674..d80981bf 100644 --- a/javascript/examples/vite/vite.config.js +++ b/javascript/examples/vite/vite.config.js @@ -3,20 +3,20 @@ import wasm from "vite-plugin-wasm" import topLevelAwait from "vite-plugin-top-level-await" export default defineConfig({ + plugins: [topLevelAwait(), wasm()], + + // This is only necessary if you are using `SharedWorker` or `WebWorker`, as + // documented in https://vitejs.dev/guide/features.html#import-with-constructors + worker: { + format: "es", plugins: [topLevelAwait(), wasm()], + 
}, - // This is only necessary if you are using `SharedWorker` or `WebWorker`, as - // documented in https://vitejs.dev/guide/features.html#import-with-constructors - worker: { - format: "es", - plugins: [topLevelAwait(), wasm()] - }, - - optimizeDeps: { - // This is necessary because otherwise `vite dev` includes two separate - // versions of the JS wrapper. This causes problems because the JS - // wrapper has a module level variable to track JS side heap - // allocations, initializing this twice causes horrible breakage - exclude: ["@automerge/automerge-wasm"] - } + optimizeDeps: { + // This is necessary because otherwise `vite dev` includes two separate + // versions of the JS wrapper. This causes problems because the JS + // wrapper has a module level variable to track JS side heap + // allocations, initializing this twice causes horrible breakage + exclude: ["@automerge/automerge-wasm"], + }, }) diff --git a/javascript/examples/webpack/README.md b/javascript/examples/webpack/README.md index 917f9c8a..7563f27d 100644 --- a/javascript/examples/webpack/README.md +++ b/javascript/examples/webpack/README.md @@ -1,36 +1,34 @@ # Webpack + Automerge - Getting WASM working in webpack 5 is very easy. You just need to enable the `asyncWebAssembly` [experiment](https://webpack.js.org/configuration/experiments/). For example: - ```javascript -const path = require('path'); +const path = require("path") const clientConfig = { experiments: { asyncWebAssembly: true }, - target: 'web', - entry: './src/index.js', + target: "web", + entry: "./src/index.js", output: { - filename: 'main.js', - path: path.resolve(__dirname, 'public'), + filename: "main.js", + path: path.resolve(__dirname, "public"), }, mode: "development", // or production - performance: { // we dont want the wasm blob to generate warnings - hints: false, - maxEntrypointSize: 512000, - maxAssetSize: 512000 - } -}; + performance: { + // we dont want the wasm blob to generate warnings + hints: false, + maxEntrypointSize: 512000, + maxAssetSize: 512000, + }, +} module.exports = clientConfig ``` ## Running the example - ```bash yarn install yarn start diff --git a/javascript/examples/webpack/src/index.js b/javascript/examples/webpack/src/index.js index e3307083..3a9086e4 100644 --- a/javascript/examples/webpack/src/index.js +++ b/javascript/examples/webpack/src/index.js @@ -3,16 +3,15 @@ import * as Automerge from "@automerge/automerge" // hello world code that will run correctly on web or node let doc = Automerge.init() -doc = Automerge.change(doc, (d) => d.hello = "from automerge") +doc = Automerge.change(doc, d => (d.hello = "from automerge")) const result = JSON.stringify(doc) -if (typeof document !== 'undefined') { +if (typeof document !== "undefined") { // browser - const element = document.createElement('div'); + const element = document.createElement("div") element.innerHTML = JSON.stringify(result) - document.body.appendChild(element); + document.body.appendChild(element) } else { // server console.log("node:", result) } - diff --git a/javascript/examples/webpack/webpack.config.js b/javascript/examples/webpack/webpack.config.js index 3a6d83ff..51fd5127 100644 --- a/javascript/examples/webpack/webpack.config.js +++ b/javascript/examples/webpack/webpack.config.js @@ -1,36 +1,37 @@ -const path = require('path'); -const nodeExternals = require('webpack-node-externals'); +const path = require("path") +const nodeExternals = require("webpack-node-externals") // the most basic webpack config for node or web targets for automerge-wasm const 
serverConfig = { // basic setup for bundling a node package - target: 'node', + target: "node", externals: [nodeExternals()], externalsPresets: { node: true }, - entry: './src/index.js', + entry: "./src/index.js", output: { - filename: 'node.js', - path: path.resolve(__dirname, 'dist'), + filename: "node.js", + path: path.resolve(__dirname, "dist"), }, mode: "development", // or production -}; +} const clientConfig = { experiments: { asyncWebAssembly: true }, - target: 'web', - entry: './src/index.js', + target: "web", + entry: "./src/index.js", output: { - filename: 'main.js', - path: path.resolve(__dirname, 'public'), + filename: "main.js", + path: path.resolve(__dirname, "public"), }, mode: "development", // or production - performance: { // we dont want the wasm blob to generate warnings - hints: false, - maxEntrypointSize: 512000, - maxAssetSize: 512000 - } -}; + performance: { + // we dont want the wasm blob to generate warnings + hints: false, + maxEntrypointSize: 512000, + maxAssetSize: 512000, + }, +} -module.exports = [serverConfig, clientConfig]; +module.exports = [serverConfig, clientConfig] diff --git a/javascript/package.json b/javascript/package.json index 5fd2213e..b7afb5b7 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -53,6 +53,7 @@ "fast-sha256": "^1.3.0", "mocha": "^10.2.0", "pako": "^2.1.0", + "prettier": "^2.8.1", "ts-mocha": "^10.0.0", "ts-node": "^10.9.1", "typedoc": "^0.23.22", diff --git a/javascript/src/constants.ts b/javascript/src/constants.ts index e9517a60..d3bd8138 100644 --- a/javascript/src/constants.ts +++ b/javascript/src/constants.ts @@ -1,13 +1,12 @@ // Properties of the document root object -export const STATE = Symbol.for('_am_meta') // symbol used to hide application metadata on automerge objects -export const TRACE = Symbol.for('_am_trace') // used for debugging -export const OBJECT_ID = Symbol.for('_am_objectId') // synbol used to hide the object id on automerge objects -export const IS_PROXY = Symbol.for('_am_isProxy') // symbol used to test if the document is a proxy object - -export const UINT = Symbol.for('_am_uint') -export const INT = Symbol.for('_am_int') -export const F64 = Symbol.for('_am_f64') -export const COUNTER = Symbol.for('_am_counter') -export const TEXT = Symbol.for('_am_text') +export const STATE = Symbol.for("_am_meta") // symbol used to hide application metadata on automerge objects +export const TRACE = Symbol.for("_am_trace") // used for debugging +export const OBJECT_ID = Symbol.for("_am_objectId") // synbol used to hide the object id on automerge objects +export const IS_PROXY = Symbol.for("_am_isProxy") // symbol used to test if the document is a proxy object +export const UINT = Symbol.for("_am_uint") +export const INT = Symbol.for("_am_int") +export const F64 = Symbol.for("_am_f64") +export const COUNTER = Symbol.for("_am_counter") +export const TEXT = Symbol.for("_am_text") diff --git a/javascript/src/counter.ts b/javascript/src/counter.ts index c20d7fcf..d94a3034 100644 --- a/javascript/src/counter.ts +++ b/javascript/src/counter.ts @@ -6,7 +6,7 @@ import { COUNTER } from "./constants" * the value trivially converges. */ export class Counter { - value : number; + value: number constructor(value?: number) { this.value = value || 0 @@ -21,7 +21,7 @@ export class Counter { * concatenating it with another string, as in `x + ''`. 
* https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/valueOf */ - valueOf() : number { + valueOf(): number { return this.value } @@ -30,7 +30,7 @@ export class Counter { * this method is called e.g. when you do `['value: ', x].join('')` or when * you use string interpolation: `value: ${x}`. */ - toString() : string { + toString(): string { return this.valueOf().toString() } @@ -38,7 +38,7 @@ export class Counter { * Returns the counter value, so that a JSON serialization of an Automerge * document represents the counter simply as an integer. */ - toJSON() : number { + toJSON(): number { return this.value } } @@ -53,20 +53,26 @@ class WriteableCounter extends Counter { objectId: ObjID key: Prop - constructor(value: number, context: Automerge, path: string[], objectId: ObjID, key: Prop) { + constructor( + value: number, + context: Automerge, + path: string[], + objectId: ObjID, + key: Prop + ) { super(value) this.context = context this.path = path this.objectId = objectId this.key = key } - + /** * Increases the value of the counter by `delta`. If `delta` is not given, * increases the value of the counter by 1. */ - increment(delta: number) : number { - delta = typeof delta === 'number' ? delta : 1 + increment(delta: number): number { + delta = typeof delta === "number" ? delta : 1 this.context.increment(this.objectId, this.key, delta) this.value += delta return this.value @@ -76,8 +82,8 @@ class WriteableCounter extends Counter { * Decreases the value of the counter by `delta`. If `delta` is not given, * decreases the value of the counter by 1. */ - decrement(delta: number) : number { - return this.increment(typeof delta === 'number' ? -delta : -1) + decrement(delta: number): number { + return this.increment(typeof delta === "number" ? -delta : -1) } } @@ -87,8 +93,14 @@ class WriteableCounter extends Counter { * `objectId` is the ID of the object containing the counter, and `key` is * the property name (key in map, or index in list) where the counter is * located. 
-*/ -export function getWriteableCounter(value: number, context: Automerge, path: string[], objectId: ObjID, key: Prop) { + */ +export function getWriteableCounter( + value: number, + context: Automerge, + path: string[], + objectId: ObjID, + key: Prop +) { return new WriteableCounter(value, context, path, objectId, key) } diff --git a/javascript/src/index.ts b/javascript/src/index.ts index df71c648..23df47ce 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -1,45 +1,71 @@ - /** @hidden **/ -export {/** @hidden */ uuid} from './uuid' +export { /** @hidden */ uuid } from "./uuid" -import {rootProxy, listProxy, mapProxy} from "./proxies" -import {STATE, TRACE, IS_PROXY, OBJECT_ID } from "./constants" +import { rootProxy, listProxy, mapProxy } from "./proxies" +import { STATE, TRACE, IS_PROXY, OBJECT_ID } from "./constants" -import {AutomergeValue, Counter} from "./types" -export {AutomergeValue, Counter, Int, Uint, Float64, ScalarValue} from "./types" +import { AutomergeValue, Counter } from "./types" +export { + AutomergeValue, + Counter, + Int, + Uint, + Float64, + ScalarValue, +} from "./types" -import {type API, type Patch} from "@automerge/automerge-wasm"; -export { type Patch, PutPatch, DelPatch, SplicePatch, IncPatch, SyncMessage, } from "@automerge/automerge-wasm" -import {ApiHandler, ChangeToEncode, UseApi} from "./low_level" +import { type API, type Patch } from "@automerge/automerge-wasm" +export { + type Patch, + PutPatch, + DelPatch, + SplicePatch, + IncPatch, + SyncMessage, +} from "@automerge/automerge-wasm" +import { ApiHandler, ChangeToEncode, UseApi } from "./low_level" -import {Actor as ActorId, Prop, ObjID, Change, DecodedChange, Heads, Automerge, MaterializeValue} from "@automerge/automerge-wasm" -import {JsSyncState as SyncState, SyncMessage, DecodedSyncMessage} from "@automerge/automerge-wasm" +import { + Actor as ActorId, + Prop, + ObjID, + Change, + DecodedChange, + Heads, + Automerge, + MaterializeValue, +} from "@automerge/automerge-wasm" +import { + JsSyncState as SyncState, + SyncMessage, + DecodedSyncMessage, +} from "@automerge/automerge-wasm" /** Options passed to {@link change}, and {@link emptyChange} * @typeParam T - The type of value contained in the document */ export type ChangeOptions = { - /** A message which describes the changes */ - message?: string, - /** The unix timestamp of the change (purely advisory, not used in conflict resolution) */ - time?: number, - /** A callback which will be called to notify the caller of any changes to the document */ - patchCallback?: PatchCallback + /** A message which describes the changes */ + message?: string + /** The unix timestamp of the change (purely advisory, not used in conflict resolution) */ + time?: number + /** A callback which will be called to notify the caller of any changes to the document */ + patchCallback?: PatchCallback } /** Options passed to {@link loadIncremental}, {@link applyChanges}, and {@link receiveSyncMessage} * @typeParam T - The type of value contained in the document */ -export type ApplyOptions = {patchCallback?: PatchCallback} +export type ApplyOptions = { patchCallback?: PatchCallback } -/** +/** * An automerge document. * @typeParam T - The type of the value contained in this document * * Note that this provides read only access to the fields of the value. 
To * modify the value use {@link change} */ -export type Doc = {readonly [P in keyof T]: T[P]} +export type Doc = { readonly [P in keyof T]: T[P] } /** * Function which is called by {@link change} when making changes to a `Doc` @@ -56,79 +82,86 @@ export type ChangeFn = (doc: T) => void * @param before - The document before the change was made * @param after - The document after the change was made */ -export type PatchCallback = (patches: Array, before: Doc, after: Doc) => void +export type PatchCallback = ( + patches: Array, + before: Doc, + after: Doc +) => void /** @hidden **/ export interface State { - change: DecodedChange - snapshot: T + change: DecodedChange + snapshot: T } /** @hidden **/ export function use(api: API) { - UseApi(api) + UseApi(api) } import * as wasm from "@automerge/automerge-wasm" use(wasm) -/** +/** * Options to be passed to {@link init} or {@link load} * @typeParam T - The type of the value the document contains */ export type InitOptions = { - /** The actor ID to use for this document, a random one will be generated if `null` is passed */ - actor?: ActorId, - freeze?: boolean, - /** A callback which will be called with the initial patch once the document has finished loading */ - patchCallback?: PatchCallback, -}; - + /** The actor ID to use for this document, a random one will be generated if `null` is passed */ + actor?: ActorId + freeze?: boolean + /** A callback which will be called with the initial patch once the document has finished loading */ + patchCallback?: PatchCallback +} interface InternalState { - handle: Automerge, - heads: Heads | undefined, - freeze: boolean, - patchCallback?: PatchCallback + handle: Automerge + heads: Heads | undefined + freeze: boolean + patchCallback?: PatchCallback } /** @hidden */ export function getBackend(doc: Doc): Automerge { - return _state(doc).handle + return _state(doc).handle } function _state(doc: Doc, checkroot = true): InternalState { - if (typeof doc !== 'object') { - throw new RangeError("must be the document root") - } - const state = Reflect.get(doc, STATE) as InternalState - if (state === undefined || state == null || (checkroot && _obj(doc) !== "_root")) { - throw new RangeError("must be the document root") - } - return state + if (typeof doc !== "object") { + throw new RangeError("must be the document root") + } + const state = Reflect.get(doc, STATE) as InternalState + if ( + state === undefined || + state == null || + (checkroot && _obj(doc) !== "_root") + ) { + throw new RangeError("must be the document root") + } + return state } function _trace(doc: Doc): string | undefined { - return Reflect.get(doc, TRACE) as string + return Reflect.get(doc, TRACE) as string } function _obj(doc: Doc): ObjID | null { - if (!(typeof doc === 'object') || doc === null) { - return null - } - return Reflect.get(doc, OBJECT_ID) as ObjID + if (!(typeof doc === "object") || doc === null) { + return null + } + return Reflect.get(doc, OBJECT_ID) as ObjID } function _is_proxy(doc: Doc): boolean { - return !!Reflect.get(doc, IS_PROXY) + return !!Reflect.get(doc, IS_PROXY) } function importOpts(_actor?: ActorId | InitOptions): InitOptions { - if (typeof _actor === 'object') { - return _actor - } else { - return {actor: _actor} - } + if (typeof _actor === "object") { + return _actor + } else { + return { actor: _actor } + } } /** @@ -141,22 +174,27 @@ function importOpts(_actor?: ActorId | InitOptions): InitOptions { * random actor ID */ export function init(_opts?: ActorId | InitOptions): Doc { - const opts = 
importOpts(_opts) - const freeze = !!opts.freeze - const patchCallback = opts.patchCallback - const handle = ApiHandler.create(opts.actor) - handle.enablePatches(true) - handle.enableFreeze(!!opts.freeze) - handle.registerDatatype("counter", (n) => new Counter(n)) - const doc = handle.materialize("/", undefined, {handle, heads: undefined, freeze, patchCallback}) as Doc - return doc + const opts = importOpts(_opts) + const freeze = !!opts.freeze + const patchCallback = opts.patchCallback + const handle = ApiHandler.create(opts.actor) + handle.enablePatches(true) + handle.enableFreeze(!!opts.freeze) + handle.registerDatatype("counter", n => new Counter(n)) + const doc = handle.materialize("/", undefined, { + handle, + heads: undefined, + freeze, + patchCallback, + }) as Doc + return doc } /** * Make an immutable view of an automerge document as at `heads` * * @remarks - * The document returned from this function cannot be passed to {@link change}. + * The document returned from this function cannot be passed to {@link change}. * This is because it shares the same underlying memory as `doc`, but it is * consequently a very cheap copy. * @@ -168,9 +206,13 @@ export function init(_opts?: ActorId | InitOptions): Doc { * @param heads - The hashes of the heads to create a view at */ export function view(doc: Doc, heads: Heads): Doc { - const state = _state(doc) - const handle = state.handle - return state.handle.materialize("/", heads, { ...state, handle, heads }) as Doc + const state = _state(doc) + const handle = state.handle + return state.handle.materialize("/", heads, { + ...state, + handle, + heads, + }) as Doc } /** @@ -188,16 +230,19 @@ export function view(doc: Doc, heads: Heads): Doc { * @param doc - The document to clone * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} */ -export function clone(doc: Doc, _opts?: ActorId | InitOptions): Doc { - const state = _state(doc) - const heads = state.heads - const opts = importOpts(_opts) - const handle = state.handle.fork(opts.actor, heads) +export function clone( + doc: Doc, + _opts?: ActorId | InitOptions +): Doc { + const state = _state(doc) + const heads = state.heads + const opts = importOpts(_opts) + const handle = state.handle.fork(opts.actor, heads) - // `change` uses the presence of state.heads to determine if we are in a view - // set it to undefined to indicate that this is a full fat document - const {heads: oldHeads, ...stateSansHeads} = state - return handle.applyPatches(doc, { ... stateSansHeads, handle }) + // `change` uses the presence of state.heads to determine if we are in a view + // set it to undefined to indicate that this is a full fat document + const { heads: oldHeads, ...stateSansHeads } = state + return handle.applyPatches(doc, { ...stateSansHeads, handle }) } /** Explicity free the memory backing a document. 
Note that this is note @@ -205,10 +250,10 @@ export function clone(doc: Doc, _opts?: ActorId | InitOptions): Doc * [`FinalizationRegistry`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry) */ export function free(doc: Doc) { - return _state(doc).handle.free() + return _state(doc).handle.free() } -/** +/** * Create an automerge document from a POJO * * @param initialState - The initial state which will be copied into the document @@ -224,11 +269,14 @@ export function free(doc: Doc) { * }) * ``` */ -export function from>(initialState: T | Doc, _opts?: ActorId | InitOptions): Doc { - return change(init(_opts), (d) => Object.assign(d, initialState)) +export function from>( + initialState: T | Doc, + _opts?: ActorId | InitOptions +): Doc { + return change(init(_opts), d => Object.assign(d, initialState)) } -/** +/** * Update the contents of an automerge document * @typeParam T - The type of the value contained in the document * @param doc - The document to update @@ -255,7 +303,7 @@ export function from>(initialState: T | Doc * ``` * * @example A change with a message and a timestamp - * + * * ``` * doc1 = automerge.change(doc1, {message: "add another value", timestamp: 1640995200}, d => { * d.key2 = "value2" @@ -274,66 +322,82 @@ export function from>(initialState: T | Doc * assert.equal(patchedPath, ["key2"]) * ``` */ -export function change(doc: Doc, options: string | ChangeOptions | ChangeFn, callback?: ChangeFn): Doc { - if (typeof options === 'function') { - return _change(doc, {}, options) - } else if (typeof callback === 'function') { - if (typeof options === "string") { - options = {message: options} - } - return _change(doc, options, callback) - } else { - throw RangeError("Invalid args for change") +export function change( + doc: Doc, + options: string | ChangeOptions | ChangeFn, + callback?: ChangeFn +): Doc { + if (typeof options === "function") { + return _change(doc, {}, options) + } else if (typeof callback === "function") { + if (typeof options === "string") { + options = { message: options } } + return _change(doc, options, callback) + } else { + throw RangeError("Invalid args for change") + } } -function progressDocument(doc: Doc, heads: Heads | null, callback?: PatchCallback): Doc { - if (heads == null) { - return doc - } - const state = _state(doc) - const nextState = {...state, heads: undefined}; - const nextDoc = state.handle.applyPatches(doc, nextState, callback) +function progressDocument( + doc: Doc, + heads: Heads | null, + callback?: PatchCallback +): Doc { + if (heads == null) { + return doc + } + const state = _state(doc) + const nextState = { ...state, heads: undefined } + const nextDoc = state.handle.applyPatches(doc, nextState, callback) + state.heads = heads + return nextDoc +} + +function _change( + doc: Doc, + options: ChangeOptions, + callback: ChangeFn +): Doc { + if (typeof callback !== "function") { + throw new RangeError("invalid change function") + } + + const state = _state(doc) + + if (doc === undefined || state === undefined) { + throw new RangeError("must be the document root") + } + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." 
+ ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + try { state.heads = heads - return nextDoc -} - -function _change(doc: Doc, options: ChangeOptions, callback: ChangeFn): Doc { - - - if (typeof callback !== "function") { - throw new RangeError("invalid change function"); - } - - const state = _state(doc) - - if (doc === undefined || state === undefined) { - throw new RangeError("must be the document root"); - } - if (state.heads) { - throw new RangeError("Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy.") - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - try { - state.heads = heads - const root: T = rootProxy(state.handle); - callback(root) - if (state.handle.pendingOps() === 0) { - state.heads = undefined - return doc - } else { - state.handle.commit(options.message, options.time) - return progressDocument(doc, heads, options.patchCallback || state.patchCallback); - } - } catch (e) { - //console.log("ERROR: ",e) - state.heads = undefined - state.handle.rollback() - throw e + const root: T = rootProxy(state.handle) + callback(root) + if (state.handle.pendingOps() === 0) { + state.heads = undefined + return doc + } else { + state.handle.commit(options.message, options.time) + return progressDocument( + doc, + heads, + options.patchCallback || state.patchCallback + ) } + } catch (e) { + //console.log("ERROR: ",e) + state.heads = undefined + state.handle.rollback() + throw e + } } /** @@ -347,26 +411,31 @@ function _change(doc: Doc, options: ChangeOptions, callback: ChangeFn(doc: Doc, options: string | ChangeOptions | void) { - if (options === undefined) { - options = {} - } - if (typeof options === "string") { - options = {message: options} - } +export function emptyChange( + doc: Doc, + options: string | ChangeOptions | void +) { + if (options === undefined) { + options = {} + } + if (typeof options === "string") { + options = { message: options } + } - const state = _state(doc) + const state = _state(doc) - if (state.heads) { - throw new RangeError("Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy.") - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } - const heads = state.handle.getHeads() - state.handle.emptyChange(options.message, options.time) - return progressDocument(doc, heads) + const heads = state.handle.getHeads() + state.handle.emptyChange(options.message, options.time) + return progressDocument(doc, heads) } /** @@ -384,16 +453,23 @@ export function emptyChange(doc: Doc, options: string | ChangeOptions | * have the complete document yet). If you need to handle incomplete content use * {@link init} followed by {@link loadIncremental}. 
*/ -export function load(data: Uint8Array, _opts?: ActorId | InitOptions): Doc { - const opts = importOpts(_opts) - const actor = opts.actor - const patchCallback = opts.patchCallback - const handle = ApiHandler.load(data, actor) - handle.enablePatches(true) - handle.enableFreeze(!!opts.freeze) - handle.registerDatatype("counter", (n) => new Counter(n)) - const doc = handle.materialize("/", undefined, {handle, heads: undefined, patchCallback}) as Doc - return doc +export function load( + data: Uint8Array, + _opts?: ActorId | InitOptions +): Doc { + const opts = importOpts(_opts) + const actor = opts.actor + const patchCallback = opts.patchCallback + const handle = ApiHandler.load(data, actor) + handle.enablePatches(true) + handle.enableFreeze(!!opts.freeze) + handle.registerDatatype("counter", n => new Counter(n)) + const doc = handle.materialize("/", undefined, { + handle, + heads: undefined, + patchCallback, + }) as Doc + return doc } /** @@ -413,18 +489,26 @@ export function load(data: Uint8Array, _opts?: ActorId | InitOptions): Doc * Note that this function will succesfully load the results of {@link save} as * well as {@link getLastLocalChange} or any other incremental change. */ -export function loadIncremental(doc: Doc, data: Uint8Array, opts?: ApplyOptions): Doc { - if (!opts) {opts = {}} - const state = _state(doc) - if (state.heads) { - throw new RangeError("Attempting to change an out of date document - set at: " + _trace(doc)); - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - state.handle.loadIncremental(data) - return progressDocument(doc, heads, opts.patchCallback || state.patchCallback) +export function loadIncremental( + doc: Doc, + data: Uint8Array, + opts?: ApplyOptions +): Doc { + if (!opts) { + opts = {} + } + const state = _state(doc) + if (state.heads) { + throw new RangeError( + "Attempting to change an out of date document - set at: " + _trace(doc) + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.loadIncremental(data) + return progressDocument(doc, heads, opts.patchCallback || state.patchCallback) } /** @@ -435,7 +519,7 @@ export function loadIncremental(doc: Doc, data: Uint8Array, opts?: ApplyOp * The returned bytes can be passed to {@link load} or {@link loadIncremental} */ export function save(doc: Doc): Uint8Array { - return _state(doc).handle.save() + return _state(doc).handle.save() } /** @@ -446,7 +530,7 @@ export function save(doc: Doc): Uint8Array { * * @returns - The merged document * - * Often when you are merging documents you will also need to clone them. Both + * Often when you are merging documents you will also need to clone them. Both * arguments to `merge` are frozen after the call so you can no longer call * mutating methods (such as {@link change}) on them. The symtom of this will be * an error which says "Attempting to change an out of date document". To @@ -454,29 +538,31 @@ export function save(doc: Doc): Uint8Array { * merge}. 
*/ export function merge(local: Doc, remote: Doc): Doc { - const localState = _state(local) + const localState = _state(local) - if (localState.heads) { - throw new RangeError("Attempting to change an out of date document - set at: " + _trace(local)); - } - const heads = localState.handle.getHeads() - const remoteState = _state(remote) - const changes = localState.handle.getChangesAdded(remoteState.handle) - localState.handle.applyChanges(changes) - return progressDocument(local, heads, localState.patchCallback) + if (localState.heads) { + throw new RangeError( + "Attempting to change an out of date document - set at: " + _trace(local) + ) + } + const heads = localState.handle.getHeads() + const remoteState = _state(remote) + const changes = localState.handle.getChangesAdded(remoteState.handle) + localState.handle.applyChanges(changes) + return progressDocument(local, heads, localState.patchCallback) } /** * Get the actor ID associated with the document */ export function getActorId(doc: Doc): ActorId { - const state = _state(doc) - return state.handle.getActorId() + const state = _state(doc) + return state.handle.getActorId() } /** * The type of conflicts for particular key or index - * + * * Maps and sequences in automerge can contain conflicting values for a * particular key or index. In this case {@link getConflicts} can be used to * obtain a `Conflicts` representing the multiple values present for the property @@ -484,47 +570,51 @@ export function getActorId(doc: Doc): ActorId { * A `Conflicts` is a map from a unique (per property or index) key to one of * the possible conflicting values for the given property. */ -type Conflicts = {[key: string]: AutomergeValue} +type Conflicts = { [key: string]: AutomergeValue } -function conflictAt(context: Automerge, objectId: ObjID, prop: Prop): Conflicts | undefined { - const values = context.getAll(objectId, prop) - if (values.length <= 1) { - return +function conflictAt( + context: Automerge, + objectId: ObjID, + prop: Prop +): Conflicts | undefined { + const values = context.getAll(objectId, prop) + if (values.length <= 1) { + return + } + const result: Conflicts = {} + for (const fullVal of values) { + switch (fullVal[0]) { + case "map": + result[fullVal[1]] = mapProxy(context, fullVal[1], [prop], true) + break + case "list": + result[fullVal[1]] = listProxy(context, fullVal[1], [prop], true) + break + case "text": + result[fullVal[1]] = context.text(fullVal[1]) + break + //case "table": + //case "cursor": + case "str": + case "uint": + case "int": + case "f64": + case "boolean": + case "bytes": + case "null": + result[fullVal[2]] = fullVal[1] + break + case "counter": + result[fullVal[2]] = new Counter(fullVal[1]) + break + case "timestamp": + result[fullVal[2]] = new Date(fullVal[1]) + break + default: + throw RangeError(`datatype ${fullVal[0]} unimplemented`) } - const result: Conflicts = {} - for (const fullVal of values) { - switch (fullVal[0]) { - case "map": - result[fullVal[1]] = mapProxy(context, fullVal[1], [prop], true) - break; - case "list": - result[fullVal[1]] = listProxy(context, fullVal[1], [prop], true) - break; - case "text": - result[fullVal[1]] = context.text(fullVal[1]) - break; - //case "table": - //case "cursor": - case "str": - case "uint": - case "int": - case "f64": - case "boolean": - case "bytes": - case "null": - result[fullVal[2]] = fullVal[1] - break; - case "counter": - result[fullVal[2]] = new Counter(fullVal[1]) - break; - case "timestamp": - result[fullVal[2]] = new Date(fullVal[1]) - break; - default: - 
throw RangeError(`datatype ${fullVal[0]} unimplemented`) - } - } - return result + } + return result } /** @@ -532,36 +622,36 @@ function conflictAt(context: Automerge, objectId: ObjID, prop: Prop): Conflicts * * The values of properties in a map in automerge can be conflicted if there * are concurrent "put" operations to the same key. Automerge chooses one value - * arbitrarily (but deterministically, any two nodes who have the same set of - * changes will choose the same value) from the set of conflicting values to - * present as the value of the key. + * arbitrarily (but deterministically, any two nodes who have the same set of + * changes will choose the same value) from the set of conflicting values to + * present as the value of the key. * * Sometimes you may want to examine these conflicts, in this case you can use - * {@link getConflicts} to get the conflicts for the key. + * {@link getConflicts} to get the conflicts for the key. * * @example * ``` * import * as automerge from "@automerge/automerge" - * + * * type Profile = { * pets: Array<{name: string, type: string}> * } - * + * * let doc1 = automerge.init("aaaa") * doc1 = automerge.change(doc1, d => { * d.pets = [{name: "Lassie", type: "dog"}] * }) * let doc2 = automerge.init("bbbb") * doc2 = automerge.merge(doc2, automerge.clone(doc1)) - * + * * doc2 = automerge.change(doc2, d => { * d.pets[0].name = "Beethoven" * }) - * + * * doc1 = automerge.change(doc1, d => { * d.pets[0].name = "Babe" * }) - * + * * const doc3 = automerge.merge(doc1, doc2) * * // Note that here we pass `doc3.pets`, not `doc3` @@ -571,14 +661,17 @@ function conflictAt(context: Automerge, objectId: ObjID, prop: Prop): Conflicts * assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) * ``` */ -export function getConflicts(doc: Doc, prop: Prop): Conflicts | undefined { - const state = _state(doc, false) - const objectId = _obj(doc) - if (objectId != null) { - return conflictAt(state.handle, objectId, prop) - } else { - return undefined - } +export function getConflicts( + doc: Doc, + prop: Prop +): Conflicts | undefined { + const state = _state(doc, false) + const objectId = _obj(doc) + if (objectId != null) { + return conflictAt(state.handle, objectId, prop) + } else { + return undefined + } } /** @@ -589,8 +682,8 @@ export function getConflicts(doc: Doc, prop: Prop): Conflicts | undefined * getLastLocalChange} and send the result over the network to other peers. */ export function getLastLocalChange(doc: Doc): Change | undefined { - const state = _state(doc) - return state.handle.getLastLocalChange() || undefined + const state = _state(doc) + return state.handle.getLastLocalChange() || undefined } /** @@ -600,16 +693,16 @@ export function getLastLocalChange(doc: Doc): Change | undefined { * if `doc` is not an automerge document this will return null. */ export function getObjectId(doc: Doc, prop?: Prop): ObjID | null { - if (prop) { - const state = _state(doc, false) - const objectId = _obj(doc) - if (!state || !objectId) { - return null - } - return state.handle.get(objectId, prop) as ObjID - } else { - return _obj(doc) + if (prop) { + const state = _state(doc, false) + const objectId = _obj(doc) + if (!state || !objectId) { + return null } + return state.handle.get(objectId, prop) as ObjID + } else { + return _obj(doc) + } } /** @@ -619,11 +712,11 @@ export function getObjectId(doc: Doc, prop?: Prop): ObjID | null { * Note that this will crash if there are changes in `oldState` which are not in `newState`. 
*/ export function getChanges(oldState: Doc, newState: Doc): Change[] { - const n = _state(newState) - return n.handle.getChanges(getHeads(oldState)) + const n = _state(newState) + return n.handle.getChanges(getHeads(oldState)) } -/** +/** * Get all the changes in a document * * This is different to {@link save} because the output is an array of changes @@ -631,8 +724,8 @@ export function getChanges(oldState: Doc, newState: Doc): Change[] { * */ export function getAllChanges(doc: Doc): Change[] { - const state = _state(doc) - return state.handle.getChanges([]) + const state = _state(doc) + return state.handle.getChanges([]) } /** @@ -646,48 +739,58 @@ export function getAllChanges(doc: Doc): Change[] { * informed of any changes which occur as a result of applying the changes * */ -export function applyChanges(doc: Doc, changes: Change[], opts?: ApplyOptions): [Doc] { - const state = _state(doc) - if (!opts) {opts = {}} - if (state.heads) { - throw new RangeError("Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy.") - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads(); - state.handle.applyChanges(changes) - state.heads = heads; - return [progressDocument(doc, heads, opts.patchCallback || state.patchCallback)] +export function applyChanges( + doc: Doc, + changes: Change[], + opts?: ApplyOptions +): [Doc] { + const state = _state(doc) + if (!opts) { + opts = {} + } + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.applyChanges(changes) + state.heads = heads + return [ + progressDocument(doc, heads, opts.patchCallback || state.patchCallback), + ] } /** @hidden */ export function getHistory(doc: Doc): State[] { - const history = getAllChanges(doc) - return history.map((change, index) => ({ - get change() { - return decodeChange(change) - }, - get snapshot() { - const [state] = applyChanges(init(), history.slice(0, index + 1)) - return state - } - }) - ) + const history = getAllChanges(doc) + return history.map((change, index) => ({ + get change() { + return decodeChange(change) + }, + get snapshot() { + const [state] = applyChanges(init(), history.slice(0, index + 1)) + return state + }, + })) } /** @hidden */ // FIXME : no tests // FIXME can we just use deep equals now? 
export function equals(val1: unknown, val2: unknown): boolean { - if (!isObject(val1) || !isObject(val2)) return val1 === val2 - const keys1 = Object.keys(val1).sort(), keys2 = Object.keys(val2).sort() - if (keys1.length !== keys2.length) return false - for (let i = 0; i < keys1.length; i++) { - if (keys1[i] !== keys2[i]) return false - if (!equals(val1[keys1[i]], val2[keys2[i]])) return false - } - return true + if (!isObject(val1) || !isObject(val2)) return val1 === val2 + const keys1 = Object.keys(val1).sort(), + keys2 = Object.keys(val2).sort() + if (keys1.length !== keys2.length) return false + for (let i = 0; i < keys1.length; i++) { + if (keys1[i] !== keys2[i]) return false + if (!equals(val1[keys1[i]], val2[keys2[i]])) return false + } + return true } /** @@ -696,10 +799,10 @@ export function equals(val1: unknown, val2: unknown): boolean { * @group sync * */ export function encodeSyncState(state: SyncState): Uint8Array { - const sync = ApiHandler.importSyncState(state) - const result = ApiHandler.encodeSyncState(sync) - sync.free() - return result + const sync = ApiHandler.importSyncState(state) + const result = ApiHandler.encodeSyncState(sync) + sync.free() + return result } /** @@ -708,10 +811,10 @@ export function encodeSyncState(state: SyncState): Uint8Array { * @group sync */ export function decodeSyncState(state: Uint8Array): SyncState { - const sync = ApiHandler.decodeSyncState(state) - const result = ApiHandler.exportSyncState(sync) - sync.free() - return result + const sync = ApiHandler.decodeSyncState(state) + const result = ApiHandler.exportSyncState(sync) + sync.free() + return result } /** @@ -725,12 +828,15 @@ export function decodeSyncState(state: Uint8Array): SyncState { * `newSyncState` should replace `inState` and `syncMessage` should be sent to * the peer if it is not null. If `syncMessage` is null then we are up to date. */ -export function generateSyncMessage(doc: Doc, inState: SyncState): [SyncState, SyncMessage | null] { - const state = _state(doc) - const syncState = ApiHandler.importSyncState(inState) - const message = state.handle.generateSyncMessage(syncState) - const outState = ApiHandler.exportSyncState(syncState) - return [outState, message] +export function generateSyncMessage( + doc: Doc, + inState: SyncState +): [SyncState, SyncMessage | null] { + const state = _state(doc) + const syncState = ApiHandler.importSyncState(inState) + const message = state.handle.generateSyncMessage(syncState) + const outState = ApiHandler.exportSyncState(syncState) + return [outState, message] } /** @@ -741,7 +847,7 @@ export function generateSyncMessage(doc: Doc, inState: SyncState): [SyncSt * @param doc - The doc the sync message is about * @param inState - The {@link SyncState} for the peer we are communicating with * @param message - The message which was received - * @param opts - Any {@link ApplyOption}s, used for passing a + * @param opts - Any {@link ApplyOption}s, used for passing a * {@link PatchCallback} which will be informed of any changes * in `doc` which occur because of the received sync message. * @@ -750,20 +856,33 @@ export function generateSyncMessage(doc: Doc, inState: SyncState): [SyncSt * `inState` and `syncMessage` should be sent to the peer if it is not null. If * `syncMessage` is null then we are up to date. 
*/ -export function receiveSyncMessage(doc: Doc, inState: SyncState, message: SyncMessage, opts?: ApplyOptions): [Doc, SyncState, null] { - const syncState = ApiHandler.importSyncState(inState) - if (!opts) {opts = {}} - const state = _state(doc) - if (state.heads) { - throw new RangeError("Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy.") - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - state.handle.receiveSyncMessage(syncState, message) - const outSyncState = ApiHandler.exportSyncState(syncState) - return [progressDocument(doc, heads, opts.patchCallback || state.patchCallback), outSyncState, null]; +export function receiveSyncMessage( + doc: Doc, + inState: SyncState, + message: SyncMessage, + opts?: ApplyOptions +): [Doc, SyncState, null] { + const syncState = ApiHandler.importSyncState(inState) + if (!opts) { + opts = {} + } + const state = _state(doc) + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.receiveSyncMessage(syncState, message) + const outSyncState = ApiHandler.exportSyncState(syncState) + return [ + progressDocument(doc, heads, opts.patchCallback || state.patchCallback), + outSyncState, + null, + ] } /** @@ -775,75 +894,81 @@ export function receiveSyncMessage(doc: Doc, inState: SyncState, message: * @group sync */ export function initSyncState(): SyncState { - return ApiHandler.exportSyncState(ApiHandler.initSyncState()) + return ApiHandler.exportSyncState(ApiHandler.initSyncState()) } /** @hidden */ export function encodeChange(change: ChangeToEncode): Change { - return ApiHandler.encodeChange(change) + return ApiHandler.encodeChange(change) } /** @hidden */ export function decodeChange(data: Change): DecodedChange { - return ApiHandler.decodeChange(data) + return ApiHandler.decodeChange(data) } /** @hidden */ export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { - return ApiHandler.encodeSyncMessage(message) + return ApiHandler.encodeSyncMessage(message) } /** @hidden */ export function decodeSyncMessage(message: SyncMessage): DecodedSyncMessage { - return ApiHandler.decodeSyncMessage(message) + return ApiHandler.decodeSyncMessage(message) } /** * Get any changes in `doc` which are not dependencies of `heads` */ export function getMissingDeps(doc: Doc, heads: Heads): Heads { - const state = _state(doc) - return state.handle.getMissingDeps(heads) + const state = _state(doc) + return state.handle.getMissingDeps(heads) } -export function splice(doc: Doc, prop: Prop, index: number, del: number, newText?: string) { - if (!_is_proxy(doc)) { - throw new RangeError("object cannot be modified outside of a change block") - } - const state = _state(doc, false) - const objectId = _obj(doc) - if (!objectId) { - throw new RangeError("invalid object for splice") - } - const value = `${objectId}/${prop}` - try { - return state.handle.splice(value, index, del, newText) - } catch (e) { - throw new RangeError(`Cannot splice: ${e}`) - } +export function splice( + doc: Doc, + prop: Prop, + index: number, + del: number, + newText?: string +) { + if (!_is_proxy(doc)) { + throw new RangeError("object cannot be modified outside of a change block") + } + const state = 
_state(doc, false) + const objectId = _obj(doc) + if (!objectId) { + throw new RangeError("invalid object for splice") + } + const value = `${objectId}/${prop}` + try { + return state.handle.splice(value, index, del, newText) + } catch (e) { + throw new RangeError(`Cannot splice: ${e}`) + } } /** * Get the hashes of the heads of this document */ export function getHeads(doc: Doc): Heads { - const state = _state(doc) - return state.heads || state.handle.getHeads() + const state = _state(doc) + return state.heads || state.handle.getHeads() } /** @hidden */ export function dump(doc: Doc) { - const state = _state(doc) - state.handle.dump() + const state = _state(doc) + state.handle.dump() } /** @hidden */ export function toJS(doc: Doc): T { - const state = _state(doc) - const enabled = state.handle.enableFreeze(false) - const result = state.handle.materialize() - state.handle.enableFreeze(enabled) - return result as T + const state = _state(doc) + const enabled = state.handle.enableFreeze(false) + const result = state.handle.materialize() + state.handle.enableFreeze(enabled) + return result as T } export function isAutomerge(doc: unknown): boolean { @@ -855,7 +980,19 @@ export function isAutomerge(doc: unknown): boolean { } function isObject(obj: unknown): obj is Record { - return typeof obj === 'object' && obj !== null + return typeof obj === "object" && obj !== null } -export type {API, SyncState, ActorId, Conflicts, Prop, Change, ObjID, DecodedChange, DecodedSyncMessage, Heads, MaterializeValue} +export type { + API, + SyncState, + ActorId, + Conflicts, + Prop, + Change, + ObjID, + DecodedChange, + DecodedSyncMessage, + Heads, + MaterializeValue, +} diff --git a/javascript/src/low_level.ts b/javascript/src/low_level.ts index 6eabfa52..51017cb3 100644 --- a/javascript/src/low_level.ts +++ b/javascript/src/low_level.ts @@ -1,5 +1,14 @@ - -import { Automerge, Change, DecodedChange, Actor, SyncState, SyncMessage, JsSyncState, DecodedSyncMessage, ChangeToEncode } from "@automerge/automerge-wasm" +import { + Automerge, + Change, + DecodedChange, + Actor, + SyncState, + SyncMessage, + JsSyncState, + DecodedSyncMessage, + ChangeToEncode, +} from "@automerge/automerge-wasm" export { ChangeToEncode } from "@automerge/automerge-wasm" import { API } from "@automerge/automerge-wasm" @@ -10,17 +19,39 @@ export function UseApi(api: API) { } /* eslint-disable */ -export const ApiHandler : API = { - create(actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called") }, - load(data: Uint8Array, actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called (load)") }, - encodeChange(change: ChangeToEncode): Change { throw new RangeError("Automerge.use() not called (encodeChange)") }, - decodeChange(change: Change): DecodedChange { throw new RangeError("Automerge.use() not called (decodeChange)") }, - initSyncState(): SyncState { throw new RangeError("Automerge.use() not called (initSyncState)") }, - encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { throw new RangeError("Automerge.use() not called (encodeSyncMessage)") }, - decodeSyncMessage(msg: SyncMessage): DecodedSyncMessage { throw new RangeError("Automerge.use() not called (decodeSyncMessage)") }, - encodeSyncState(state: SyncState): Uint8Array { throw new RangeError("Automerge.use() not called (encodeSyncState)") }, - decodeSyncState(data: Uint8Array): SyncState { throw new RangeError("Automerge.use() not called (decodeSyncState)") }, - exportSyncState(state: SyncState): JsSyncState { throw new 
RangeError("Automerge.use() not called (exportSyncState)") }, - importSyncState(state: JsSyncState): SyncState { throw new RangeError("Automerge.use() not called (importSyncState)") }, +export const ApiHandler: API = { + create(actor?: Actor): Automerge { + throw new RangeError("Automerge.use() not called") + }, + load(data: Uint8Array, actor?: Actor): Automerge { + throw new RangeError("Automerge.use() not called (load)") + }, + encodeChange(change: ChangeToEncode): Change { + throw new RangeError("Automerge.use() not called (encodeChange)") + }, + decodeChange(change: Change): DecodedChange { + throw new RangeError("Automerge.use() not called (decodeChange)") + }, + initSyncState(): SyncState { + throw new RangeError("Automerge.use() not called (initSyncState)") + }, + encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { + throw new RangeError("Automerge.use() not called (encodeSyncMessage)") + }, + decodeSyncMessage(msg: SyncMessage): DecodedSyncMessage { + throw new RangeError("Automerge.use() not called (decodeSyncMessage)") + }, + encodeSyncState(state: SyncState): Uint8Array { + throw new RangeError("Automerge.use() not called (encodeSyncState)") + }, + decodeSyncState(data: Uint8Array): SyncState { + throw new RangeError("Automerge.use() not called (decodeSyncState)") + }, + exportSyncState(state: SyncState): JsSyncState { + throw new RangeError("Automerge.use() not called (exportSyncState)") + }, + importSyncState(state: JsSyncState): SyncState { + throw new RangeError("Automerge.use() not called (importSyncState)") + }, } /* eslint-enable */ diff --git a/javascript/src/numbers.ts b/javascript/src/numbers.ts index 9d63bcc5..d52a36c5 100644 --- a/javascript/src/numbers.ts +++ b/javascript/src/numbers.ts @@ -3,10 +3,16 @@ import { INT, UINT, F64 } from "./constants" export class Int { - value: number; + value: number constructor(value: number) { - if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= Number.MIN_SAFE_INTEGER)) { + if ( + !( + Number.isInteger(value) && + value <= Number.MAX_SAFE_INTEGER && + value >= Number.MIN_SAFE_INTEGER + ) + ) { throw new RangeError(`Value ${value} cannot be a uint`) } this.value = value @@ -16,10 +22,16 @@ export class Int { } export class Uint { - value: number; + value: number constructor(value: number) { - if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= 0)) { + if ( + !( + Number.isInteger(value) && + value <= Number.MAX_SAFE_INTEGER && + value >= 0 + ) + ) { throw new RangeError(`Value ${value} cannot be a uint`) } this.value = value @@ -29,10 +41,10 @@ export class Uint { } export class Float64 { - value: number; + value: number constructor(value: number) { - if (typeof value !== 'number') { + if (typeof value !== "number") { throw new RangeError(`Value ${value} cannot be a float64`) } this.value = value || 0.0 @@ -40,4 +52,3 @@ export class Float64 { Object.freeze(this) } } - diff --git a/javascript/src/proxies.ts b/javascript/src/proxies.ts index ff03be4d..523c4547 100644 --- a/javascript/src/proxies.ts +++ b/javascript/src/proxies.ts @@ -1,123 +1,149 @@ - import { Automerge, Heads, ObjID } from "@automerge/automerge-wasm" import { Prop } from "@automerge/automerge-wasm" import { AutomergeValue, ScalarValue, MapValue, ListValue } from "./types" import { Counter, getWriteableCounter } from "./counter" -import { STATE, TRACE, IS_PROXY, OBJECT_ID, COUNTER, INT, UINT, F64 } from "./constants" +import { + STATE, + TRACE, + IS_PROXY, + OBJECT_ID, + COUNTER, + INT, + UINT, + F64, +} 
from "./constants" function parseListIndex(key) { - if (typeof key === 'string' && /^[0-9]+$/.test(key)) key = parseInt(key, 10) - if (typeof key !== 'number') { + if (typeof key === "string" && /^[0-9]+$/.test(key)) key = parseInt(key, 10) + if (typeof key !== "number") { return key } if (key < 0 || isNaN(key) || key === Infinity || key === -Infinity) { - throw new RangeError('A list index must be positive, but you passed ' + key) + throw new RangeError("A list index must be positive, but you passed " + key) } return key } -function valueAt(target, prop: Prop) : AutomergeValue | undefined { - const { context, objectId, path, readonly, heads} = target - const value = context.getWithType(objectId, prop, heads) - if (value === null) { - return - } - const datatype = value[0] - const val = value[1] - switch (datatype) { - case undefined: return; - case "map": return mapProxy(context, val, [ ... path, prop ], readonly, heads); - case "list": return listProxy(context, val, [ ... path, prop ], readonly, heads); - case "text": return context.text(val, heads); - case "str": return val; - case "uint": return val; - case "int": return val; - case "f64": return val; - case "boolean": return val; - case "null": return null; - case "bytes": return val; - case "timestamp": return val; - case "counter": { - if (readonly) { - return new Counter(val); - } else { - return getWriteableCounter(val, context, path, objectId, prop) - } - } - default: - throw RangeError(`datatype ${datatype} unimplemented`) +function valueAt(target, prop: Prop): AutomergeValue | undefined { + const { context, objectId, path, readonly, heads } = target + const value = context.getWithType(objectId, prop, heads) + if (value === null) { + return + } + const datatype = value[0] + const val = value[1] + switch (datatype) { + case undefined: + return + case "map": + return mapProxy(context, val, [...path, prop], readonly, heads) + case "list": + return listProxy(context, val, [...path, prop], readonly, heads) + case "text": + return context.text(val, heads) + case "str": + return val + case "uint": + return val + case "int": + return val + case "f64": + return val + case "boolean": + return val + case "null": + return null + case "bytes": + return val + case "timestamp": + return val + case "counter": { + if (readonly) { + return new Counter(val) + } else { + return getWriteableCounter(val, context, path, objectId, prop) } + } + default: + throw RangeError(`datatype ${datatype} unimplemented`) + } } function import_value(value) { - switch (typeof value) { - case 'object': - if (value == null) { - return [ null, "null"] - } else if (value[UINT]) { - return [ value.value, "uint" ] - } else if (value[INT]) { - return [ value.value, "int" ] - } else if (value[F64]) { - return [ value.value, "f64" ] - } else if (value[COUNTER]) { - return [ value.value, "counter" ] - } else if (value instanceof Date) { - return [ value.getTime(), "timestamp" ] - } else if (value instanceof Uint8Array) { - return [ value, "bytes" ] - } else if (value instanceof Array) { - return [ value, "list" ] - } else if (Object.getPrototypeOf(value) === Object.getPrototypeOf({})) { - return [ value, "map" ] - } else if (value[OBJECT_ID]) { - throw new RangeError('Cannot create a reference to an existing document object') - } else { - throw new RangeError(`Cannot assign unknown object: ${value}`) - } - break; - case 'boolean': - return [ value, "boolean" ] - case 'number': - if (Number.isInteger(value)) { - return [ value, "int" ] - } else { - return [ value, "f64" ] - } 
- break; - case 'string': - return [ value, "text" ] - break; - default: - throw new RangeError(`Unsupported type of value: ${typeof value}`) - } + switch (typeof value) { + case "object": + if (value == null) { + return [null, "null"] + } else if (value[UINT]) { + return [value.value, "uint"] + } else if (value[INT]) { + return [value.value, "int"] + } else if (value[F64]) { + return [value.value, "f64"] + } else if (value[COUNTER]) { + return [value.value, "counter"] + } else if (value instanceof Date) { + return [value.getTime(), "timestamp"] + } else if (value instanceof Uint8Array) { + return [value, "bytes"] + } else if (value instanceof Array) { + return [value, "list"] + } else if (Object.getPrototypeOf(value) === Object.getPrototypeOf({})) { + return [value, "map"] + } else if (value[OBJECT_ID]) { + throw new RangeError( + "Cannot create a reference to an existing document object" + ) + } else { + throw new RangeError(`Cannot assign unknown object: ${value}`) + } + break + case "boolean": + return [value, "boolean"] + case "number": + if (Number.isInteger(value)) { + return [value, "int"] + } else { + return [value, "f64"] + } + break + case "string": + return [value, "text"] + break + default: + throw new RangeError(`Unsupported type of value: ${typeof value}`) + } } const MapHandler = { - get (target, key) : AutomergeValue { + get(target, key): AutomergeValue { const { context, objectId, cache } = target - if (key === Symbol.toStringTag) { return target[Symbol.toStringTag] } + if (key === Symbol.toStringTag) { + return target[Symbol.toStringTag] + } if (key === OBJECT_ID) return objectId if (key === IS_PROXY) return true if (key === TRACE) return target.trace - if (key === STATE) return { handle: context }; + if (key === STATE) return { handle: context } if (!cache[key]) { cache[key] = valueAt(target, key) } return cache[key] }, - set (target, key, val) { - const { context, objectId, path, readonly, frozen} = target + set(target, key, val) { + const { context, objectId, path, readonly, frozen } = target target.cache = {} // reset cache on set if (val && val[OBJECT_ID]) { - throw new RangeError('Cannot create a reference to an existing document object') + throw new RangeError( + "Cannot create a reference to an existing document object" + ) } if (key === TRACE) { target.trace = val return true } - const [ value, datatype ] = import_value(val) + const [value, datatype] = import_value(val) if (frozen) { throw new RangeError("Attempting to use an outdated Automerge document") } @@ -127,7 +153,7 @@ const MapHandler = { switch (datatype) { case "list": { const list = context.putObject(objectId, key, []) - const proxyList = listProxy(context, list, [ ... path, key ], readonly ); + const proxyList = listProxy(context, list, [...path, key], readonly) for (let i = 0; i < value.length; i++) { proxyList[i] = value[i] } @@ -139,11 +165,11 @@ const MapHandler = { } case "map": { const map = context.putObject(objectId, key, {}) - const proxyMap = mapProxy(context, map, [ ... 
path, key ], readonly ); + const proxyMap = mapProxy(context, map, [...path, key], readonly) for (const key in value) { proxyMap[key] = value[key] } - break; + break } default: context.put(objectId, key, value, datatype) @@ -151,7 +177,7 @@ const MapHandler = { return true }, - deleteProperty (target, key) { + deleteProperty(target, key) { const { context, objectId, readonly } = target target.cache = {} // reset cache on delete if (readonly) { @@ -161,62 +187,71 @@ const MapHandler = { return true }, - has (target, key) { + has(target, key) { const value = this.get(target, key) return value !== undefined }, - getOwnPropertyDescriptor (target, key) { + getOwnPropertyDescriptor(target, key) { // const { context, objectId } = target const value = this.get(target, key) - if (typeof value !== 'undefined') { + if (typeof value !== "undefined") { return { - configurable: true, enumerable: true, value + configurable: true, + enumerable: true, + value, } } }, - ownKeys (target) { - const { context, objectId, heads} = target + ownKeys(target) { + const { context, objectId, heads } = target // FIXME - this is a tmp workaround until fix the dupe key bug in keys() const keys = context.keys(objectId, heads) return [...new Set(keys)] }, } - const ListHandler = { - get (target, index) { - const {context, objectId, heads } = target + get(target, index) { + const { context, objectId, heads } = target index = parseListIndex(index) - if (index === Symbol.hasInstance) { return (instance) => { return Array.isArray(instance) } } - if (index === Symbol.toStringTag) { return target[Symbol.toStringTag] } + if (index === Symbol.hasInstance) { + return instance => { + return Array.isArray(instance) + } + } + if (index === Symbol.toStringTag) { + return target[Symbol.toStringTag] + } if (index === OBJECT_ID) return objectId if (index === IS_PROXY) return true if (index === TRACE) return target.trace - if (index === STATE) return { handle: context }; - if (index === 'length') return context.length(objectId, heads); - if (typeof index === 'number') { + if (index === STATE) return { handle: context } + if (index === "length") return context.length(objectId, heads) + if (typeof index === "number") { return valueAt(target, index) } else { return listMethods(target)[index] } }, - set (target, index, val) { - const {context, objectId, path, readonly, frozen } = target + set(target, index, val) { + const { context, objectId, path, readonly, frozen } = target index = parseListIndex(index) if (val && val[OBJECT_ID]) { - throw new RangeError('Cannot create a reference to an existing document object') + throw new RangeError( + "Cannot create a reference to an existing document object" + ) } if (index === TRACE) { target.trace = val return true } if (typeof index == "string") { - throw new RangeError('list index must be a number') + throw new RangeError("list index must be a number") } - const [ value, datatype] = import_value(val) + const [value, datatype] = import_value(val) if (frozen) { throw new RangeError("Attempting to use an outdated Automerge document") } @@ -231,9 +266,9 @@ const ListHandler = { } else { list = context.putObject(objectId, index, []) } - const proxyList = listProxy(context, list, [ ... 
path, index ], readonly); - proxyList.splice(0,0,...value) - break; + const proxyList = listProxy(context, list, [...path, index], readonly) + proxyList.splice(0, 0, ...value) + break } case "text": { if (index >= context.length(objectId)) { @@ -241,7 +276,7 @@ const ListHandler = { } else { context.putObject(objectId, index, value, "text") } - break; + break } case "map": { let map @@ -250,11 +285,11 @@ const ListHandler = { } else { map = context.putObject(objectId, index, {}) } - const proxyMap = mapProxy(context, map, [ ... path, index ], readonly); + const proxyMap = mapProxy(context, map, [...path, index], readonly) for (const key in value) { proxyMap[key] = value[key] } - break; + break } default: if (index >= context.length(objectId)) { @@ -266,30 +301,34 @@ const ListHandler = { return true }, - deleteProperty (target, index) { - const {context, objectId} = target + deleteProperty(target, index) { + const { context, objectId } = target index = parseListIndex(index) if (context.get(objectId, index)[0] == "counter") { - throw new TypeError('Unsupported operation: deleting a counter from a list') + throw new TypeError( + "Unsupported operation: deleting a counter from a list" + ) } context.delete(objectId, index) return true }, - has (target, index) { - const {context, objectId, heads} = target + has(target, index) { + const { context, objectId, heads } = target index = parseListIndex(index) - if (typeof index === 'number') { + if (typeof index === "number") { return index < context.length(objectId, heads) } - return index === 'length' + return index === "length" }, - getOwnPropertyDescriptor (target, index) { - const {context, objectId, heads} = target + getOwnPropertyDescriptor(target, index) { + const { context, objectId, heads } = target - if (index === 'length') return {writable: true, value: context.length(objectId, heads) } - if (index === OBJECT_ID) return {configurable: false, enumerable: false, value: objectId} + if (index === "length") + return { writable: true, value: context.length(objectId, heads) } + if (index === OBJECT_ID) + return { configurable: false, enumerable: false, value: objectId } index = parseListIndex(index) @@ -297,38 +336,71 @@ const ListHandler = { return { configurable: true, enumerable: true, value } }, - getPrototypeOf(target) { return Object.getPrototypeOf(target) }, - ownKeys (/*target*/) : string[] { - const keys : string[] = [] + getPrototypeOf(target) { + return Object.getPrototypeOf(target) + }, + ownKeys(/*target*/): string[] { + const keys: string[] = [] // uncommenting this causes assert.deepEqual() to fail when comparing to a pojo array // but not uncommenting it causes for (i in list) {} to not enumerate values properly //const {context, objectId, heads } = target //for (let i = 0; i < target.context.length(objectId, heads); i++) { keys.push(i.toString()) } - keys.push("length"); + keys.push("length") return keys - } + }, } -export function mapProxy(context: Automerge, objectId: ObjID, path?: Prop[], readonly?: boolean, heads?: Heads) : MapValue { - return new Proxy({context, objectId, path, readonly: !!readonly, frozen: false, heads, cache: {}}, MapHandler) +export function mapProxy( + context: Automerge, + objectId: ObjID, + path?: Prop[], + readonly?: boolean, + heads?: Heads +): MapValue { + return new Proxy( + { + context, + objectId, + path, + readonly: !!readonly, + frozen: false, + heads, + cache: {}, + }, + MapHandler + ) } -export function listProxy(context: Automerge, objectId: ObjID, path?: Prop[], readonly?: boolean, heads?: 
Heads) : ListValue { +export function listProxy( + context: Automerge, + objectId: ObjID, + path?: Prop[], + readonly?: boolean, + heads?: Heads +): ListValue { const target = [] - Object.assign(target, {context, objectId, path, readonly: !!readonly, frozen: false, heads, cache: {}}) + Object.assign(target, { + context, + objectId, + path, + readonly: !!readonly, + frozen: false, + heads, + cache: {}, + }) return new Proxy(target, ListHandler) } -export function rootProxy(context: Automerge, readonly?: boolean) : T { +export function rootProxy(context: Automerge, readonly?: boolean): T { /* eslint-disable-next-line */ return mapProxy(context, "_root", [], !!readonly) } function listMethods(target) { - const {context, objectId, path, readonly, frozen, heads} = target + const { context, objectId, path, readonly, frozen, heads } = target const methods = { deleteAt(index, numDelete) { - if (typeof numDelete === 'number') { + if (typeof numDelete === "number") { context.splice(objectId, index, numDelete) } else { context.delete(objectId, index) @@ -355,7 +427,7 @@ function listMethods(target) { const length = context.length(objectId) for (let i = start; i < length; i++) { const value = context.getWithType(objectId, i, heads) - if (value && value[1] === o[OBJECT_ID] || value[1] === o) { + if ((value && value[1] === o[OBJECT_ID]) || value[1] === o) { return i } } @@ -395,16 +467,20 @@ function listMethods(target) { del = parseListIndex(del) for (const val of vals) { if (val && val[OBJECT_ID]) { - throw new RangeError('Cannot create a reference to an existing document object') + throw new RangeError( + "Cannot create a reference to an existing document object" + ) } } if (frozen) { throw new RangeError("Attempting to use an outdated Automerge document") } if (readonly) { - throw new RangeError("Sequence object cannot be modified outside of a change block") + throw new RangeError( + "Sequence object cannot be modified outside of a change block" + ) } - const result : AutomergeValue[] = [] + const result: AutomergeValue[] = [] for (let i = 0; i < del; i++) { const value = valueAt(target, index) if (value !== undefined) { @@ -412,26 +488,31 @@ function listMethods(target) { } context.delete(objectId, index) } - const values = vals.map((val) => import_value(val)) - for (const [value,datatype] of values) { + const values = vals.map(val => import_value(val)) + for (const [value, datatype] of values) { switch (datatype) { case "list": { const list = context.insertObject(objectId, index, []) - const proxyList = listProxy(context, list, [ ... path, index ], readonly); - proxyList.splice(0,0,...value) - break; + const proxyList = listProxy( + context, + list, + [...path, index], + readonly + ) + proxyList.splice(0, 0, ...value) + break } case "text": { context.insertObject(objectId, index, value) - break; + break } case "map": { const map = context.insertObject(objectId, index, {}) - const proxyMap = mapProxy(context, map, [ ... 
path, index ], readonly); + const proxyMap = mapProxy(context, map, [...path, index], readonly) for (const key in value) { proxyMap[key] = value[key] } - break; + break } default: context.insert(objectId, index, value, datatype) @@ -447,35 +528,38 @@ function listMethods(target) { }, entries() { - const i = 0; + const i = 0 const iterator = { next: () => { const value = valueAt(target, i) if (value === undefined) { return { value: undefined, done: true } } else { - return { value: [ i, value ], done: false } + return { value: [i, value], done: false } } - } + }, } return iterator }, keys() { - let i = 0; + let i = 0 const len = context.length(objectId, heads) const iterator = { next: () => { - let value : undefined | number = undefined - if (i < len) { value = i; i++ } + let value: undefined | number = undefined + if (i < len) { + value = i + i++ + } return { value, done: true } - } + }, } return iterator }, values() { - const i = 0; + const i = 0 const iterator = { next: () => { const value = valueAt(target, i) @@ -484,13 +568,13 @@ function listMethods(target) { } else { return { value, done: false } } - } + }, } return iterator }, - toArray() : AutomergeValue[] { - const list : AutomergeValue = [] + toArray(): AutomergeValue[] { + const list: AutomergeValue = [] let value do { value = valueAt(target, list.length) @@ -502,36 +586,36 @@ function listMethods(target) { return list }, - map(f: (AutomergeValue, number) => T) : T[] { + map(f: (AutomergeValue, number) => T): T[] { return this.toArray().map(f) }, - toString() : string { + toString(): string { return this.toArray().toString() }, - toLocaleString() : string { + toLocaleString(): string { return this.toArray().toLocaleString() }, - forEach(f: (AutomergeValue, number) => undefined ) { + forEach(f: (AutomergeValue, number) => undefined) { return this.toArray().forEach(f) }, // todo: real concat function is different - concat(other: AutomergeValue[]) : AutomergeValue[] { + concat(other: AutomergeValue[]): AutomergeValue[] { return this.toArray().concat(other) }, - every(f: (AutomergeValue, number) => boolean) : boolean { + every(f: (AutomergeValue, number) => boolean): boolean { return this.toArray().every(f) }, - filter(f: (AutomergeValue, number) => boolean) : AutomergeValue[] { + filter(f: (AutomergeValue, number) => boolean): AutomergeValue[] { return this.toArray().filter(f) }, - find(f: (AutomergeValue, number) => boolean) : AutomergeValue | undefined { + find(f: (AutomergeValue, number) => boolean): AutomergeValue | undefined { let index = 0 for (const v of this) { if (f(v, index)) { @@ -541,7 +625,7 @@ function listMethods(target) { } }, - findIndex(f: (AutomergeValue, number) => boolean) : number { + findIndex(f: (AutomergeValue, number) => boolean): number { let index = 0 for (const v of this) { if (f(v, index)) { @@ -552,37 +636,40 @@ function listMethods(target) { return -1 }, - includes(elem: AutomergeValue) : boolean { - return this.find((e) => e === elem) !== undefined + includes(elem: AutomergeValue): boolean { + return this.find(e => e === elem) !== undefined }, - join(sep?: string) : string { + join(sep?: string): string { return this.toArray().join(sep) }, // todo: remove the any - reduce(f: (any, AutomergeValue) => T, initalValue?: T) : T | undefined { - return this.toArray().reduce(f,initalValue) + reduce(f: (any, AutomergeValue) => T, initalValue?: T): T | undefined { + return this.toArray().reduce(f, initalValue) }, // todo: remove the any - reduceRight(f: (any, AutomergeValue) => T, initalValue?: T) : T | 
undefined{ - return this.toArray().reduceRight(f,initalValue) + reduceRight( + f: (any, AutomergeValue) => T, + initalValue?: T + ): T | undefined { + return this.toArray().reduceRight(f, initalValue) }, - lastIndexOf(search: AutomergeValue, fromIndex = +Infinity) : number { + lastIndexOf(search: AutomergeValue, fromIndex = +Infinity): number { // this can be faster - return this.toArray().lastIndexOf(search,fromIndex) + return this.toArray().lastIndexOf(search, fromIndex) }, - slice(index?: number, num?: number) : AutomergeValue[] { - return this.toArray().slice(index,num) + slice(index?: number, num?: number): AutomergeValue[] { + return this.toArray().slice(index, num) }, - some(f: (AutomergeValue, number) => boolean) : boolean { - let index = 0; + some(f: (AutomergeValue, number) => boolean): boolean { + let index = 0 for (const v of this) { - if (f(v,index)) { + if (f(v, index)) { return true } index += 1 @@ -590,16 +677,15 @@ function listMethods(target) { return false }, - [Symbol.iterator]: function *() { - let i = 0; + [Symbol.iterator]: function* () { + let i = 0 let value = valueAt(target, i) while (value !== undefined) { - yield value - i += 1 - value = valueAt(target, i) + yield value + i += 1 + value = valueAt(target, i) } - } + }, } return methods } - diff --git a/javascript/src/types.ts b/javascript/src/types.ts index add3f492..62fdbba8 100644 --- a/javascript/src/types.ts +++ b/javascript/src/types.ts @@ -1,10 +1,19 @@ - -export { Counter } from "./counter" -export { Int, Uint, Float64 } from "./numbers" +export { Counter } from "./counter" +export { Int, Uint, Float64 } from "./numbers" import { Counter } from "./counter" -export type AutomergeValue = ScalarValue | { [key: string]: AutomergeValue } | Array -export type MapValue = { [key: string]: AutomergeValue } -export type ListValue = Array -export type ScalarValue = string | number | null | boolean | Date | Counter | Uint8Array +export type AutomergeValue = + | ScalarValue + | { [key: string]: AutomergeValue } + | Array +export type MapValue = { [key: string]: AutomergeValue } +export type ListValue = Array +export type ScalarValue = + | string + | number + | null + | boolean + | Date + | Counter + | Uint8Array diff --git a/javascript/src/uuid.ts b/javascript/src/uuid.ts index 5ddb5ae6..421ddf9d 100644 --- a/javascript/src/uuid.ts +++ b/javascript/src/uuid.ts @@ -1,21 +1,24 @@ -import { v4 } from 'uuid' +import { v4 } from "uuid" function defaultFactory() { - return v4().replace(/-/g, '') + return v4().replace(/-/g, "") } let factory = defaultFactory interface UUIDFactory extends Function { - setFactory(f: typeof factory): void; - reset(): void; + setFactory(f: typeof factory): void + reset(): void } -export const uuid : UUIDFactory = () => { +export const uuid: UUIDFactory = () => { return factory() } -uuid.setFactory = newFactory => { factory = newFactory } - -uuid.reset = () => { factory = defaultFactory } +uuid.setFactory = newFactory => { + factory = newFactory +} +uuid.reset = () => { + factory = defaultFactory +} diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index e50e8782..8bf30914 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -1,366 +1,473 @@ -import * as assert from 'assert' -import * as Automerge from '../src' +import * as assert from "assert" +import * as Automerge from "../src" import * as WASM from "@automerge/automerge-wasm" -describe('Automerge', () => { - describe('basics', () => { - it('should init clone and free', () => { - let doc1 = 
Automerge.init() - let doc2 = Automerge.clone(doc1); +describe("Automerge", () => { + describe("basics", () => { + it("should init clone and free", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.clone(doc1) - // this is only needed if weakrefs are not supported - Automerge.free(doc1) - Automerge.free(doc2) - }) - - it('should be able to make a view with specifc heads', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => d.value = 1) - let heads2 = Automerge.getHeads(doc2) - let doc3 = Automerge.change(doc2, (d) => d.value = 2) - let doc2_v2 = Automerge.view(doc3, heads2) - assert.deepEqual(doc2, doc2_v2) - let doc2_v2_clone = Automerge.clone(doc2, "aabbcc") - assert.deepEqual(doc2, doc2_v2_clone) - assert.equal(Automerge.getActorId(doc2_v2_clone), "aabbcc") - }) - - it("should allow you to change a clone of a view", () => { - let doc1 = Automerge.init() - doc1 = Automerge.change(doc1, d => d.key = "value") - let heads = Automerge.getHeads(doc1) - doc1 = Automerge.change(doc1, d => d.key = "value2") - let fork = Automerge.clone(Automerge.view(doc1, heads)) - assert.deepEqual(fork, {key: "value"}) - fork = Automerge.change(fork, d => d.key = "value3") - assert.deepEqual(fork, {key: "value3"}) - }) - - it('handle basic set and read on root object', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.hello = "world" - d.big = "little" - d.zip = "zop" - d.app = "dap" - assert.deepEqual(d, { hello: "world", big: "little", zip: "zop", app: "dap" }) - }) - assert.deepEqual(doc2, { hello: "world", big: "little", zip: "zop", app: "dap" }) - }) - - it('can detect an automerge doc with isAutomerge()', () => { - const doc1 = Automerge.from({ sub: { object: true } }) - assert(Automerge.isAutomerge(doc1)) - assert(!Automerge.isAutomerge(doc1.sub)) - assert(!Automerge.isAutomerge("String")) - assert(!Automerge.isAutomerge({ sub: { object: true }})) - assert(!Automerge.isAutomerge(undefined)) - const jsObj = Automerge.toJS(doc1) - assert(!Automerge.isAutomerge(jsObj)) - assert.deepEqual(jsObj, doc1) - }) - - it('it should recursively freeze the document if requested', () => { - let doc1 = Automerge.init({ freeze: true } ) - let doc2 = Automerge.init() - - assert(Object.isFrozen(doc1)) - assert(!Object.isFrozen(doc2)) - - // will also freeze sub objects - doc1 = Automerge.change(doc1, (doc) => doc.book = { title: "how to win friends" }) - doc2 = Automerge.merge(doc2,doc1) - assert(Object.isFrozen(doc1)) - assert(Object.isFrozen(doc1.book)) - assert(!Object.isFrozen(doc2)) - assert(!Object.isFrozen(doc2.book)) - - // works on from - let doc3 = Automerge.from({ sub: { obj: "inner" } }, { freeze: true }) - assert(Object.isFrozen(doc3)) - assert(Object.isFrozen(doc3.sub)) - - // works on load - let doc4 = Automerge.load(Automerge.save(doc3), { freeze: true }) - assert(Object.isFrozen(doc4)) - assert(Object.isFrozen(doc4.sub)) - - // follows clone - let doc5 = Automerge.clone(doc4) - assert(Object.isFrozen(doc5)) - assert(Object.isFrozen(doc5.sub)) - - // toJS does not freeze - let exported = Automerge.toJS(doc5) - assert(!Object.isFrozen(exported)) - }) - - it('handle basic sets over many changes', () => { - let doc1 = Automerge.init() - let timestamp = new Date(); - let counter = new Automerge.Counter(100); - let bytes = new Uint8Array([10,11,12]); - let doc2 = Automerge.change(doc1, (d) => { - d.hello = "world" - }) - let doc3 = Automerge.change(doc2, (d) => { - d.counter1 = counter - }) - let doc4 = Automerge.change(doc3, (d) => { 
- d.timestamp1 = timestamp - }) - let doc5 = Automerge.change(doc4, (d) => { - d.app = null - }) - let doc6 = Automerge.change(doc5, (d) => { - d.bytes1 = bytes - }) - let doc7 = Automerge.change(doc6, (d) => { - d.uint = new Automerge.Uint(1) - d.int = new Automerge.Int(-1) - d.float64 = new Automerge.Float64(5.5) - d.number1 = 100 - d.number2 = -45.67 - d.true = true - d.false = false - }) - - assert.deepEqual(doc7, { hello: "world", true: true, false: false, int: -1, uint: 1, float64: 5.5, number1: 100, number2: -45.67, counter1: counter, timestamp1: timestamp, bytes1: bytes, app: null }) - - let changes = Automerge.getAllChanges(doc7) - let t1 = Automerge.init() - ;let [t2] = Automerge.applyChanges(t1, changes) - assert.deepEqual(doc7,t2) - }) - - it('handle overwrites to values', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.hello = "world1" - }) - let doc3 = Automerge.change(doc2, (d) => { - d.hello = "world2" - }) - let doc4 = Automerge.change(doc3, (d) => { - d.hello = "world3" - }) - let doc5 = Automerge.change(doc4, (d) => { - d.hello = "world4" - }) - assert.deepEqual(doc5, { hello: "world4" } ) - }) - - it('handle set with object value', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.subobj = { hello: "world", subsubobj: { zip: "zop" } } - }) - assert.deepEqual(doc2, { subobj: { hello: "world", subsubobj: { zip: "zop" } } }) - }) - - it('handle simple list creation', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => d.list = []) - assert.deepEqual(doc2, { list: []}) - }) - - it('handle simple lists', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.list = [ 1, 2, 3 ] - }) - assert.deepEqual(doc2.list.length, 3) - assert.deepEqual(doc2.list[0], 1) - assert.deepEqual(doc2.list[1], 2) - assert.deepEqual(doc2.list[2], 3) - assert.deepEqual(doc2, { list: [1,2,3] }) - // assert.deepStrictEqual(Automerge.toJS(doc2), { list: [1,2,3] }) - - let doc3 = Automerge.change(doc2, (d) => { - d.list[1] = "a" - }) - - assert.deepEqual(doc3.list.length, 3) - assert.deepEqual(doc3.list[0], 1) - assert.deepEqual(doc3.list[1], "a") - assert.deepEqual(doc3.list[2], 3) - assert.deepEqual(doc3, { list: [1,"a",3] }) - }) - it('handle simple lists', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.list = [ 1, 2, 3 ] - }) - let changes = Automerge.getChanges(doc1, doc2) - let docB1 = Automerge.init() - ;let [docB2] = Automerge.applyChanges(docB1, changes) - assert.deepEqual(docB2, doc2); - }) - it('handle text', () => { - let doc1 = Automerge.init() - let doc2 = Automerge.change(doc1, (d) => { - d.list = "hello" - Automerge.splice(d, "list", 2, 0, "Z") - }) - let changes = Automerge.getChanges(doc1, doc2) - let docB1 = Automerge.init() - ;let [docB2] = Automerge.applyChanges(docB1, changes) - assert.deepEqual(docB2, doc2); - }) - - it('handle non-text strings', () => { - let doc1 = WASM.create(); - doc1.put("_root", "text", "hello world"); - let doc2 = Automerge.load(doc1.save()) - assert.throws(() => { - Automerge.change(doc2, (d) => { Automerge.splice(d, "text", 1, 0, "Z") }) - }, /Cannot splice/) - }) - - it('have many list methods', () => { - let doc1 = Automerge.from({ list: [1,2,3] }) - assert.deepEqual(doc1, { list: [1,2,3] }); - let doc2 = Automerge.change(doc1, (d) => { - d.list.splice(1,1,9,10) - }) - assert.deepEqual(doc2, { list: [1,9,10,3] }); - let doc3 = Automerge.change(doc2, (d) => { - d.list.push(11,12) 
- }) - assert.deepEqual(doc3, { list: [1,9,10,3,11,12] }); - let doc4 = Automerge.change(doc3, (d) => { - d.list.unshift(2,2) - }) - assert.deepEqual(doc4, { list: [2,2,1,9,10,3,11,12] }); - let doc5 = Automerge.change(doc4, (d) => { - d.list.shift() - }) - assert.deepEqual(doc5, { list: [2,1,9,10,3,11,12] }); - let doc6 = Automerge.change(doc5, (d) => { - // @ts-ignore - d.list.insertAt(3,100,101) - }) - assert.deepEqual(doc6, { list: [2,1,9,100,101,10,3,11,12] }); - }) - - it('allows access to the backend', () => { - let doc = Automerge.init() - assert.deepEqual(Object.keys(Automerge.getBackend(doc)), ["ptr"]) - }) - - it('lists and text have indexof', () => { - let doc = Automerge.from({ list: [0,1,2,3,4,5,6], text: "hello world" }) - assert.deepEqual(doc.list.indexOf(5), 5) - assert.deepEqual(doc.text.indexOf("world"), 6) - }) + // this is only needed if weakrefs are not supported + Automerge.free(doc1) + Automerge.free(doc2) }) - describe('emptyChange', () => { - it('should generate a hash', () => { - let doc = Automerge.init() - doc = Automerge.change(doc, d => { - d.key = "value" - }) - Automerge.save(doc) - let headsBefore = Automerge.getHeads(doc) - headsBefore.sort() - doc = Automerge.emptyChange(doc, "empty change") - let headsAfter = Automerge.getHeads(doc) - headsAfter.sort() - assert.notDeepEqual(headsBefore, headsAfter) - }) + it("should be able to make a view with specifc heads", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => (d.value = 1)) + let heads2 = Automerge.getHeads(doc2) + let doc3 = Automerge.change(doc2, d => (d.value = 2)) + let doc2_v2 = Automerge.view(doc3, heads2) + assert.deepEqual(doc2, doc2_v2) + let doc2_v2_clone = Automerge.clone(doc2, "aabbcc") + assert.deepEqual(doc2, doc2_v2_clone) + assert.equal(Automerge.getActorId(doc2_v2_clone), "aabbcc") }) - describe('proxy lists', () => { - it('behave like arrays', () => { - let doc = Automerge.from({ - chars: ["a","b","c"], - numbers: [20,3,100], - repeats: [20,20,3,3,3,3,100,100] - }) - let r1: Array = [] - doc = Automerge.change(doc, (d) => { - assert.deepEqual((d.chars as any[]).concat([1,2]), ["a","b","c",1,2]) - assert.deepEqual(d.chars.map((n) => n + "!"), ["a!", "b!", "c!"]) - assert.deepEqual(d.numbers.map((n) => n + 10), [30, 13, 110]) - assert.deepEqual(d.numbers.toString(), "20,3,100") - assert.deepEqual(d.numbers.toLocaleString(), "20,3,100") - assert.deepEqual(d.numbers.forEach((n: number) => r1.push(n)), undefined) - assert.deepEqual(d.numbers.every((n) => n > 1), true) - assert.deepEqual(d.numbers.every((n) => n > 10), false) - assert.deepEqual(d.numbers.filter((n) => n > 10), [20,100]) - assert.deepEqual(d.repeats.find((n) => n < 10), 3) - assert.deepEqual(d.repeats.find((n) => n < 10), 3) - assert.deepEqual(d.repeats.find((n) => n < 0), undefined) - assert.deepEqual(d.repeats.findIndex((n) => n < 10), 2) - assert.deepEqual(d.repeats.findIndex((n) => n < 0), -1) - assert.deepEqual(d.repeats.findIndex((n) => n < 10), 2) - assert.deepEqual(d.repeats.findIndex((n) => n < 0), -1) - assert.deepEqual(d.numbers.includes(3), true) - assert.deepEqual(d.numbers.includes(-3), false) - assert.deepEqual(d.numbers.join("|"), "20|3|100") - assert.deepEqual(d.numbers.join(), "20,3,100") - assert.deepEqual(d.numbers.some((f) => f === 3), true) - assert.deepEqual(d.numbers.some((f) => f < 0), false) - assert.deepEqual(d.numbers.reduce((sum,n) => sum + n, 100), 223) - assert.deepEqual(d.repeats.reduce((sum,n) => sum + n, 100), 352) - assert.deepEqual(d.chars.reduce((sum,n) => sum + 
n, "="), "=abc") - assert.deepEqual(d.chars.reduceRight((sum,n) => sum + n, "="), "=cba") - assert.deepEqual(d.numbers.reduceRight((sum,n) => sum + n, 100), 223) - assert.deepEqual(d.repeats.lastIndexOf(3), 5) - assert.deepEqual(d.repeats.lastIndexOf(3,3), 3) - }) - doc = Automerge.change(doc, (d) => { - assert.deepEqual(d.numbers.fill(-1,1,2), [20,-1,100]) - assert.deepEqual(d.chars.fill("z",1,100), ["a","z","z"]) - }) - assert.deepEqual(r1, [20,3,100]) - assert.deepEqual(doc.numbers, [20,-1,100]) - assert.deepEqual(doc.chars, ["a","z","z"]) - }) - }) - - it('should obtain the same conflicts, regardless of merge order', () => { - let s1 = Automerge.init() - let s2 = Automerge.init() - s1 = Automerge.change(s1, doc => { doc.x = 1; doc.y = 2 }) - s2 = Automerge.change(s2, doc => { doc.x = 3; doc.y = 4 }) - const m1 = Automerge.merge(Automerge.clone(s1), Automerge.clone(s2)) - const m2 = Automerge.merge(Automerge.clone(s2), Automerge.clone(s1)) - assert.deepStrictEqual(Automerge.getConflicts(m1, 'x'), Automerge.getConflicts(m2, 'x')) + it("should allow you to change a clone of a view", () => { + let doc1 = Automerge.init() + doc1 = Automerge.change(doc1, d => (d.key = "value")) + let heads = Automerge.getHeads(doc1) + doc1 = Automerge.change(doc1, d => (d.key = "value2")) + let fork = Automerge.clone(Automerge.view(doc1, heads)) + assert.deepEqual(fork, { key: "value" }) + fork = Automerge.change(fork, d => (d.key = "value3")) + assert.deepEqual(fork, { key: "value3" }) }) - describe("getObjectId", () => { - let s1 = Automerge.from({ - "string": "string", - "number": 1, - "null": null, - "date": new Date(), - "counter": new Automerge.Counter(), - "bytes": new Uint8Array(10), - "text": "", - "list": [], - "map": {} - }) - - it("should return null for scalar values", () => { - assert.equal(Automerge.getObjectId(s1.string), null) - assert.equal(Automerge.getObjectId(s1.number), null) - assert.equal(Automerge.getObjectId(s1.null!), null) - assert.equal(Automerge.getObjectId(s1.date), null) - assert.equal(Automerge.getObjectId(s1.counter), null) - assert.equal(Automerge.getObjectId(s1.bytes), null) - }) - - it("should return _root for the root object", () => { - assert.equal(Automerge.getObjectId(s1), "_root") - }) - - it("should return non-null for map, list, text, and objects", () => { - assert.equal(Automerge.getObjectId(s1.text), null) - assert.notEqual(Automerge.getObjectId(s1.list), null) - assert.notEqual(Automerge.getObjectId(s1.map), null) + it("handle basic set and read on root object", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.hello = "world" + d.big = "little" + d.zip = "zop" + d.app = "dap" + assert.deepEqual(d, { + hello: "world", + big: "little", + zip: "zop", + app: "dap", }) + }) + assert.deepEqual(doc2, { + hello: "world", + big: "little", + zip: "zop", + app: "dap", + }) }) + + it("can detect an automerge doc with isAutomerge()", () => { + const doc1 = Automerge.from({ sub: { object: true } }) + assert(Automerge.isAutomerge(doc1)) + assert(!Automerge.isAutomerge(doc1.sub)) + assert(!Automerge.isAutomerge("String")) + assert(!Automerge.isAutomerge({ sub: { object: true } })) + assert(!Automerge.isAutomerge(undefined)) + const jsObj = Automerge.toJS(doc1) + assert(!Automerge.isAutomerge(jsObj)) + assert.deepEqual(jsObj, doc1) + }) + + it("it should recursively freeze the document if requested", () => { + let doc1 = Automerge.init({ freeze: true }) + let doc2 = Automerge.init() + + assert(Object.isFrozen(doc1)) + 
assert(!Object.isFrozen(doc2)) + + // will also freeze sub objects + doc1 = Automerge.change( + doc1, + doc => (doc.book = { title: "how to win friends" }) + ) + doc2 = Automerge.merge(doc2, doc1) + assert(Object.isFrozen(doc1)) + assert(Object.isFrozen(doc1.book)) + assert(!Object.isFrozen(doc2)) + assert(!Object.isFrozen(doc2.book)) + + // works on from + let doc3 = Automerge.from({ sub: { obj: "inner" } }, { freeze: true }) + assert(Object.isFrozen(doc3)) + assert(Object.isFrozen(doc3.sub)) + + // works on load + let doc4 = Automerge.load(Automerge.save(doc3), { freeze: true }) + assert(Object.isFrozen(doc4)) + assert(Object.isFrozen(doc4.sub)) + + // follows clone + let doc5 = Automerge.clone(doc4) + assert(Object.isFrozen(doc5)) + assert(Object.isFrozen(doc5.sub)) + + // toJS does not freeze + let exported = Automerge.toJS(doc5) + assert(!Object.isFrozen(exported)) + }) + + it("handle basic sets over many changes", () => { + let doc1 = Automerge.init() + let timestamp = new Date() + let counter = new Automerge.Counter(100) + let bytes = new Uint8Array([10, 11, 12]) + let doc2 = Automerge.change(doc1, d => { + d.hello = "world" + }) + let doc3 = Automerge.change(doc2, d => { + d.counter1 = counter + }) + let doc4 = Automerge.change(doc3, d => { + d.timestamp1 = timestamp + }) + let doc5 = Automerge.change(doc4, d => { + d.app = null + }) + let doc6 = Automerge.change(doc5, d => { + d.bytes1 = bytes + }) + let doc7 = Automerge.change(doc6, d => { + d.uint = new Automerge.Uint(1) + d.int = new Automerge.Int(-1) + d.float64 = new Automerge.Float64(5.5) + d.number1 = 100 + d.number2 = -45.67 + d.true = true + d.false = false + }) + + assert.deepEqual(doc7, { + hello: "world", + true: true, + false: false, + int: -1, + uint: 1, + float64: 5.5, + number1: 100, + number2: -45.67, + counter1: counter, + timestamp1: timestamp, + bytes1: bytes, + app: null, + }) + + let changes = Automerge.getAllChanges(doc7) + let t1 = Automerge.init() + let [t2] = Automerge.applyChanges(t1, changes) + assert.deepEqual(doc7, t2) + }) + + it("handle overwrites to values", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.hello = "world1" + }) + let doc3 = Automerge.change(doc2, d => { + d.hello = "world2" + }) + let doc4 = Automerge.change(doc3, d => { + d.hello = "world3" + }) + let doc5 = Automerge.change(doc4, d => { + d.hello = "world4" + }) + assert.deepEqual(doc5, { hello: "world4" }) + }) + + it("handle set with object value", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.subobj = { hello: "world", subsubobj: { zip: "zop" } } + }) + assert.deepEqual(doc2, { + subobj: { hello: "world", subsubobj: { zip: "zop" } }, + }) + }) + + it("handle simple list creation", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => (d.list = [])) + assert.deepEqual(doc2, { list: [] }) + }) + + it("handle simple lists", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.list = [1, 2, 3] + }) + assert.deepEqual(doc2.list.length, 3) + assert.deepEqual(doc2.list[0], 1) + assert.deepEqual(doc2.list[1], 2) + assert.deepEqual(doc2.list[2], 3) + assert.deepEqual(doc2, { list: [1, 2, 3] }) + // assert.deepStrictEqual(Automerge.toJS(doc2), { list: [1,2,3] }) + + let doc3 = Automerge.change(doc2, d => { + d.list[1] = "a" + }) + + assert.deepEqual(doc3.list.length, 3) + assert.deepEqual(doc3.list[0], 1) + assert.deepEqual(doc3.list[1], "a") + assert.deepEqual(doc3.list[2], 3) + assert.deepEqual(doc3, { 
list: [1, "a", 3] }) + }) + it("handle simple lists", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.list = [1, 2, 3] + }) + let changes = Automerge.getChanges(doc1, doc2) + let docB1 = Automerge.init() + let [docB2] = Automerge.applyChanges(docB1, changes) + assert.deepEqual(docB2, doc2) + }) + it("handle text", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.change(doc1, d => { + d.list = "hello" + Automerge.splice(d, "list", 2, 0, "Z") + }) + let changes = Automerge.getChanges(doc1, doc2) + let docB1 = Automerge.init() + let [docB2] = Automerge.applyChanges(docB1, changes) + assert.deepEqual(docB2, doc2) + }) + + it("handle non-text strings", () => { + let doc1 = WASM.create() + doc1.put("_root", "text", "hello world") + let doc2 = Automerge.load(doc1.save()) + assert.throws(() => { + Automerge.change(doc2, d => { + Automerge.splice(d, "text", 1, 0, "Z") + }) + }, /Cannot splice/) + }) + + it("have many list methods", () => { + let doc1 = Automerge.from({ list: [1, 2, 3] }) + assert.deepEqual(doc1, { list: [1, 2, 3] }) + let doc2 = Automerge.change(doc1, d => { + d.list.splice(1, 1, 9, 10) + }) + assert.deepEqual(doc2, { list: [1, 9, 10, 3] }) + let doc3 = Automerge.change(doc2, d => { + d.list.push(11, 12) + }) + assert.deepEqual(doc3, { list: [1, 9, 10, 3, 11, 12] }) + let doc4 = Automerge.change(doc3, d => { + d.list.unshift(2, 2) + }) + assert.deepEqual(doc4, { list: [2, 2, 1, 9, 10, 3, 11, 12] }) + let doc5 = Automerge.change(doc4, d => { + d.list.shift() + }) + assert.deepEqual(doc5, { list: [2, 1, 9, 10, 3, 11, 12] }) + let doc6 = Automerge.change(doc5, d => { + // @ts-ignore + d.list.insertAt(3, 100, 101) + }) + assert.deepEqual(doc6, { list: [2, 1, 9, 100, 101, 10, 3, 11, 12] }) + }) + + it("allows access to the backend", () => { + let doc = Automerge.init() + assert.deepEqual(Object.keys(Automerge.getBackend(doc)), ["ptr"]) + }) + + it("lists and text have indexof", () => { + let doc = Automerge.from({ + list: [0, 1, 2, 3, 4, 5, 6], + text: "hello world", + }) + assert.deepEqual(doc.list.indexOf(5), 5) + assert.deepEqual(doc.text.indexOf("world"), 6) + }) + }) + + describe("emptyChange", () => { + it("should generate a hash", () => { + let doc = Automerge.init() + doc = Automerge.change(doc, d => { + d.key = "value" + }) + Automerge.save(doc) + let headsBefore = Automerge.getHeads(doc) + headsBefore.sort() + doc = Automerge.emptyChange(doc, "empty change") + let headsAfter = Automerge.getHeads(doc) + headsAfter.sort() + assert.notDeepEqual(headsBefore, headsAfter) + }) + }) + + describe("proxy lists", () => { + it("behave like arrays", () => { + let doc = Automerge.from({ + chars: ["a", "b", "c"], + numbers: [20, 3, 100], + repeats: [20, 20, 3, 3, 3, 3, 100, 100], + }) + let r1: Array = [] + doc = Automerge.change(doc, d => { + assert.deepEqual((d.chars as any[]).concat([1, 2]), [ + "a", + "b", + "c", + 1, + 2, + ]) + assert.deepEqual( + d.chars.map(n => n + "!"), + ["a!", "b!", "c!"] + ) + assert.deepEqual( + d.numbers.map(n => n + 10), + [30, 13, 110] + ) + assert.deepEqual(d.numbers.toString(), "20,3,100") + assert.deepEqual(d.numbers.toLocaleString(), "20,3,100") + assert.deepEqual( + d.numbers.forEach((n: number) => r1.push(n)), + undefined + ) + assert.deepEqual( + d.numbers.every(n => n > 1), + true + ) + assert.deepEqual( + d.numbers.every(n => n > 10), + false + ) + assert.deepEqual( + d.numbers.filter(n => n > 10), + [20, 100] + ) + assert.deepEqual( + d.repeats.find(n => n < 10), + 3 + ) + assert.deepEqual( + 
d.repeats.find(n => n < 10), + 3 + ) + assert.deepEqual( + d.repeats.find(n => n < 0), + undefined + ) + assert.deepEqual( + d.repeats.findIndex(n => n < 10), + 2 + ) + assert.deepEqual( + d.repeats.findIndex(n => n < 0), + -1 + ) + assert.deepEqual( + d.repeats.findIndex(n => n < 10), + 2 + ) + assert.deepEqual( + d.repeats.findIndex(n => n < 0), + -1 + ) + assert.deepEqual(d.numbers.includes(3), true) + assert.deepEqual(d.numbers.includes(-3), false) + assert.deepEqual(d.numbers.join("|"), "20|3|100") + assert.deepEqual(d.numbers.join(), "20,3,100") + assert.deepEqual( + d.numbers.some(f => f === 3), + true + ) + assert.deepEqual( + d.numbers.some(f => f < 0), + false + ) + assert.deepEqual( + d.numbers.reduce((sum, n) => sum + n, 100), + 223 + ) + assert.deepEqual( + d.repeats.reduce((sum, n) => sum + n, 100), + 352 + ) + assert.deepEqual( + d.chars.reduce((sum, n) => sum + n, "="), + "=abc" + ) + assert.deepEqual( + d.chars.reduceRight((sum, n) => sum + n, "="), + "=cba" + ) + assert.deepEqual( + d.numbers.reduceRight((sum, n) => sum + n, 100), + 223 + ) + assert.deepEqual(d.repeats.lastIndexOf(3), 5) + assert.deepEqual(d.repeats.lastIndexOf(3, 3), 3) + }) + doc = Automerge.change(doc, d => { + assert.deepEqual(d.numbers.fill(-1, 1, 2), [20, -1, 100]) + assert.deepEqual(d.chars.fill("z", 1, 100), ["a", "z", "z"]) + }) + assert.deepEqual(r1, [20, 3, 100]) + assert.deepEqual(doc.numbers, [20, -1, 100]) + assert.deepEqual(doc.chars, ["a", "z", "z"]) + }) + }) + + it("should obtain the same conflicts, regardless of merge order", () => { + let s1 = Automerge.init() + let s2 = Automerge.init() + s1 = Automerge.change(s1, doc => { + doc.x = 1 + doc.y = 2 + }) + s2 = Automerge.change(s2, doc => { + doc.x = 3 + doc.y = 4 + }) + const m1 = Automerge.merge(Automerge.clone(s1), Automerge.clone(s2)) + const m2 = Automerge.merge(Automerge.clone(s2), Automerge.clone(s1)) + assert.deepStrictEqual( + Automerge.getConflicts(m1, "x"), + Automerge.getConflicts(m2, "x") + ) + }) + + describe("getObjectId", () => { + let s1 = Automerge.from({ + string: "string", + number: 1, + null: null, + date: new Date(), + counter: new Automerge.Counter(), + bytes: new Uint8Array(10), + text: "", + list: [], + map: {}, + }) + + it("should return null for scalar values", () => { + assert.equal(Automerge.getObjectId(s1.string), null) + assert.equal(Automerge.getObjectId(s1.number), null) + assert.equal(Automerge.getObjectId(s1.null!), null) + assert.equal(Automerge.getObjectId(s1.date), null) + assert.equal(Automerge.getObjectId(s1.counter), null) + assert.equal(Automerge.getObjectId(s1.bytes), null) + }) + + it("should return _root for the root object", () => { + assert.equal(Automerge.getObjectId(s1), "_root") + }) + + it("should return non-null for map, list, text, and objects", () => { + assert.equal(Automerge.getObjectId(s1.text), null) + assert.notEqual(Automerge.getObjectId(s1.list), null) + assert.notEqual(Automerge.getObjectId(s1.map), null) + }) + }) }) - diff --git a/javascript/test/extra_api_tests.ts b/javascript/test/extra_api_tests.ts index c0c18177..69932d1f 100644 --- a/javascript/test/extra_api_tests.ts +++ b/javascript/test/extra_api_tests.ts @@ -1,20 +1,28 @@ +import * as assert from "assert" +import * as Automerge from "../src" -import * as assert from 'assert' -import * as Automerge from '../src' - -describe('Automerge', () => { - describe('basics', () => { - it('should allow you to load incrementally', () => { - let doc1 = Automerge.from({ foo: "bar" }) - let doc2 = Automerge.init(); - doc2 = 
Automerge.loadIncremental(doc2, Automerge.save(doc1)) - doc1 = Automerge.change(doc1, (d) => d.foo2 = "bar2") - doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) - doc1 = Automerge.change(doc1, (d) => d.foo = "bar2") - doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) - doc1 = Automerge.change(doc1, (d) => d.x = "y") - doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) - assert.deepEqual(doc1,doc2) - }) +describe("Automerge", () => { + describe("basics", () => { + it("should allow you to load incrementally", () => { + let doc1 = Automerge.from({ foo: "bar" }) + let doc2 = Automerge.init() + doc2 = Automerge.loadIncremental(doc2, Automerge.save(doc1)) + doc1 = Automerge.change(doc1, d => (d.foo2 = "bar2")) + doc2 = Automerge.loadIncremental( + doc2, + Automerge.getBackend(doc1).saveIncremental() + ) + doc1 = Automerge.change(doc1, d => (d.foo = "bar2")) + doc2 = Automerge.loadIncremental( + doc2, + Automerge.getBackend(doc1).saveIncremental() + ) + doc1 = Automerge.change(doc1, d => (d.x = "y")) + doc2 = Automerge.loadIncremental( + doc2, + Automerge.getBackend(doc1).saveIncremental() + ) + assert.deepEqual(doc1, doc2) }) + }) }) diff --git a/javascript/test/helpers.ts b/javascript/test/helpers.ts index 7799cb84..df76e558 100644 --- a/javascript/test/helpers.ts +++ b/javascript/test/helpers.ts @@ -1,5 +1,5 @@ -import * as assert from 'assert' -import { Encoder } from './legacy/encoding' +import * as assert from "assert" +import { Encoder } from "./legacy/encoding" // Assertion that succeeds if the first argument deepStrictEquals at least one of the // subsequent arguments (but we don't care which one) @@ -11,7 +11,8 @@ export function assertEqualsOneOf(actual, ...expected) { return // if we get here without an exception, that means success } catch (e) { if (e instanceof assert.AssertionError) { - if (!e.name.match(/^AssertionError/) || i === expected.length - 1) throw e + if (!e.name.match(/^AssertionError/) || i === expected.length - 1) + throw e } else { throw e } @@ -24,9 +25,10 @@ export function assertEqualsOneOf(actual, ...expected) { * sequence as the array `bytes`. */ export function checkEncoded(encoder, bytes, detail?) { - const encoded = (encoder instanceof Encoder) ? encoder.buffer : encoder + const encoded = encoder instanceof Encoder ? encoder.buffer : encoder const expected = new Uint8Array(bytes) - const message = (detail ? `${detail}: ` : '') + `${encoded} expected to equal ${expected}` + const message = + (detail ? 
`${detail}: ` : "") + `${encoded} expected to equal ${expected}` assert(encoded.byteLength === expected.byteLength, message) for (let i = 0; i < encoded.byteLength; i++) { assert(encoded[i] === expected[i], message) diff --git a/javascript/test/legacy/columnar.js b/javascript/test/legacy/columnar.js index b97e6275..6a9b5874 100644 --- a/javascript/test/legacy/columnar.js +++ b/javascript/test/legacy/columnar.js @@ -1,9 +1,18 @@ -const pako = require('pako') -const { copyObject, parseOpId, equalBytes } = require('./common') +const pako = require("pako") +const { copyObject, parseOpId, equalBytes } = require("./common") const { - utf8ToString, hexStringToBytes, bytesToHexString, - Encoder, Decoder, RLEEncoder, RLEDecoder, DeltaEncoder, DeltaDecoder, BooleanEncoder, BooleanDecoder -} = require('./encoding') + utf8ToString, + hexStringToBytes, + bytesToHexString, + Encoder, + Decoder, + RLEEncoder, + RLEDecoder, + DeltaEncoder, + DeltaDecoder, + BooleanEncoder, + BooleanDecoder, +} = require("./encoding") // Maybe we should be using the platform's built-in hash implementation? // Node has the crypto module: https://nodejs.org/api/crypto.html and browsers have @@ -18,7 +27,7 @@ const { // - It does not need a secure source of random bits and does not need to be // constant-time; // - I have reviewed the source code and it seems pretty reasonable. -const { Hash } = require('fast-sha256') +const { Hash } = require("fast-sha256") // These bytes don't mean anything, they were generated randomly const MAGIC_BYTES = new Uint8Array([0x85, 0x6f, 0x4a, 0x83]) @@ -33,8 +42,14 @@ const DEFLATE_MIN_SIZE = 256 // The least-significant 3 bits of a columnId indicate its datatype const COLUMN_TYPE = { - GROUP_CARD: 0, ACTOR_ID: 1, INT_RLE: 2, INT_DELTA: 3, BOOLEAN: 4, - STRING_RLE: 5, VALUE_LEN: 6, VALUE_RAW: 7 + GROUP_CARD: 0, + ACTOR_ID: 1, + INT_RLE: 2, + INT_DELTA: 3, + BOOLEAN: 4, + STRING_RLE: 5, + VALUE_LEN: 6, + VALUE_RAW: 7, } // The 4th-least-significant bit of a columnId is set if the column is DEFLATE-compressed @@ -44,53 +59,77 @@ const COLUMN_TYPE_DEFLATE = 8 // one of the following types in VALUE_TYPE. The higher bits indicate the length of the value in the // associated VALUE_RAW column (in bytes). 
const VALUE_TYPE = { - NULL: 0, FALSE: 1, TRUE: 2, LEB128_UINT: 3, LEB128_INT: 4, IEEE754: 5, - UTF8: 6, BYTES: 7, COUNTER: 8, TIMESTAMP: 9, MIN_UNKNOWN: 10, MAX_UNKNOWN: 15 + NULL: 0, + FALSE: 1, + TRUE: 2, + LEB128_UINT: 3, + LEB128_INT: 4, + IEEE754: 5, + UTF8: 6, + BYTES: 7, + COUNTER: 8, + TIMESTAMP: 9, + MIN_UNKNOWN: 10, + MAX_UNKNOWN: 15, } // make* actions must be at even-numbered indexes in this list -const ACTIONS = ['makeMap', 'set', 'makeList', 'del', 'makeText', 'inc', 'makeTable', 'link'] +const ACTIONS = [ + "makeMap", + "set", + "makeList", + "del", + "makeText", + "inc", + "makeTable", + "link", +] -const OBJECT_TYPE = {makeMap: 'map', makeList: 'list', makeText: 'text', makeTable: 'table'} +const OBJECT_TYPE = { + makeMap: "map", + makeList: "list", + makeText: "text", + makeTable: "table", +} const COMMON_COLUMNS = [ - {columnName: 'objActor', columnId: 0 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'objCtr', columnId: 0 << 4 | COLUMN_TYPE.INT_RLE}, - {columnName: 'keyActor', columnId: 1 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'keyCtr', columnId: 1 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'keyStr', columnId: 1 << 4 | COLUMN_TYPE.STRING_RLE}, - {columnName: 'idActor', columnId: 2 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'idCtr', columnId: 2 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'insert', columnId: 3 << 4 | COLUMN_TYPE.BOOLEAN}, - {columnName: 'action', columnId: 4 << 4 | COLUMN_TYPE.INT_RLE}, - {columnName: 'valLen', columnId: 5 << 4 | COLUMN_TYPE.VALUE_LEN}, - {columnName: 'valRaw', columnId: 5 << 4 | COLUMN_TYPE.VALUE_RAW}, - {columnName: 'chldActor', columnId: 6 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'chldCtr', columnId: 6 << 4 | COLUMN_TYPE.INT_DELTA} + { columnName: "objActor", columnId: (0 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "objCtr", columnId: (0 << 4) | COLUMN_TYPE.INT_RLE }, + { columnName: "keyActor", columnId: (1 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "keyCtr", columnId: (1 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "keyStr", columnId: (1 << 4) | COLUMN_TYPE.STRING_RLE }, + { columnName: "idActor", columnId: (2 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "idCtr", columnId: (2 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "insert", columnId: (3 << 4) | COLUMN_TYPE.BOOLEAN }, + { columnName: "action", columnId: (4 << 4) | COLUMN_TYPE.INT_RLE }, + { columnName: "valLen", columnId: (5 << 4) | COLUMN_TYPE.VALUE_LEN }, + { columnName: "valRaw", columnId: (5 << 4) | COLUMN_TYPE.VALUE_RAW }, + { columnName: "chldActor", columnId: (6 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "chldCtr", columnId: (6 << 4) | COLUMN_TYPE.INT_DELTA }, ] const CHANGE_COLUMNS = COMMON_COLUMNS.concat([ - {columnName: 'predNum', columnId: 7 << 4 | COLUMN_TYPE.GROUP_CARD}, - {columnName: 'predActor', columnId: 7 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'predCtr', columnId: 7 << 4 | COLUMN_TYPE.INT_DELTA} + { columnName: "predNum", columnId: (7 << 4) | COLUMN_TYPE.GROUP_CARD }, + { columnName: "predActor", columnId: (7 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "predCtr", columnId: (7 << 4) | COLUMN_TYPE.INT_DELTA }, ]) const DOC_OPS_COLUMNS = COMMON_COLUMNS.concat([ - {columnName: 'succNum', columnId: 8 << 4 | COLUMN_TYPE.GROUP_CARD}, - {columnName: 'succActor', columnId: 8 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'succCtr', columnId: 8 << 4 | COLUMN_TYPE.INT_DELTA} + { columnName: "succNum", columnId: (8 << 4) | COLUMN_TYPE.GROUP_CARD }, + { columnName: "succActor", columnId: (8 << 4) | COLUMN_TYPE.ACTOR_ID }, + { 
columnName: "succCtr", columnId: (8 << 4) | COLUMN_TYPE.INT_DELTA }, ]) const DOCUMENT_COLUMNS = [ - {columnName: 'actor', columnId: 0 << 4 | COLUMN_TYPE.ACTOR_ID}, - {columnName: 'seq', columnId: 0 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'maxOp', columnId: 1 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'time', columnId: 2 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'message', columnId: 3 << 4 | COLUMN_TYPE.STRING_RLE}, - {columnName: 'depsNum', columnId: 4 << 4 | COLUMN_TYPE.GROUP_CARD}, - {columnName: 'depsIndex', columnId: 4 << 4 | COLUMN_TYPE.INT_DELTA}, - {columnName: 'extraLen', columnId: 5 << 4 | COLUMN_TYPE.VALUE_LEN}, - {columnName: 'extraRaw', columnId: 5 << 4 | COLUMN_TYPE.VALUE_RAW} + { columnName: "actor", columnId: (0 << 4) | COLUMN_TYPE.ACTOR_ID }, + { columnName: "seq", columnId: (0 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "maxOp", columnId: (1 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "time", columnId: (2 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "message", columnId: (3 << 4) | COLUMN_TYPE.STRING_RLE }, + { columnName: "depsNum", columnId: (4 << 4) | COLUMN_TYPE.GROUP_CARD }, + { columnName: "depsIndex", columnId: (4 << 4) | COLUMN_TYPE.INT_DELTA }, + { columnName: "extraLen", columnId: (5 << 4) | COLUMN_TYPE.VALUE_LEN }, + { columnName: "extraRaw", columnId: (5 << 4) | COLUMN_TYPE.VALUE_RAW }, ] /** @@ -102,8 +141,8 @@ function actorIdToActorNum(opId, actorIds) { if (!opId || !opId.actorId) return opId const counter = opId.counter const actorNum = actorIds.indexOf(opId.actorId) - if (actorNum < 0) throw new RangeError('missing actorId') // should not happen - return {counter, actorNum, actorId: opId.actorId} + if (actorNum < 0) throw new RangeError("missing actorId") // should not happen + return { counter, actorNum, actorId: opId.actorId } } /** @@ -131,15 +170,16 @@ function compareParsedOpIds(id1, id2) { * false. 
*/ function parseAllOpIds(changes, single) { - const actors = {}, newChanges = [] + const actors = {}, + newChanges = [] for (let change of changes) { change = copyObject(change) actors[change.actor] = true change.ops = expandMultiOps(change.ops, change.startOp, change.actor) change.ops = change.ops.map(op => { op = copyObject(op) - if (op.obj !== '_root') op.obj = parseOpId(op.obj) - if (op.elemId && op.elemId !== '_head') op.elemId = parseOpId(op.elemId) + if (op.obj !== "_root") op.obj = parseOpId(op.obj) + if (op.elemId && op.elemId !== "_head") op.elemId = parseOpId(op.elemId) if (op.child) op.child = parseOpId(op.child) if (op.pred) op.pred = op.pred.map(parseOpId) if (op.obj.actorId) actors[op.obj.actorId] = true @@ -153,20 +193,26 @@ function parseAllOpIds(changes, single) { let actorIds = Object.keys(actors).sort() if (single) { - actorIds = [changes[0].actor].concat(actorIds.filter(actor => actor !== changes[0].actor)) + actorIds = [changes[0].actor].concat( + actorIds.filter(actor => actor !== changes[0].actor) + ) } for (let change of newChanges) { change.actorNum = actorIds.indexOf(change.actor) for (let i = 0; i < change.ops.length; i++) { let op = change.ops[i] - op.id = {counter: change.startOp + i, actorNum: change.actorNum, actorId: change.actor} + op.id = { + counter: change.startOp + i, + actorNum: change.actorNum, + actorId: change.actor, + } op.obj = actorIdToActorNum(op.obj, actorIds) op.elemId = actorIdToActorNum(op.elemId, actorIds) op.child = actorIdToActorNum(op.child, actorIds) op.pred = op.pred.map(pred => actorIdToActorNum(pred, actorIds)) } } - return {changes: newChanges, actorIds} + return { changes: newChanges, actorIds } } /** @@ -174,14 +220,16 @@ function parseAllOpIds(changes, single) { * `objActor` and `objCtr`. 
*/ function encodeObjectId(op, columns) { - if (op.obj === '_root') { + if (op.obj === "_root") { columns.objActor.appendValue(null) columns.objCtr.appendValue(null) } else if (op.obj.actorNum >= 0 && op.obj.counter > 0) { columns.objActor.appendValue(op.obj.actorNum) columns.objCtr.appendValue(op.obj.counter) } else { - throw new RangeError(`Unexpected objectId reference: ${JSON.stringify(op.obj)}`) + throw new RangeError( + `Unexpected objectId reference: ${JSON.stringify(op.obj)}` + ) } } @@ -194,7 +242,7 @@ function encodeOperationKey(op, columns) { columns.keyActor.appendValue(null) columns.keyCtr.appendValue(null) columns.keyStr.appendValue(op.key) - } else if (op.elemId === '_head' && op.insert) { + } else if (op.elemId === "_head" && op.insert) { columns.keyActor.appendValue(null) columns.keyCtr.appendValue(0) columns.keyStr.appendValue(null) @@ -214,7 +262,7 @@ function encodeOperationAction(op, columns) { const actionCode = ACTIONS.indexOf(op.action) if (actionCode >= 0) { columns.action.appendValue(actionCode) - } else if (typeof op.action === 'number') { + } else if (typeof op.action === "number") { columns.action.appendValue(op.action) } else { throw new RangeError(`Unexpected operation action: ${op.action}`) @@ -228,26 +276,32 @@ function encodeOperationAction(op, columns) { function getNumberTypeAndValue(op) { switch (op.datatype) { case "counter": - return [ VALUE_TYPE.COUNTER, op.value ] + return [VALUE_TYPE.COUNTER, op.value] case "timestamp": - return [ VALUE_TYPE.TIMESTAMP, op.value ] + return [VALUE_TYPE.TIMESTAMP, op.value] case "uint": - return [ VALUE_TYPE.LEB128_UINT, op.value ] + return [VALUE_TYPE.LEB128_UINT, op.value] case "int": - return [ VALUE_TYPE.LEB128_INT, op.value ] + return [VALUE_TYPE.LEB128_INT, op.value] case "float64": { - const buf64 = new ArrayBuffer(8), view64 = new DataView(buf64) + const buf64 = new ArrayBuffer(8), + view64 = new DataView(buf64) view64.setFloat64(0, op.value, true) - return [ VALUE_TYPE.IEEE754, new Uint8Array(buf64) ] + return [VALUE_TYPE.IEEE754, new Uint8Array(buf64)] } default: // increment operators get resolved here ... - if (Number.isInteger(op.value) && op.value <= Number.MAX_SAFE_INTEGER && op.value >= Number.MIN_SAFE_INTEGER) { - return [ VALUE_TYPE.LEB128_INT, op.value ] + if ( + Number.isInteger(op.value) && + op.value <= Number.MAX_SAFE_INTEGER && + op.value >= Number.MIN_SAFE_INTEGER + ) { + return [VALUE_TYPE.LEB128_INT, op.value] } else { - const buf64 = new ArrayBuffer(8), view64 = new DataView(buf64) + const buf64 = new ArrayBuffer(8), + view64 = new DataView(buf64) view64.setFloat64(0, op.value, true) - return [ VALUE_TYPE.IEEE754, new Uint8Array(buf64) ] + return [VALUE_TYPE.IEEE754, new Uint8Array(buf64)] } } } @@ -257,19 +311,21 @@ function getNumberTypeAndValue(op) { * `valLen` and `valRaw`. 
*/ function encodeValue(op, columns) { - if ((op.action !== 'set' && op.action !== 'inc') || op.value === null) { + if ((op.action !== "set" && op.action !== "inc") || op.value === null) { columns.valLen.appendValue(VALUE_TYPE.NULL) } else if (op.value === false) { columns.valLen.appendValue(VALUE_TYPE.FALSE) } else if (op.value === true) { columns.valLen.appendValue(VALUE_TYPE.TRUE) - } else if (typeof op.value === 'string') { + } else if (typeof op.value === "string") { const numBytes = columns.valRaw.appendRawString(op.value) - columns.valLen.appendValue(numBytes << 4 | VALUE_TYPE.UTF8) + columns.valLen.appendValue((numBytes << 4) | VALUE_TYPE.UTF8) } else if (ArrayBuffer.isView(op.value)) { - const numBytes = columns.valRaw.appendRawBytes(new Uint8Array(op.value.buffer)) - columns.valLen.appendValue(numBytes << 4 | VALUE_TYPE.BYTES) - } else if (typeof op.value === 'number') { + const numBytes = columns.valRaw.appendRawBytes( + new Uint8Array(op.value.buffer) + ) + columns.valLen.appendValue((numBytes << 4) | VALUE_TYPE.BYTES) + } else if (typeof op.value === "number") { let [typeTag, value] = getNumberTypeAndValue(op) let numBytes if (typeTag === VALUE_TYPE.LEB128_UINT) { @@ -279,13 +335,19 @@ function encodeValue(op, columns) { } else { numBytes = columns.valRaw.appendInt53(value) } - columns.valLen.appendValue(numBytes << 4 | typeTag) - } else if (typeof op.datatype === 'number' && op.datatype >= VALUE_TYPE.MIN_UNKNOWN && - op.datatype <= VALUE_TYPE.MAX_UNKNOWN && op.value instanceof Uint8Array) { + columns.valLen.appendValue((numBytes << 4) | typeTag) + } else if ( + typeof op.datatype === "number" && + op.datatype >= VALUE_TYPE.MIN_UNKNOWN && + op.datatype <= VALUE_TYPE.MAX_UNKNOWN && + op.value instanceof Uint8Array + ) { const numBytes = columns.valRaw.appendRawBytes(op.value) - columns.valLen.appendValue(numBytes << 4 | op.datatype) + columns.valLen.appendValue((numBytes << 4) | op.datatype) } else if (op.datatype) { - throw new RangeError(`Unknown datatype ${op.datatype} for value ${op.value}`) + throw new RangeError( + `Unknown datatype ${op.datatype} for value ${op.value}` + ) } else { throw new RangeError(`Unsupported value in operation: ${op.value}`) } @@ -299,31 +361,37 @@ function encodeValue(op, columns) { */ function decodeValue(sizeTag, bytes) { if (sizeTag === VALUE_TYPE.NULL) { - return {value: null} + return { value: null } } else if (sizeTag === VALUE_TYPE.FALSE) { - return {value: false} + return { value: false } } else if (sizeTag === VALUE_TYPE.TRUE) { - return {value: true} + return { value: true } } else if (sizeTag % 16 === VALUE_TYPE.UTF8) { - return {value: utf8ToString(bytes)} + return { value: utf8ToString(bytes) } } else { if (sizeTag % 16 === VALUE_TYPE.LEB128_UINT) { - return {value: new Decoder(bytes).readUint53(), datatype: "uint"} + return { value: new Decoder(bytes).readUint53(), datatype: "uint" } } else if (sizeTag % 16 === VALUE_TYPE.LEB128_INT) { - return {value: new Decoder(bytes).readInt53(), datatype: "int"} + return { value: new Decoder(bytes).readInt53(), datatype: "int" } } else if (sizeTag % 16 === VALUE_TYPE.IEEE754) { - const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength) + const view = new DataView( + bytes.buffer, + bytes.byteOffset, + bytes.byteLength + ) if (bytes.byteLength === 8) { - return {value: view.getFloat64(0, true), datatype: "float64"} + return { value: view.getFloat64(0, true), datatype: "float64" } } else { - throw new RangeError(`Invalid length for floating point number: ${bytes.byteLength}`) + 
throw new RangeError( + `Invalid length for floating point number: ${bytes.byteLength}` + ) } } else if (sizeTag % 16 === VALUE_TYPE.COUNTER) { - return {value: new Decoder(bytes).readInt53(), datatype: 'counter'} + return { value: new Decoder(bytes).readInt53(), datatype: "counter" } } else if (sizeTag % 16 === VALUE_TYPE.TIMESTAMP) { - return {value: new Decoder(bytes).readInt53(), datatype: 'timestamp'} + return { value: new Decoder(bytes).readInt53(), datatype: "timestamp" } } else { - return {value: bytes, datatype: sizeTag % 16} + return { value: bytes, datatype: sizeTag % 16 } } } } @@ -338,20 +406,24 @@ function decodeValue(sizeTag, bytes) { */ function decodeValueColumns(columns, colIndex, actorIds, result) { const { columnId, columnName, decoder } = columns[colIndex] - if (columnId % 8 === COLUMN_TYPE.VALUE_LEN && colIndex + 1 < columns.length && - columns[colIndex + 1].columnId === columnId + 1) { + if ( + columnId % 8 === COLUMN_TYPE.VALUE_LEN && + colIndex + 1 < columns.length && + columns[colIndex + 1].columnId === columnId + 1 + ) { const sizeTag = decoder.readValue() const rawValue = columns[colIndex + 1].decoder.readRawBytes(sizeTag >> 4) const { value, datatype } = decodeValue(sizeTag, rawValue) result[columnName] = value - if (datatype) result[columnName + '_datatype'] = datatype + if (datatype) result[columnName + "_datatype"] = datatype return 2 } else if (columnId % 8 === COLUMN_TYPE.ACTOR_ID) { const actorNum = decoder.readValue() if (actorNum === null) { result[columnName] = null } else { - if (!actorIds[actorNum]) throw new RangeError(`No actor index ${actorNum}`) + if (!actorIds[actorNum]) + throw new RangeError(`No actor index ${actorNum}`) result[columnName] = actorIds[actorNum] } } else { @@ -369,29 +441,29 @@ function decodeValueColumns(columns, colIndex, actorIds, result) { */ function encodeOps(ops, forDocument) { const columns = { - objActor : new RLEEncoder('uint'), - objCtr : new RLEEncoder('uint'), - keyActor : new RLEEncoder('uint'), - keyCtr : new DeltaEncoder(), - keyStr : new RLEEncoder('utf8'), - insert : new BooleanEncoder(), - action : new RLEEncoder('uint'), - valLen : new RLEEncoder('uint'), - valRaw : new Encoder(), - chldActor : new RLEEncoder('uint'), - chldCtr : new DeltaEncoder() + objActor: new RLEEncoder("uint"), + objCtr: new RLEEncoder("uint"), + keyActor: new RLEEncoder("uint"), + keyCtr: new DeltaEncoder(), + keyStr: new RLEEncoder("utf8"), + insert: new BooleanEncoder(), + action: new RLEEncoder("uint"), + valLen: new RLEEncoder("uint"), + valRaw: new Encoder(), + chldActor: new RLEEncoder("uint"), + chldCtr: new DeltaEncoder(), } if (forDocument) { - columns.idActor = new RLEEncoder('uint') - columns.idCtr = new DeltaEncoder() - columns.succNum = new RLEEncoder('uint') - columns.succActor = new RLEEncoder('uint') - columns.succCtr = new DeltaEncoder() + columns.idActor = new RLEEncoder("uint") + columns.idCtr = new DeltaEncoder() + columns.succNum = new RLEEncoder("uint") + columns.succActor = new RLEEncoder("uint") + columns.succCtr = new DeltaEncoder() } else { - columns.predNum = new RLEEncoder('uint') - columns.predCtr = new DeltaEncoder() - columns.predActor = new RLEEncoder('uint') + columns.predNum = new RLEEncoder("uint") + columns.predCtr = new DeltaEncoder() + columns.predActor = new RLEEncoder("uint") } for (let op of ops) { @@ -429,17 +501,22 @@ function encodeOps(ops, forDocument) { } let columnList = [] - for (let {columnName, columnId} of forDocument ? 
DOC_OPS_COLUMNS : CHANGE_COLUMNS) { - if (columns[columnName]) columnList.push({columnId, columnName, encoder: columns[columnName]}) + for (let { columnName, columnId } of forDocument + ? DOC_OPS_COLUMNS + : CHANGE_COLUMNS) { + if (columns[columnName]) + columnList.push({ columnId, columnName, encoder: columns[columnName] }) } return columnList.sort((a, b) => a.columnId - b.columnId) } function validDatatype(value, datatype) { if (datatype === undefined) { - return (typeof value === 'string' || typeof value === 'boolean' || value === null) + return ( + typeof value === "string" || typeof value === "boolean" || value === null + ) } else { - return typeof value === 'number' + return typeof value === "number" } } @@ -447,23 +524,37 @@ function expandMultiOps(ops, startOp, actor) { let opNum = startOp let expandedOps = [] for (const op of ops) { - if (op.action === 'set' && op.values && op.insert) { - if (op.pred.length !== 0) throw new RangeError('multi-insert pred must be empty') + if (op.action === "set" && op.values && op.insert) { + if (op.pred.length !== 0) + throw new RangeError("multi-insert pred must be empty") let lastElemId = op.elemId const datatype = op.datatype for (const value of op.values) { - if (!validDatatype(value, datatype)) throw new RangeError(`Decode failed: bad value/datatype association (${value},${datatype})`) - expandedOps.push({action: 'set', obj: op.obj, elemId: lastElemId, datatype, value, pred: [], insert: true}) + if (!validDatatype(value, datatype)) + throw new RangeError( + `Decode failed: bad value/datatype association (${value},${datatype})` + ) + expandedOps.push({ + action: "set", + obj: op.obj, + elemId: lastElemId, + datatype, + value, + pred: [], + insert: true, + }) lastElemId = `${opNum}@${actor}` opNum += 1 } - } else if (op.action === 'del' && op.multiOp > 1) { - if (op.pred.length !== 1) throw new RangeError('multiOp deletion must have exactly one pred') - const startElemId = parseOpId(op.elemId), startPred = parseOpId(op.pred[0]) + } else if (op.action === "del" && op.multiOp > 1) { + if (op.pred.length !== 1) + throw new RangeError("multiOp deletion must have exactly one pred") + const startElemId = parseOpId(op.elemId), + startPred = parseOpId(op.pred[0]) for (let i = 0; i < op.multiOp; i++) { const elemId = `${startElemId.counter + i}@${startElemId.actorId}` const pred = [`${startPred.counter + i}@${startPred.actorId}`] - expandedOps.push({action: 'del', obj: op.obj, elemId, pred}) + expandedOps.push({ action: "del", obj: op.obj, elemId, pred }) opNum += 1 } } else { @@ -483,26 +574,44 @@ function expandMultiOps(ops, startOp, actor) { function decodeOps(ops, forDocument) { const newOps = [] for (let op of ops) { - const obj = (op.objCtr === null) ? '_root' : `${op.objCtr}@${op.objActor}` - const elemId = op.keyStr ? undefined : (op.keyCtr === 0 ? '_head' : `${op.keyCtr}@${op.keyActor}`) + const obj = op.objCtr === null ? "_root" : `${op.objCtr}@${op.objActor}` + const elemId = op.keyStr + ? undefined + : op.keyCtr === 0 + ? "_head" + : `${op.keyCtr}@${op.keyActor}` const action = ACTIONS[op.action] || op.action - const newOp = elemId ? {obj, elemId, action} : {obj, key: op.keyStr, action} + const newOp = elemId + ? 
{ obj, elemId, action } + : { obj, key: op.keyStr, action } newOp.insert = !!op.insert - if (ACTIONS[op.action] === 'set' || ACTIONS[op.action] === 'inc') { + if (ACTIONS[op.action] === "set" || ACTIONS[op.action] === "inc") { newOp.value = op.valLen if (op.valLen_datatype) newOp.datatype = op.valLen_datatype } if (!!op.chldCtr !== !!op.chldActor) { - throw new RangeError(`Mismatched child columns: ${op.chldCtr} and ${op.chldActor}`) + throw new RangeError( + `Mismatched child columns: ${op.chldCtr} and ${op.chldActor}` + ) } if (op.chldCtr !== null) newOp.child = `${op.chldCtr}@${op.chldActor}` if (forDocument) { newOp.id = `${op.idCtr}@${op.idActor}` newOp.succ = op.succNum.map(succ => `${succ.succCtr}@${succ.succActor}`) - checkSortedOpIds(op.succNum.map(succ => ({counter: succ.succCtr, actorId: succ.succActor}))) + checkSortedOpIds( + op.succNum.map(succ => ({ + counter: succ.succCtr, + actorId: succ.succActor, + })) + ) } else { newOp.pred = op.predNum.map(pred => `${pred.predCtr}@${pred.predActor}`) - checkSortedOpIds(op.predNum.map(pred => ({counter: pred.predCtr, actorId: pred.predActor}))) + checkSortedOpIds( + op.predNum.map(pred => ({ + counter: pred.predCtr, + actorId: pred.predActor, + })) + ) } newOps.push(newOp) } @@ -516,7 +625,7 @@ function checkSortedOpIds(opIds) { let last = null for (let opId of opIds) { if (last && compareParsedOpIds(last, opId) !== -1) { - throw new RangeError('operation IDs are not in ascending order') + throw new RangeError("operation IDs are not in ascending order") } last = opId } @@ -528,11 +637,11 @@ function encoderByColumnId(columnId) { } else if ((columnId & 7) === COLUMN_TYPE.BOOLEAN) { return new BooleanEncoder() } else if ((columnId & 7) === COLUMN_TYPE.STRING_RLE) { - return new RLEEncoder('utf8') + return new RLEEncoder("utf8") } else if ((columnId & 7) === COLUMN_TYPE.VALUE_RAW) { return new Encoder() } else { - return new RLEEncoder('uint') + return new RLEEncoder("uint") } } @@ -542,31 +651,49 @@ function decoderByColumnId(columnId, buffer) { } else if ((columnId & 7) === COLUMN_TYPE.BOOLEAN) { return new BooleanDecoder(buffer) } else if ((columnId & 7) === COLUMN_TYPE.STRING_RLE) { - return new RLEDecoder('utf8', buffer) + return new RLEDecoder("utf8", buffer) } else if ((columnId & 7) === COLUMN_TYPE.VALUE_RAW) { return new Decoder(buffer) } else { - return new RLEDecoder('uint', buffer) + return new RLEDecoder("uint", buffer) } } function makeDecoders(columns, columnSpec) { const emptyBuf = new Uint8Array(0) - let decoders = [], columnIndex = 0, specIndex = 0 + let decoders = [], + columnIndex = 0, + specIndex = 0 while (columnIndex < columns.length || specIndex < columnSpec.length) { - if (columnIndex === columns.length || - (specIndex < columnSpec.length && columnSpec[specIndex].columnId < columns[columnIndex].columnId)) { - const {columnId, columnName} = columnSpec[specIndex] - decoders.push({columnId, columnName, decoder: decoderByColumnId(columnId, emptyBuf)}) + if ( + columnIndex === columns.length || + (specIndex < columnSpec.length && + columnSpec[specIndex].columnId < columns[columnIndex].columnId) + ) { + const { columnId, columnName } = columnSpec[specIndex] + decoders.push({ + columnId, + columnName, + decoder: decoderByColumnId(columnId, emptyBuf), + }) specIndex++ - } else if (specIndex === columnSpec.length || columns[columnIndex].columnId < columnSpec[specIndex].columnId) { - const {columnId, buffer} = columns[columnIndex] - decoders.push({columnId, decoder: decoderByColumnId(columnId, buffer)}) + } else if ( + 
specIndex === columnSpec.length || + columns[columnIndex].columnId < columnSpec[specIndex].columnId + ) { + const { columnId, buffer } = columns[columnIndex] + decoders.push({ columnId, decoder: decoderByColumnId(columnId, buffer) }) columnIndex++ - } else { // columns[columnIndex].columnId === columnSpec[specIndex].columnId - const {columnId, buffer} = columns[columnIndex], {columnName} = columnSpec[specIndex] - decoders.push({columnId, columnName, decoder: decoderByColumnId(columnId, buffer)}) + } else { + // columns[columnIndex].columnId === columnSpec[specIndex].columnId + const { columnId, buffer } = columns[columnIndex], + { columnName } = columnSpec[specIndex] + decoders.push({ + columnId, + columnName, + decoder: decoderByColumnId(columnId, buffer), + }) columnIndex++ specIndex++ } @@ -578,16 +705,22 @@ function decodeColumns(columns, actorIds, columnSpec) { columns = makeDecoders(columns, columnSpec) let parsedRows = [] while (columns.some(col => !col.decoder.done)) { - let row = {}, col = 0 + let row = {}, + col = 0 while (col < columns.length) { const columnId = columns[col].columnId - let groupId = columnId >> 4, groupCols = 1 - while (col + groupCols < columns.length && columns[col + groupCols].columnId >> 4 === groupId) { + let groupId = columnId >> 4, + groupCols = 1 + while ( + col + groupCols < columns.length && + columns[col + groupCols].columnId >> 4 === groupId + ) { groupCols++ } if (columnId % 8 === COLUMN_TYPE.GROUP_CARD) { - const values = [], count = columns[col].decoder.readValue() + const values = [], + count = columns[col].decoder.readValue() for (let i = 0; i < count; i++) { let value = {} for (let colOffset = 1; colOffset < groupCols; colOffset++) { @@ -611,20 +744,25 @@ function decodeColumnInfo(decoder) { // deflate-compressed. We ignore this bit when checking whether columns are sorted by ID. 
const COLUMN_ID_MASK = (-1 ^ COLUMN_TYPE_DEFLATE) >>> 0 - let lastColumnId = -1, columns = [], numColumns = decoder.readUint53() + let lastColumnId = -1, + columns = [], + numColumns = decoder.readUint53() for (let i = 0; i < numColumns; i++) { - const columnId = decoder.readUint53(), bufferLen = decoder.readUint53() + const columnId = decoder.readUint53(), + bufferLen = decoder.readUint53() if ((columnId & COLUMN_ID_MASK) <= (lastColumnId & COLUMN_ID_MASK)) { - throw new RangeError('Columns must be in ascending order') + throw new RangeError("Columns must be in ascending order") } lastColumnId = columnId - columns.push({columnId, bufferLen}) + columns.push({ columnId, bufferLen }) } return columns } function encodeColumnInfo(encoder, columns) { - const nonEmptyColumns = columns.filter(column => column.encoder.buffer.byteLength > 0) + const nonEmptyColumns = columns.filter( + column => column.encoder.buffer.byteLength > 0 + ) encoder.appendUint53(nonEmptyColumns.length) for (let column of nonEmptyColumns) { encoder.appendUint53(column.columnId) @@ -633,19 +771,21 @@ function encodeColumnInfo(encoder, columns) { } function decodeChangeHeader(decoder) { - const numDeps = decoder.readUint53(), deps = [] + const numDeps = decoder.readUint53(), + deps = [] for (let i = 0; i < numDeps; i++) { deps.push(bytesToHexString(decoder.readRawBytes(32))) } let change = { - actor: decoder.readHexString(), - seq: decoder.readUint53(), + actor: decoder.readHexString(), + seq: decoder.readUint53(), startOp: decoder.readUint53(), - time: decoder.readInt53(), + time: decoder.readInt53(), message: decoder.readPrefixedString(), - deps + deps, } - const actorIds = [change.actor], numActorIds = decoder.readUint53() + const actorIds = [change.actor], + numActorIds = decoder.readUint53() for (let i = 0; i < numActorIds; i++) actorIds.push(decoder.readHexString()) change.actorIds = actorIds return change @@ -676,31 +816,47 @@ function encodeContainer(chunkType, encodeContentsCallback) { const sha256 = new Hash() sha256.update(headerBuf) sha256.update(bodyBuf.subarray(HEADER_SPACE)) - const hash = sha256.digest(), checksum = hash.subarray(0, CHECKSUM_SIZE) + const hash = sha256.digest(), + checksum = hash.subarray(0, CHECKSUM_SIZE) // Copy header into the body buffer so that they are contiguous - bodyBuf.set(MAGIC_BYTES, HEADER_SPACE - headerBuf.byteLength - CHECKSUM_SIZE - MAGIC_BYTES.byteLength) - bodyBuf.set(checksum, HEADER_SPACE - headerBuf.byteLength - CHECKSUM_SIZE) - bodyBuf.set(headerBuf, HEADER_SPACE - headerBuf.byteLength) - return {hash, bytes: bodyBuf.subarray(HEADER_SPACE - headerBuf.byteLength - CHECKSUM_SIZE - MAGIC_BYTES.byteLength)} + bodyBuf.set( + MAGIC_BYTES, + HEADER_SPACE - headerBuf.byteLength - CHECKSUM_SIZE - MAGIC_BYTES.byteLength + ) + bodyBuf.set(checksum, HEADER_SPACE - headerBuf.byteLength - CHECKSUM_SIZE) + bodyBuf.set(headerBuf, HEADER_SPACE - headerBuf.byteLength) + return { + hash, + bytes: bodyBuf.subarray( + HEADER_SPACE - + headerBuf.byteLength - + CHECKSUM_SIZE - + MAGIC_BYTES.byteLength + ), + } } function decodeContainerHeader(decoder, computeHash) { if (!equalBytes(decoder.readRawBytes(MAGIC_BYTES.byteLength), MAGIC_BYTES)) { - throw new RangeError('Data does not begin with magic bytes 85 6f 4a 83') + throw new RangeError("Data does not begin with magic bytes 85 6f 4a 83") } const expectedHash = decoder.readRawBytes(4) const hashStartOffset = decoder.offset const chunkType = decoder.readByte() const chunkLength = decoder.readUint53() - const header = {chunkType, chunkLength, 
chunkData: decoder.readRawBytes(chunkLength)} + const header = { + chunkType, + chunkLength, + chunkData: decoder.readRawBytes(chunkLength), + } if (computeHash) { const sha256 = new Hash() sha256.update(decoder.buf.subarray(hashStartOffset, decoder.offset)) const binaryHash = sha256.digest() if (!equalBytes(binaryHash.subarray(0, 4), expectedHash)) { - throw new RangeError('checksum does not match data') + throw new RangeError("checksum does not match data") } header.hash = bytesToHexString(binaryHash) } @@ -712,7 +868,7 @@ function encodeChange(changeObj) { const change = changes[0] const { hash, bytes } = encodeContainer(CHUNK_TYPE_CHANGE, encoder => { - if (!Array.isArray(change.deps)) throw new TypeError('deps is not an array') + if (!Array.isArray(change.deps)) throw new TypeError("deps is not an array") encoder.appendUint53(change.deps.length) for (let hash of change.deps.slice().sort()) { encoder.appendRawBytes(hexStringToBytes(hash)) @@ -721,7 +877,7 @@ function encodeChange(changeObj) { encoder.appendUint53(change.seq) encoder.appendUint53(change.startOp) encoder.appendInt53(change.time) - encoder.appendPrefixedString(change.message || '') + encoder.appendPrefixedString(change.message || "") encoder.appendUint53(actorIds.length - 1) for (let actor of actorIds.slice(1)) encoder.appendHexString(actor) @@ -733,9 +889,11 @@ function encodeChange(changeObj) { const hexHash = bytesToHexString(hash) if (changeObj.hash && changeObj.hash !== hexHash) { - throw new RangeError(`Change hash does not match encoding: ${changeObj.hash} != ${hexHash}`) + throw new RangeError( + `Change hash does not match encoding: ${changeObj.hash} != ${hexHash}` + ) } - return (bytes.byteLength >= DEFLATE_MIN_SIZE) ? deflateChange(bytes) : bytes + return bytes.byteLength >= DEFLATE_MIN_SIZE ? 
deflateChange(bytes) : bytes } function decodeChangeColumns(buffer) { @@ -743,14 +901,15 @@ function decodeChangeColumns(buffer) { const decoder = new Decoder(buffer) const header = decodeContainerHeader(decoder, true) const chunkDecoder = new Decoder(header.chunkData) - if (!decoder.done) throw new RangeError('Encoded change has trailing data') - if (header.chunkType !== CHUNK_TYPE_CHANGE) throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) + if (!decoder.done) throw new RangeError("Encoded change has trailing data") + if (header.chunkType !== CHUNK_TYPE_CHANGE) + throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) const change = decodeChangeHeader(chunkDecoder) const columns = decodeColumnInfo(chunkDecoder) for (let i = 0; i < columns.length; i++) { if ((columns[i].columnId & COLUMN_TYPE_DEFLATE) !== 0) { - throw new RangeError('change must not contain deflated columns') + throw new RangeError("change must not contain deflated columns") } columns[i].buffer = chunkDecoder.readRawBytes(columns[i].bufferLen) } @@ -769,7 +928,10 @@ function decodeChangeColumns(buffer) { */ function decodeChange(buffer) { const change = decodeChangeColumns(buffer) - change.ops = decodeOps(decodeColumns(change.columns, change.actorIds, CHANGE_COLUMNS), false) + change.ops = decodeOps( + decodeColumns(change.columns, change.actorIds, CHANGE_COLUMNS), + false + ) delete change.actorIds delete change.columns return change @@ -784,7 +946,7 @@ function decodeChangeMeta(buffer, computeHash) { if (buffer[8] === CHUNK_TYPE_DEFLATE) buffer = inflateChange(buffer) const header = decodeContainerHeader(new Decoder(buffer), computeHash) if (header.chunkType !== CHUNK_TYPE_CHANGE) { - throw new RangeError('Buffer chunk type is not a change') + throw new RangeError("Buffer chunk type is not a change") } const meta = decodeChangeHeader(new Decoder(header.chunkData)) meta.change = buffer @@ -797,7 +959,8 @@ function decodeChangeMeta(buffer, computeHash) { */ function deflateChange(buffer) { const header = decodeContainerHeader(new Decoder(buffer), false) - if (header.chunkType !== CHUNK_TYPE_CHANGE) throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) + if (header.chunkType !== CHUNK_TYPE_CHANGE) + throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) const compressed = pako.deflateRaw(header.chunkData) const encoder = new Encoder() encoder.appendRawBytes(buffer.subarray(0, 8)) // copy MAGIC_BYTES and checksum @@ -812,7 +975,8 @@ function deflateChange(buffer) { */ function inflateChange(buffer) { const header = decodeContainerHeader(new Decoder(buffer), false) - if (header.chunkType !== CHUNK_TYPE_DEFLATE) throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) + if (header.chunkType !== CHUNK_TYPE_DEFLATE) + throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) const decompressed = pako.inflateRaw(header.chunkData) const encoder = new Encoder() encoder.appendRawBytes(buffer.subarray(0, 8)) // copy MAGIC_BYTES and checksum @@ -827,7 +991,9 @@ function inflateChange(buffer) { * returns an array of subarrays, each subarray containing one change. 
*/ function splitContainers(buffer) { - let decoder = new Decoder(buffer), chunks = [], startOffset = 0 + let decoder = new Decoder(buffer), + chunks = [], + startOffset = 0 while (!decoder.done) { decodeContainerHeader(decoder, false) chunks.push(buffer.subarray(startOffset, decoder.offset)) @@ -846,7 +1012,10 @@ function decodeChanges(binaryChanges) { for (let chunk of splitContainers(binaryChange)) { if (chunk[8] === CHUNK_TYPE_DOCUMENT) { decoded = decoded.concat(decodeDocument(chunk)) - } else if (chunk[8] === CHUNK_TYPE_CHANGE || chunk[8] === CHUNK_TYPE_DEFLATE) { + } else if ( + chunk[8] === CHUNK_TYPE_CHANGE || + chunk[8] === CHUNK_TYPE_DEFLATE + ) { decoded.push(decodeChange(chunk)) } else { // ignoring chunk of unknown type @@ -858,9 +1027,10 @@ function decodeChanges(binaryChanges) { function sortOpIds(a, b) { if (a === b) return 0 - if (a === '_root') return -1 - if (b === '_root') return +1 - const a_ = parseOpId(a), b_ = parseOpId(b) + if (a === "_root") return -1 + if (b === "_root") return +1 + const a_ = parseOpId(a), + b_ = parseOpId(b) if (a_.counter < b_.counter) return -1 if (a_.counter > b_.counter) return +1 if (a_.actorId < b_.actorId) return -1 @@ -879,26 +1049,46 @@ function groupChangeOps(changes, ops) { change.ops = [] if (!changesByActor[change.actor]) changesByActor[change.actor] = [] if (change.seq !== changesByActor[change.actor].length + 1) { - throw new RangeError(`Expected seq = ${changesByActor[change.actor].length + 1}, got ${change.seq}`) + throw new RangeError( + `Expected seq = ${changesByActor[change.actor].length + 1}, got ${ + change.seq + }` + ) } - if (change.seq > 1 && changesByActor[change.actor][change.seq - 2].maxOp > change.maxOp) { - throw new RangeError('maxOp must increase monotonically per actor') + if ( + change.seq > 1 && + changesByActor[change.actor][change.seq - 2].maxOp > change.maxOp + ) { + throw new RangeError("maxOp must increase monotonically per actor") } changesByActor[change.actor].push(change) } let opsById = {} for (let op of ops) { - if (op.action === 'del') throw new RangeError('document should not contain del operations') + if (op.action === "del") + throw new RangeError("document should not contain del operations") op.pred = opsById[op.id] ? opsById[op.id].pred : [] opsById[op.id] = op for (let succ of op.succ) { if (!opsById[succ]) { if (op.elemId) { const elemId = op.insert ? 
op.id : op.elemId - opsById[succ] = {id: succ, action: 'del', obj: op.obj, elemId, pred: []} + opsById[succ] = { + id: succ, + action: "del", + obj: op.obj, + elemId, + pred: [], + } } else { - opsById[succ] = {id: succ, action: 'del', obj: op.obj, key: op.key, pred: []} + opsById[succ] = { + id: succ, + action: "del", + obj: op.obj, + key: op.key, + pred: [], + } } } opsById[succ].pred.push(op.id) @@ -906,14 +1096,15 @@ function groupChangeOps(changes, ops) { delete op.succ } for (let op of Object.values(opsById)) { - if (op.action === 'del') ops.push(op) + if (op.action === "del") ops.push(op) } for (let op of ops) { const { counter, actorId } = parseOpId(op.id) const actorChanges = changesByActor[actorId] // Binary search to find the change that should contain this operation - let left = 0, right = actorChanges.length + let left = 0, + right = actorChanges.length while (left < right) { const index = Math.floor((left + right) / 2) if (actorChanges[index].maxOp < counter) { @@ -933,7 +1124,8 @@ function groupChangeOps(changes, ops) { change.startOp = change.maxOp - change.ops.length + 1 delete change.maxOp for (let i = 0; i < change.ops.length; i++) { - const op = change.ops[i], expectedId = `${change.startOp + i}@${change.actor}` + const op = change.ops[i], + expectedId = `${change.startOp + i}@${change.actor}` if (op.id !== expectedId) { throw new RangeError(`Expected opId ${expectedId}, got ${op.id}`) } @@ -949,7 +1141,9 @@ function decodeDocumentChanges(changes, expectedHeads) { change.deps = [] for (let index of change.depsNum.map(d => d.depsIndex)) { if (!changes[index] || !changes[index].hash) { - throw new RangeError(`No hash for index ${index} while processing index ${i}`) + throw new RangeError( + `No hash for index ${index} while processing index ${i}` + ) } const hash = changes[index].hash change.deps.push(hash) @@ -970,18 +1164,30 @@ function decodeDocumentChanges(changes, expectedHeads) { } const actualHeads = Object.keys(heads).sort() - let headsEqual = (actualHeads.length === expectedHeads.length), i = 0 + let headsEqual = actualHeads.length === expectedHeads.length, + i = 0 while (headsEqual && i < actualHeads.length) { - headsEqual = (actualHeads[i] === expectedHeads[i]) + headsEqual = actualHeads[i] === expectedHeads[i] i++ } if (!headsEqual) { - throw new RangeError(`Mismatched heads hashes: expected ${expectedHeads.join(', ')}, got ${actualHeads.join(', ')}`) + throw new RangeError( + `Mismatched heads hashes: expected ${expectedHeads.join( + ", " + )}, got ${actualHeads.join(", ")}` + ) } } function encodeDocumentHeader(doc) { - const { changesColumns, opsColumns, actorIds, heads, headsIndexes, extraBytes } = doc + const { + changesColumns, + opsColumns, + actorIds, + heads, + headsIndexes, + extraBytes, + } = doc for (let column of changesColumns) deflateColumn(column) for (let column of opsColumns) deflateColumn(column) @@ -996,7 +1202,8 @@ function encodeDocumentHeader(doc) { } encodeColumnInfo(encoder, changesColumns) encodeColumnInfo(encoder, opsColumns) - for (let column of changesColumns) encoder.appendRawBytes(column.encoder.buffer) + for (let column of changesColumns) + encoder.appendRawBytes(column.encoder.buffer) for (let column of opsColumns) encoder.appendRawBytes(column.encoder.buffer) for (let index of headsIndexes) encoder.appendUint53(index) if (extraBytes) encoder.appendRawBytes(extraBytes) @@ -1007,14 +1214,19 @@ function decodeDocumentHeader(buffer) { const documentDecoder = new Decoder(buffer) const header = decodeContainerHeader(documentDecoder, 
true) const decoder = new Decoder(header.chunkData) - if (!documentDecoder.done) throw new RangeError('Encoded document has trailing data') - if (header.chunkType !== CHUNK_TYPE_DOCUMENT) throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) + if (!documentDecoder.done) + throw new RangeError("Encoded document has trailing data") + if (header.chunkType !== CHUNK_TYPE_DOCUMENT) + throw new RangeError(`Unexpected chunk type: ${header.chunkType}`) - const actorIds = [], numActors = decoder.readUint53() + const actorIds = [], + numActors = decoder.readUint53() for (let i = 0; i < numActors; i++) { actorIds.push(decoder.readHexString()) } - const heads = [], headsIndexes = [], numHeads = decoder.readUint53() + const heads = [], + headsIndexes = [], + numHeads = decoder.readUint53() for (let i = 0; i < numHeads; i++) { heads.push(bytesToHexString(decoder.readRawBytes(32))) } @@ -1033,14 +1245,27 @@ function decodeDocumentHeader(buffer) { for (let i = 0; i < numHeads; i++) headsIndexes.push(decoder.readUint53()) } - const extraBytes = decoder.readRawBytes(decoder.buf.byteLength - decoder.offset) - return { changesColumns, opsColumns, actorIds, heads, headsIndexes, extraBytes } + const extraBytes = decoder.readRawBytes( + decoder.buf.byteLength - decoder.offset + ) + return { + changesColumns, + opsColumns, + actorIds, + heads, + headsIndexes, + extraBytes, + } } function decodeDocument(buffer) { - const { changesColumns, opsColumns, actorIds, heads } = decodeDocumentHeader(buffer) + const { changesColumns, opsColumns, actorIds, heads } = + decodeDocumentHeader(buffer) const changes = decodeColumns(changesColumns, actorIds, DOCUMENT_COLUMNS) - const ops = decodeOps(decodeColumns(opsColumns, actorIds, DOC_OPS_COLUMNS), true) + const ops = decodeOps( + decodeColumns(opsColumns, actorIds, DOC_OPS_COLUMNS), + true + ) groupChangeOps(changes, ops) decodeDocumentChanges(changes, heads) return changes @@ -1051,7 +1276,7 @@ function decodeDocument(buffer) { */ function deflateColumn(column) { if (column.encoder.buffer.byteLength >= DEFLATE_MIN_SIZE) { - column.encoder = {buffer: pako.deflateRaw(column.encoder.buffer)} + column.encoder = { buffer: pako.deflateRaw(column.encoder.buffer) } column.columnId |= COLUMN_TYPE_DEFLATE } } @@ -1067,8 +1292,24 @@ function inflateColumn(column) { } module.exports = { - COLUMN_TYPE, VALUE_TYPE, ACTIONS, OBJECT_TYPE, DOC_OPS_COLUMNS, CHANGE_COLUMNS, DOCUMENT_COLUMNS, - encoderByColumnId, decoderByColumnId, makeDecoders, decodeValue, - splitContainers, encodeChange, decodeChangeColumns, decodeChange, decodeChangeMeta, decodeChanges, - encodeDocumentHeader, decodeDocumentHeader, decodeDocument + COLUMN_TYPE, + VALUE_TYPE, + ACTIONS, + OBJECT_TYPE, + DOC_OPS_COLUMNS, + CHANGE_COLUMNS, + DOCUMENT_COLUMNS, + encoderByColumnId, + decoderByColumnId, + makeDecoders, + decodeValue, + splitContainers, + encodeChange, + decodeChangeColumns, + decodeChange, + decodeChangeMeta, + decodeChanges, + encodeDocumentHeader, + decodeDocumentHeader, + decodeDocument, } diff --git a/javascript/test/legacy/common.js b/javascript/test/legacy/common.js index 02e91392..7668e982 100644 --- a/javascript/test/legacy/common.js +++ b/javascript/test/legacy/common.js @@ -1,5 +1,5 @@ function isObject(obj) { - return typeof obj === 'object' && obj !== null + return typeof obj === "object" && obj !== null } /** @@ -20,11 +20,11 @@ function copyObject(obj) { * with an actor ID, separated by an `@` sign) and returns an object `{counter, actorId}`. 
*/ function parseOpId(opId) { - const match = /^(\d+)@(.*)$/.exec(opId || '') + const match = /^(\d+)@(.*)$/.exec(opId || "") if (!match) { throw new RangeError(`Not a valid opId: ${opId}`) } - return {counter: parseInt(match[1], 10), actorId: match[2]} + return { counter: parseInt(match[1], 10), actorId: match[2] } } /** @@ -32,7 +32,7 @@ function parseOpId(opId) { */ function equalBytes(array1, array2) { if (!(array1 instanceof Uint8Array) || !(array2 instanceof Uint8Array)) { - throw new TypeError('equalBytes can only compare Uint8Arrays') + throw new TypeError("equalBytes can only compare Uint8Arrays") } if (array1.byteLength !== array2.byteLength) return false for (let i = 0; i < array1.byteLength; i++) { @@ -51,5 +51,9 @@ function createArrayOfNulls(length) { } module.exports = { - isObject, copyObject, parseOpId, equalBytes, createArrayOfNulls + isObject, + copyObject, + parseOpId, + equalBytes, + createArrayOfNulls, } diff --git a/javascript/test/legacy/encoding.js b/javascript/test/legacy/encoding.js index 92b62df6..f7650faf 100644 --- a/javascript/test/legacy/encoding.js +++ b/javascript/test/legacy/encoding.js @@ -6,7 +6,7 @@ * https://github.com/anonyco/FastestSmallestTextEncoderDecoder */ const utf8encoder = new TextEncoder() -const utf8decoder = new TextDecoder('utf-8') +const utf8decoder = new TextDecoder("utf-8") function stringToUtf8(string) { return utf8encoder.encode(string) @@ -20,30 +20,48 @@ function utf8ToString(buffer) { * Converts a string consisting of hexadecimal digits into an Uint8Array. */ function hexStringToBytes(value) { - if (typeof value !== 'string') { - throw new TypeError('value is not a string') + if (typeof value !== "string") { + throw new TypeError("value is not a string") } if (!/^([0-9a-f][0-9a-f])*$/.test(value)) { - throw new RangeError('value is not hexadecimal') + throw new RangeError("value is not hexadecimal") } - if (value === '') { + if (value === "") { return new Uint8Array(0) } else { return new Uint8Array(value.match(/../g).map(b => parseInt(b, 16))) } } -const NIBBLE_TO_HEX = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] +const NIBBLE_TO_HEX = [ + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "c", + "d", + "e", + "f", +] const BYTE_TO_HEX = new Array(256) for (let i = 0; i < 256; i++) { - BYTE_TO_HEX[i] = `${NIBBLE_TO_HEX[(i >>> 4) & 0xf]}${NIBBLE_TO_HEX[i & 0xf]}`; + BYTE_TO_HEX[i] = `${NIBBLE_TO_HEX[(i >>> 4) & 0xf]}${NIBBLE_TO_HEX[i & 0xf]}` } /** * Converts a Uint8Array into the equivalent hexadecimal string. */ function bytesToHexString(bytes) { - let hex = '', len = bytes.byteLength + let hex = "", + len = bytes.byteLength for (let i = 0; i < len; i++) { hex += BYTE_TO_HEX[bytes[i]] } @@ -95,14 +113,17 @@ class Encoder { * appends it to the buffer. Returns the number of bytes written. */ appendUint32(value) { - if (!Number.isInteger(value)) throw new RangeError('value is not an integer') - if (value < 0 || value > 0xffffffff) throw new RangeError('number out of range') + if (!Number.isInteger(value)) + throw new RangeError("value is not an integer") + if (value < 0 || value > 0xffffffff) + throw new RangeError("number out of range") const numBytes = Math.max(1, Math.ceil((32 - Math.clz32(value)) / 7)) if (this.offset + numBytes > this.buf.byteLength) this.grow() for (let i = 0; i < numBytes; i++) { - this.buf[this.offset + i] = (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) + this.buf[this.offset + i] = + (value & 0x7f) | (i === numBytes - 1 ? 
0x00 : 0x80) value >>>= 7 // zero-filling right shift } this.offset += numBytes @@ -115,14 +136,19 @@ class Encoder { * it to the buffer. Returns the number of bytes written. */ appendInt32(value) { - if (!Number.isInteger(value)) throw new RangeError('value is not an integer') - if (value < -0x80000000 || value > 0x7fffffff) throw new RangeError('number out of range') + if (!Number.isInteger(value)) + throw new RangeError("value is not an integer") + if (value < -0x80000000 || value > 0x7fffffff) + throw new RangeError("number out of range") - const numBytes = Math.ceil((33 - Math.clz32(value >= 0 ? value : -value - 1)) / 7) + const numBytes = Math.ceil( + (33 - Math.clz32(value >= 0 ? value : -value - 1)) / 7 + ) if (this.offset + numBytes > this.buf.byteLength) this.grow() for (let i = 0; i < numBytes; i++) { - this.buf[this.offset + i] = (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) + this.buf[this.offset + i] = + (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) value >>= 7 // sign-propagating right shift } this.offset += numBytes @@ -135,9 +161,10 @@ class Encoder { * (53 bits). */ appendUint53(value) { - if (!Number.isInteger(value)) throw new RangeError('value is not an integer') + if (!Number.isInteger(value)) + throw new RangeError("value is not an integer") if (value < 0 || value > Number.MAX_SAFE_INTEGER) { - throw new RangeError('number out of range') + throw new RangeError("number out of range") } const high32 = Math.floor(value / 0x100000000) const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned @@ -150,9 +177,10 @@ class Encoder { * (53 bits). */ appendInt53(value) { - if (!Number.isInteger(value)) throw new RangeError('value is not an integer') + if (!Number.isInteger(value)) + throw new RangeError("value is not an integer") if (value < Number.MIN_SAFE_INTEGER || value > Number.MAX_SAFE_INTEGER) { - throw new RangeError('number out of range') + throw new RangeError("number out of range") } const high32 = Math.floor(value / 0x100000000) const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned @@ -167,10 +195,10 @@ class Encoder { */ appendUint64(high32, low32) { if (!Number.isInteger(high32) || !Number.isInteger(low32)) { - throw new RangeError('value is not an integer') + throw new RangeError("value is not an integer") } if (high32 < 0 || high32 > 0xffffffff || low32 < 0 || low32 > 0xffffffff) { - throw new RangeError('number out of range') + throw new RangeError("number out of range") } if (high32 === 0) return this.appendUint32(low32) @@ -180,10 +208,12 @@ class Encoder { this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 low32 >>>= 7 // zero-filling right shift } - this.buf[this.offset + 4] = (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) + this.buf[this.offset + 4] = + (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) high32 >>>= 3 for (let i = 5; i < numBytes; i++) { - this.buf[this.offset + i] = (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) + this.buf[this.offset + i] = + (high32 & 0x7f) | (i === numBytes - 1 ? 
0x00 : 0x80) high32 >>>= 7 } this.offset += numBytes @@ -200,25 +230,35 @@ class Encoder { */ appendInt64(high32, low32) { if (!Number.isInteger(high32) || !Number.isInteger(low32)) { - throw new RangeError('value is not an integer') + throw new RangeError("value is not an integer") } - if (high32 < -0x80000000 || high32 > 0x7fffffff || low32 < -0x80000000 || low32 > 0xffffffff) { - throw new RangeError('number out of range') + if ( + high32 < -0x80000000 || + high32 > 0x7fffffff || + low32 < -0x80000000 || + low32 > 0xffffffff + ) { + throw new RangeError("number out of range") } low32 >>>= 0 // interpret as unsigned if (high32 === 0 && low32 <= 0x7fffffff) return this.appendInt32(low32) - if (high32 === -1 && low32 >= 0x80000000) return this.appendInt32(low32 - 0x100000000) + if (high32 === -1 && low32 >= 0x80000000) + return this.appendInt32(low32 - 0x100000000) - const numBytes = Math.ceil((65 - Math.clz32(high32 >= 0 ? high32 : -high32 - 1)) / 7) + const numBytes = Math.ceil( + (65 - Math.clz32(high32 >= 0 ? high32 : -high32 - 1)) / 7 + ) if (this.offset + numBytes > this.buf.byteLength) this.grow() for (let i = 0; i < 4; i++) { this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 low32 >>>= 7 // zero-filling right shift } - this.buf[this.offset + 4] = (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) + this.buf[this.offset + 4] = + (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) high32 >>= 3 // sign-propagating right shift for (let i = 5; i < numBytes; i++) { - this.buf[this.offset + i] = (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) + this.buf[this.offset + i] = + (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) high32 >>= 7 } this.offset += numBytes @@ -243,7 +283,7 @@ class Encoder { * number of bytes appended. */ appendRawString(value) { - if (typeof value !== 'string') throw new TypeError('value is not a string') + if (typeof value !== "string") throw new TypeError("value is not a string") return this.appendRawBytes(stringToUtf8(value)) } @@ -262,7 +302,7 @@ class Encoder { * (where the length is encoded as an unsigned LEB128 integer). */ appendPrefixedString(value) { - if (typeof value !== 'string') throw new TypeError('value is not a string') + if (typeof value !== "string") throw new TypeError("value is not a string") this.appendPrefixedBytes(stringToUtf8(value)) return this } @@ -281,8 +321,7 @@ class Encoder { * Flushes any unwritten data to the buffer. Call this before reading from * the buffer constructed by this Encoder. */ - finish() { - } + finish() {} } /** @@ -321,7 +360,7 @@ class Decoder { */ skip(bytes) { if (this.offset + bytes > this.buf.byteLength) { - throw new RangeError('cannot skip beyond end of buffer') + throw new RangeError("cannot skip beyond end of buffer") } this.offset += bytes } @@ -339,18 +378,20 @@ class Decoder { * Throws an exception if the value doesn't fit in a 32-bit unsigned int. 
*/ readUint32() { - let result = 0, shift = 0 + let result = 0, + shift = 0 while (this.offset < this.buf.byteLength) { const nextByte = this.buf[this.offset] - if (shift === 28 && (nextByte & 0xf0) !== 0) { // more than 5 bytes, or value > 0xffffffff - throw new RangeError('number out of range') + if (shift === 28 && (nextByte & 0xf0) !== 0) { + // more than 5 bytes, or value > 0xffffffff + throw new RangeError("number out of range") } - result = (result | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned + result = (result | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned shift += 7 this.offset++ if ((nextByte & 0x80) === 0) return result } - throw new RangeError('buffer ended with incomplete number') + throw new RangeError("buffer ended with incomplete number") } /** @@ -358,13 +399,17 @@ class Decoder { * Throws an exception if the value doesn't fit in a 32-bit signed int. */ readInt32() { - let result = 0, shift = 0 + let result = 0, + shift = 0 while (this.offset < this.buf.byteLength) { const nextByte = this.buf[this.offset] - if ((shift === 28 && (nextByte & 0x80) !== 0) || // more than 5 bytes - (shift === 28 && (nextByte & 0x40) === 0 && (nextByte & 0x38) !== 0) || // positive int > 0x7fffffff - (shift === 28 && (nextByte & 0x40) !== 0 && (nextByte & 0x38) !== 0x38)) { // negative int < -0x80000000 - throw new RangeError('number out of range') + if ( + (shift === 28 && (nextByte & 0x80) !== 0) || // more than 5 bytes + (shift === 28 && (nextByte & 0x40) === 0 && (nextByte & 0x38) !== 0) || // positive int > 0x7fffffff + (shift === 28 && (nextByte & 0x40) !== 0 && (nextByte & 0x38) !== 0x38) + ) { + // negative int < -0x80000000 + throw new RangeError("number out of range") } result |= (nextByte & 0x7f) << shift shift += 7 @@ -378,7 +423,7 @@ class Decoder { } } } - throw new RangeError('buffer ended with incomplete number') + throw new RangeError("buffer ended with incomplete number") } /** @@ -389,7 +434,7 @@ class Decoder { readUint53() { const { low32, high32 } = this.readUint64() if (high32 < 0 || high32 > 0x1fffff) { - throw new RangeError('number out of range') + throw new RangeError("number out of range") } return high32 * 0x100000000 + low32 } @@ -401,8 +446,12 @@ class Decoder { */ readInt53() { const { low32, high32 } = this.readInt64() - if (high32 < -0x200000 || (high32 === -0x200000 && low32 === 0) || high32 > 0x1fffff) { - throw new RangeError('number out of range') + if ( + high32 < -0x200000 || + (high32 === -0x200000 && low32 === 0) || + high32 > 0x1fffff + ) { + throw new RangeError("number out of range") } return high32 * 0x100000000 + low32 } @@ -414,10 +463,12 @@ class Decoder { * `{high32, low32}`. 
*/ readUint64() { - let low32 = 0, high32 = 0, shift = 0 + let low32 = 0, + high32 = 0, + shift = 0 while (this.offset < this.buf.byteLength && shift <= 28) { const nextByte = this.buf[this.offset] - low32 = (low32 | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned + low32 = (low32 | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned if (shift === 28) { high32 = (nextByte & 0x70) >>> 4 } @@ -429,15 +480,16 @@ class Decoder { shift = 3 while (this.offset < this.buf.byteLength) { const nextByte = this.buf[this.offset] - if (shift === 31 && (nextByte & 0xfe) !== 0) { // more than 10 bytes, or value > 2^64 - 1 - throw new RangeError('number out of range') + if (shift === 31 && (nextByte & 0xfe) !== 0) { + // more than 10 bytes, or value > 2^64 - 1 + throw new RangeError("number out of range") } - high32 = (high32 | (nextByte & 0x7f) << shift) >>> 0 + high32 = (high32 | ((nextByte & 0x7f) << shift)) >>> 0 shift += 7 this.offset++ if ((nextByte & 0x80) === 0) return { high32, low32 } } - throw new RangeError('buffer ended with incomplete number') + throw new RangeError("buffer ended with incomplete number") } /** @@ -448,17 +500,20 @@ class Decoder { * sign of the `high32` half indicates the sign of the 64-bit number. */ readInt64() { - let low32 = 0, high32 = 0, shift = 0 + let low32 = 0, + high32 = 0, + shift = 0 while (this.offset < this.buf.byteLength && shift <= 28) { const nextByte = this.buf[this.offset] - low32 = (low32 | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned + low32 = (low32 | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned if (shift === 28) { high32 = (nextByte & 0x70) >>> 4 } shift += 7 this.offset++ if ((nextByte & 0x80) === 0) { - if ((nextByte & 0x40) !== 0) { // sign-extend negative integer + if ((nextByte & 0x40) !== 0) { + // sign-extend negative integer if (shift < 32) low32 = (low32 | (-1 << shift)) >>> 0 high32 |= -1 << Math.max(shift - 32, 0) } @@ -472,19 +527,20 @@ class Decoder { // On the 10th byte there are only two valid values: all 7 value bits zero // (if the value is positive) or all 7 bits one (if the value is negative) if (shift === 31 && nextByte !== 0 && nextByte !== 0x7f) { - throw new RangeError('number out of range') + throw new RangeError("number out of range") } high32 |= (nextByte & 0x7f) << shift shift += 7 this.offset++ if ((nextByte & 0x80) === 0) { - if ((nextByte & 0x40) !== 0 && shift < 32) { // sign-extend negative integer + if ((nextByte & 0x40) !== 0 && shift < 32) { + // sign-extend negative integer high32 |= -1 << shift } return { high32, low32 } } } - throw new RangeError('buffer ended with incomplete number') + throw new RangeError("buffer ended with incomplete number") } /** @@ -494,7 +550,7 @@ class Decoder { readRawBytes(length) { const start = this.offset if (start + length > this.buf.byteLength) { - throw new RangeError('subarray exceeds buffer size') + throw new RangeError("subarray exceeds buffer size") } this.offset += length return this.buf.subarray(start, this.offset) @@ -559,7 +615,7 @@ class RLEEncoder extends Encoder { constructor(type) { super() this.type = type - this.state = 'empty' + this.state = "empty" this.lastValue = undefined this.count = 0 this.literal = [] @@ -578,76 +634,81 @@ class RLEEncoder extends Encoder { */ _appendValue(value, repetitions = 1) { if (repetitions <= 0) return - if (this.state === 'empty') { - this.state = (value === null ? 'nulls' : (repetitions === 1 ? 
'loneValue' : 'repetition')) + if (this.state === "empty") { + this.state = + value === null + ? "nulls" + : repetitions === 1 + ? "loneValue" + : "repetition" this.lastValue = value this.count = repetitions - } else if (this.state === 'loneValue') { + } else if (this.state === "loneValue") { if (value === null) { this.flush() - this.state = 'nulls' + this.state = "nulls" this.count = repetitions } else if (value === this.lastValue) { - this.state = 'repetition' + this.state = "repetition" this.count = 1 + repetitions } else if (repetitions > 1) { this.flush() - this.state = 'repetition' + this.state = "repetition" this.count = repetitions this.lastValue = value } else { - this.state = 'literal' + this.state = "literal" this.literal = [this.lastValue] this.lastValue = value } - } else if (this.state === 'repetition') { + } else if (this.state === "repetition") { if (value === null) { this.flush() - this.state = 'nulls' + this.state = "nulls" this.count = repetitions } else if (value === this.lastValue) { this.count += repetitions } else if (repetitions > 1) { this.flush() - this.state = 'repetition' + this.state = "repetition" this.count = repetitions this.lastValue = value } else { this.flush() - this.state = 'loneValue' + this.state = "loneValue" this.lastValue = value } - } else if (this.state === 'literal') { + } else if (this.state === "literal") { if (value === null) { this.literal.push(this.lastValue) this.flush() - this.state = 'nulls' + this.state = "nulls" this.count = repetitions } else if (value === this.lastValue) { this.flush() - this.state = 'repetition' + this.state = "repetition" this.count = 1 + repetitions } else if (repetitions > 1) { this.literal.push(this.lastValue) this.flush() - this.state = 'repetition' + this.state = "repetition" this.count = repetitions this.lastValue = value } else { this.literal.push(this.lastValue) this.lastValue = value } - } else if (this.state === 'nulls') { + } else if (this.state === "nulls") { if (value === null) { this.count += repetitions } else if (repetitions > 1) { this.flush() - this.state = 'repetition' + this.state = "repetition" this.count = repetitions this.lastValue = value } else { this.flush() - this.state = 'loneValue' + this.state = "loneValue" this.lastValue = value } } @@ -666,13 +727,16 @@ class RLEEncoder extends Encoder { */ copyFrom(decoder, options = {}) { const { count, sumValues, sumShift } = options - if (!(decoder instanceof RLEDecoder) || (decoder.type !== this.type)) { - throw new TypeError('incompatible type of decoder') + if (!(decoder instanceof RLEDecoder) || decoder.type !== this.type) { + throw new TypeError("incompatible type of decoder") } - let remaining = (typeof count === 'number' ? count : Number.MAX_SAFE_INTEGER) - let nonNullValues = 0, sum = 0 - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) - if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} + let remaining = typeof count === "number" ? count : Number.MAX_SAFE_INTEGER + let nonNullValues = 0, + sum = 0 + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) + if (remaining === 0 || decoder.done) + return sumValues ? { nonNullValues, sum } : { nonNullValues } // Copy a value so that we have a well-defined starting state. 
NB: when super.copyFrom() is // called by the DeltaEncoder subclass, the following calls to readValue() and appendValue() @@ -684,87 +748,101 @@ class RLEEncoder extends Encoder { remaining -= numNulls decoder.count -= numNulls - 1 this.appendValue(null, numNulls) - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) - if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) + if (remaining === 0 || decoder.done) + return sumValues ? { nonNullValues, sum } : { nonNullValues } firstValue = decoder.readValue() - if (firstValue === null) throw new RangeError('null run must be followed by non-null value') + if (firstValue === null) + throw new RangeError("null run must be followed by non-null value") } this.appendValue(firstValue) remaining-- nonNullValues++ - if (sumValues) sum += (sumShift ? (firstValue >>> sumShift) : firstValue) - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) - if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} + if (sumValues) sum += sumShift ? firstValue >>> sumShift : firstValue + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) + if (remaining === 0 || decoder.done) + return sumValues ? { nonNullValues, sum } : { nonNullValues } // Copy data at the record level without expanding repetitions - let firstRun = (decoder.count > 0) + let firstRun = decoder.count > 0 while (remaining > 0 && !decoder.done) { if (!firstRun) decoder.readRecord() const numValues = Math.min(decoder.count, remaining) decoder.count -= numValues - if (decoder.state === 'literal') { + if (decoder.state === "literal") { nonNullValues += numValues for (let i = 0; i < numValues; i++) { - if (decoder.done) throw new RangeError('incomplete literal') + if (decoder.done) throw new RangeError("incomplete literal") const value = decoder.readRawValue() - if (value === decoder.lastValue) throw new RangeError('Repetition of values is not allowed in literal') + if (value === decoder.lastValue) + throw new RangeError( + "Repetition of values is not allowed in literal" + ) decoder.lastValue = value this._appendValue(value) - if (sumValues) sum += (sumShift ? (value >>> sumShift) : value) + if (sumValues) sum += sumShift ? value >>> sumShift : value } - } else if (decoder.state === 'repetition') { + } else if (decoder.state === "repetition") { nonNullValues += numValues - if (sumValues) sum += numValues * (sumShift ? (decoder.lastValue >>> sumShift) : decoder.lastValue) + if (sumValues) + sum += + numValues * + (sumShift ? 
decoder.lastValue >>> sumShift : decoder.lastValue) const value = decoder.lastValue this._appendValue(value) if (numValues > 1) { this._appendValue(value) - if (this.state !== 'repetition') throw new RangeError(`Unexpected state ${this.state}`) + if (this.state !== "repetition") + throw new RangeError(`Unexpected state ${this.state}`) this.count += numValues - 2 } - } else if (decoder.state === 'nulls') { + } else if (decoder.state === "nulls") { this._appendValue(null) - if (this.state !== 'nulls') throw new RangeError(`Unexpected state ${this.state}`) + if (this.state !== "nulls") + throw new RangeError(`Unexpected state ${this.state}`) this.count += numValues - 1 } firstRun = false remaining -= numValues } - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) - return sumValues ? {nonNullValues, sum} : {nonNullValues} + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) + return sumValues ? { nonNullValues, sum } : { nonNullValues } } /** * Private method, do not call from outside the class. */ flush() { - if (this.state === 'loneValue') { + if (this.state === "loneValue") { this.appendInt32(-1) this.appendRawValue(this.lastValue) - } else if (this.state === 'repetition') { + } else if (this.state === "repetition") { this.appendInt53(this.count) this.appendRawValue(this.lastValue) - } else if (this.state === 'literal') { + } else if (this.state === "literal") { this.appendInt53(-this.literal.length) for (let v of this.literal) this.appendRawValue(v) - } else if (this.state === 'nulls') { + } else if (this.state === "nulls") { this.appendInt32(0) this.appendUint53(this.count) } - this.state = 'empty' + this.state = "empty" } /** * Private method, do not call from outside the class. */ appendRawValue(value) { - if (this.type === 'int') { + if (this.type === "int") { this.appendInt53(value) - } else if (this.type === 'uint') { + } else if (this.type === "uint") { this.appendUint53(value) - } else if (this.type === 'utf8') { + } else if (this.type === "utf8") { this.appendPrefixedString(value) } else { throw new RangeError(`Unknown RLEEncoder datatype: ${this.type}`) @@ -776,9 +854,9 @@ class RLEEncoder extends Encoder { * the buffer constructed by this Encoder. */ finish() { - if (this.state === 'literal') this.literal.push(this.lastValue) + if (this.state === "literal") this.literal.push(this.lastValue) // Don't write anything if the only values we have seen are nulls - if (this.state !== 'nulls' || this.offset > 0) this.flush() + if (this.state !== "nulls" || this.offset > 0) this.flush() } } @@ -800,7 +878,7 @@ class RLEDecoder extends Decoder { * position, and true if we are at the end of the buffer. 
*/ get done() { - return (this.count === 0) && (this.offset === this.buf.byteLength) + return this.count === 0 && this.offset === this.buf.byteLength } /** @@ -821,9 +899,10 @@ class RLEDecoder extends Decoder { if (this.done) return null if (this.count === 0) this.readRecord() this.count -= 1 - if (this.state === 'literal') { + if (this.state === "literal") { const value = this.readRawValue() - if (value === this.lastValue) throw new RangeError('Repetition of values is not allowed in literal') + if (value === this.lastValue) + throw new RangeError("Repetition of values is not allowed in literal") this.lastValue = value return value } else { @@ -839,20 +918,22 @@ class RLEDecoder extends Decoder { if (this.count === 0) { this.count = this.readInt53() if (this.count > 0) { - this.lastValue = (this.count <= numSkip) ? this.skipRawValues(1) : this.readRawValue() - this.state = 'repetition' + this.lastValue = + this.count <= numSkip ? this.skipRawValues(1) : this.readRawValue() + this.state = "repetition" } else if (this.count < 0) { this.count = -this.count - this.state = 'literal' - } else { // this.count == 0 + this.state = "literal" + } else { + // this.count == 0 this.count = this.readUint53() this.lastValue = null - this.state = 'nulls' + this.state = "nulls" } } const consume = Math.min(numSkip, this.count) - if (this.state === 'literal') this.skipRawValues(consume) + if (this.state === "literal") this.skipRawValues(consume) numSkip -= consume this.count -= consume } @@ -866,23 +947,34 @@ class RLEDecoder extends Decoder { this.count = this.readInt53() if (this.count > 1) { const value = this.readRawValue() - if ((this.state === 'repetition' || this.state === 'literal') && this.lastValue === value) { - throw new RangeError('Successive repetitions with the same value are not allowed') + if ( + (this.state === "repetition" || this.state === "literal") && + this.lastValue === value + ) { + throw new RangeError( + "Successive repetitions with the same value are not allowed" + ) } - this.state = 'repetition' + this.state = "repetition" this.lastValue = value } else if (this.count === 1) { - throw new RangeError('Repetition count of 1 is not allowed, use a literal instead') + throw new RangeError( + "Repetition count of 1 is not allowed, use a literal instead" + ) } else if (this.count < 0) { this.count = -this.count - if (this.state === 'literal') throw new RangeError('Successive literals are not allowed') - this.state = 'literal' - } else { // this.count == 0 - if (this.state === 'nulls') throw new RangeError('Successive null runs are not allowed') + if (this.state === "literal") + throw new RangeError("Successive literals are not allowed") + this.state = "literal" + } else { + // this.count == 0 + if (this.state === "nulls") + throw new RangeError("Successive null runs are not allowed") this.count = this.readUint53() - if (this.count === 0) throw new RangeError('Zero-length null runs are not allowed') + if (this.count === 0) + throw new RangeError("Zero-length null runs are not allowed") this.lastValue = null - this.state = 'nulls' + this.state = "nulls" } } @@ -891,11 +983,11 @@ class RLEDecoder extends Decoder { * Reads one value of the datatype configured on construction. 
*/ readRawValue() { - if (this.type === 'int') { + if (this.type === "int") { return this.readInt53() - } else if (this.type === 'uint') { + } else if (this.type === "uint") { return this.readUint53() - } else if (this.type === 'utf8') { + } else if (this.type === "utf8") { return this.readPrefixedString() } else { throw new RangeError(`Unknown RLEDecoder datatype: ${this.type}`) @@ -907,14 +999,14 @@ class RLEDecoder extends Decoder { * Skips over `num` values of the datatype configured on construction. */ skipRawValues(num) { - if (this.type === 'utf8') { + if (this.type === "utf8") { for (let i = 0; i < num; i++) this.skip(this.readUint53()) } else { while (num > 0 && this.offset < this.buf.byteLength) { if ((this.buf[this.offset] & 0x80) === 0) num-- this.offset++ } - if (num > 0) throw new RangeError('cannot skip beyond end of buffer') + if (num > 0) throw new RangeError("cannot skip beyond end of buffer") } } } @@ -931,7 +1023,7 @@ class RLEDecoder extends Decoder { */ class DeltaEncoder extends RLEEncoder { constructor() { - super('int') + super("int") this.absoluteValue = 0 } @@ -941,7 +1033,7 @@ class DeltaEncoder extends RLEEncoder { */ appendValue(value, repetitions = 1) { if (repetitions <= 0) return - if (typeof value === 'number') { + if (typeof value === "number") { super.appendValue(value - this.absoluteValue, 1) this.absoluteValue = value if (repetitions > 1) super.appendValue(0, repetitions - 1) @@ -957,26 +1049,29 @@ class DeltaEncoder extends RLEEncoder { */ copyFrom(decoder, options = {}) { if (options.sumValues) { - throw new RangeError('unsupported options for DeltaEncoder.copyFrom()') + throw new RangeError("unsupported options for DeltaEncoder.copyFrom()") } if (!(decoder instanceof DeltaDecoder)) { - throw new TypeError('incompatible type of decoder') + throw new TypeError("incompatible type of decoder") } let remaining = options.count - if (remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${remaining} values`) + if (remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${remaining} values`) if (remaining === 0 || decoder.done) return // Copy any null values, and the first non-null value, so that appendValue() computes the // difference between the encoder's last value and the decoder's first (absolute) value. - let value = decoder.readValue(), nulls = 0 + let value = decoder.readValue(), + nulls = 0 this.appendValue(value) if (value === null) { nulls = decoder.count + 1 if (remaining !== undefined && remaining < nulls) nulls = remaining decoder.count -= nulls - 1 this.count += nulls - 1 - if (remaining > nulls && decoder.done) throw new RangeError(`cannot copy ${remaining} values`) + if (remaining > nulls && decoder.done) + throw new RangeError(`cannot copy ${remaining} values`) if (remaining === nulls || decoder.done) return // The next value read is certain to be non-null because we're not at the end of the decoder, @@ -989,7 +1084,10 @@ class DeltaEncoder extends RLEEncoder { // value, while subsequent values are relative. Thus, the sum of all of the (non-null) copied // values must equal the absolute value of the final element copied. 
if (remaining !== undefined) remaining -= nulls + 1 - const { nonNullValues, sum } = super.copyFrom(decoder, {count: remaining, sumValues: true}) + const { nonNullValues, sum } = super.copyFrom(decoder, { + count: remaining, + sumValues: true, + }) if (nonNullValues > 0) { this.absoluteValue = sum decoder.absoluteValue = sum @@ -1003,7 +1101,7 @@ class DeltaEncoder extends RLEEncoder { */ class DeltaDecoder extends RLEDecoder { constructor(buffer) { - super('int', buffer) + super("int", buffer) this.absoluteValue = 0 } @@ -1036,12 +1134,12 @@ class DeltaDecoder extends RLEDecoder { while (numSkip > 0 && !this.done) { if (this.count === 0) this.readRecord() const consume = Math.min(numSkip, this.count) - if (this.state === 'literal') { + if (this.state === "literal") { for (let i = 0; i < consume; i++) { this.lastValue = this.readRawValue() this.absoluteValue += this.lastValue } - } else if (this.state === 'repetition') { + } else if (this.state === "repetition") { this.absoluteValue += consume * this.lastValue } numSkip -= consume @@ -1090,12 +1188,13 @@ class BooleanEncoder extends Encoder { */ copyFrom(decoder, options = {}) { if (!(decoder instanceof BooleanDecoder)) { - throw new TypeError('incompatible type of decoder') + throw new TypeError("incompatible type of decoder") } const { count } = options - let remaining = (typeof count === 'number' ? count : Number.MAX_SAFE_INTEGER) - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) + let remaining = typeof count === "number" ? count : Number.MAX_SAFE_INTEGER + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) if (remaining === 0 || decoder.done) return // Copy one value to bring decoder and encoder state into sync, then finish that value's repetitions @@ -1108,7 +1207,8 @@ class BooleanEncoder extends Encoder { while (remaining > 0 && !decoder.done) { decoder.count = decoder.readUint53() - if (decoder.count === 0) throw new RangeError('Zero-length runs are not allowed') + if (decoder.count === 0) + throw new RangeError("Zero-length runs are not allowed") decoder.lastValue = !decoder.lastValue this.appendUint53(this.count) @@ -1119,7 +1219,8 @@ class BooleanEncoder extends Encoder { remaining -= numCopied } - if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) + if (count && remaining > 0 && decoder.done) + throw new RangeError(`cannot copy ${count} values`) } /** @@ -1151,7 +1252,7 @@ class BooleanDecoder extends Decoder { * position, and true if we are at the end of the buffer. 
*/ get done() { - return (this.count === 0) && (this.offset === this.buf.byteLength) + return this.count === 0 && this.offset === this.buf.byteLength } /** @@ -1174,7 +1275,7 @@ class BooleanDecoder extends Decoder { this.count = this.readUint53() this.lastValue = !this.lastValue if (this.count === 0 && !this.firstRun) { - throw new RangeError('Zero-length runs are not allowed') + throw new RangeError("Zero-length runs are not allowed") } this.firstRun = false } @@ -1190,7 +1291,8 @@ class BooleanDecoder extends Decoder { if (this.count === 0) { this.count = this.readUint53() this.lastValue = !this.lastValue - if (this.count === 0) throw new RangeError('Zero-length runs are not allowed') + if (this.count === 0) + throw new RangeError("Zero-length runs are not allowed") } if (this.count < numSkip) { numSkip -= this.count @@ -1204,6 +1306,16 @@ class BooleanDecoder extends Decoder { } module.exports = { - stringToUtf8, utf8ToString, hexStringToBytes, bytesToHexString, - Encoder, Decoder, RLEEncoder, RLEDecoder, DeltaEncoder, DeltaDecoder, BooleanEncoder, BooleanDecoder + stringToUtf8, + utf8ToString, + hexStringToBytes, + bytesToHexString, + Encoder, + Decoder, + RLEEncoder, + RLEDecoder, + DeltaEncoder, + DeltaDecoder, + BooleanEncoder, + BooleanDecoder, } diff --git a/javascript/test/legacy/sync.js b/javascript/test/legacy/sync.js index 3bb1571d..233c4292 100644 --- a/javascript/test/legacy/sync.js +++ b/javascript/test/legacy/sync.js @@ -17,9 +17,14 @@ */ const Backend = null //require('./backend') -const { hexStringToBytes, bytesToHexString, Encoder, Decoder } = require('./encoding') -const { decodeChangeMeta } = require('./columnar') -const { copyObject } = require('./common') +const { + hexStringToBytes, + bytesToHexString, + Encoder, + Decoder, +} = require("./encoding") +const { decodeChangeMeta } = require("./columnar") +const { copyObject } = require("./common") const HASH_SIZE = 32 // 256 bits = 32 bytes const MESSAGE_TYPE_SYNC = 0x42 // first byte of a sync message, for identification @@ -28,7 +33,8 @@ const PEER_STATE_TYPE = 0x43 // first byte of an encoded peer state, for identif // These constants correspond to a 1% false positive rate. The values can be changed without // breaking compatibility of the network protocol, since the parameters used for a particular // Bloom filter are encoded in the wire format. -const BITS_PER_ENTRY = 10, NUM_PROBES = 7 +const BITS_PER_ENTRY = 10, + NUM_PROBES = 7 /** * A Bloom filter implementation that can be serialised to a byte array for transmission @@ -36,13 +42,15 @@ const BITS_PER_ENTRY = 10, NUM_PROBES = 7 * so this implementation does not perform its own hashing. 
*/ class BloomFilter { - constructor (arg) { + constructor(arg) { if (Array.isArray(arg)) { // arg is an array of SHA256 hashes in hexadecimal encoding this.numEntries = arg.length this.numBitsPerEntry = BITS_PER_ENTRY this.numProbes = NUM_PROBES - this.bits = new Uint8Array(Math.ceil(this.numEntries * this.numBitsPerEntry / 8)) + this.bits = new Uint8Array( + Math.ceil((this.numEntries * this.numBitsPerEntry) / 8) + ) for (let hash of arg) this.addHash(hash) } else if (arg instanceof Uint8Array) { if (arg.byteLength === 0) { @@ -55,10 +63,12 @@ class BloomFilter { this.numEntries = decoder.readUint32() this.numBitsPerEntry = decoder.readUint32() this.numProbes = decoder.readUint32() - this.bits = decoder.readRawBytes(Math.ceil(this.numEntries * this.numBitsPerEntry / 8)) + this.bits = decoder.readRawBytes( + Math.ceil((this.numEntries * this.numBitsPerEntry) / 8) + ) } } else { - throw new TypeError('invalid argument') + throw new TypeError("invalid argument") } } @@ -86,12 +96,32 @@ class BloomFilter { * http://www.ccis.northeastern.edu/home/pete/pub/bloom-filters-verification.pdf */ getProbes(hash) { - const hashBytes = hexStringToBytes(hash), modulo = 8 * this.bits.byteLength - if (hashBytes.byteLength !== 32) throw new RangeError(`Not a 256-bit hash: ${hash}`) + const hashBytes = hexStringToBytes(hash), + modulo = 8 * this.bits.byteLength + if (hashBytes.byteLength !== 32) + throw new RangeError(`Not a 256-bit hash: ${hash}`) // on the next three lines, the right shift means interpret value as unsigned - let x = ((hashBytes[0] | hashBytes[1] << 8 | hashBytes[2] << 16 | hashBytes[3] << 24) >>> 0) % modulo - let y = ((hashBytes[4] | hashBytes[5] << 8 | hashBytes[6] << 16 | hashBytes[7] << 24) >>> 0) % modulo - let z = ((hashBytes[8] | hashBytes[9] << 8 | hashBytes[10] << 16 | hashBytes[11] << 24) >>> 0) % modulo + let x = + ((hashBytes[0] | + (hashBytes[1] << 8) | + (hashBytes[2] << 16) | + (hashBytes[3] << 24)) >>> + 0) % + modulo + let y = + ((hashBytes[4] | + (hashBytes[5] << 8) | + (hashBytes[6] << 16) | + (hashBytes[7] << 24)) >>> + 0) % + modulo + let z = + ((hashBytes[8] | + (hashBytes[9] << 8) | + (hashBytes[10] << 16) | + (hashBytes[11] << 24)) >>> + 0) % + modulo const probes = [x] for (let i = 1; i < this.numProbes; i++) { x = (x + y) % modulo @@ -128,12 +158,14 @@ class BloomFilter { * Encodes a sorted array of SHA-256 hashes (as hexadecimal strings) into a byte array. */ function encodeHashes(encoder, hashes) { - if (!Array.isArray(hashes)) throw new TypeError('hashes must be an array') + if (!Array.isArray(hashes)) throw new TypeError("hashes must be an array") encoder.appendUint32(hashes.length) for (let i = 0; i < hashes.length; i++) { - if (i > 0 && hashes[i - 1] >= hashes[i]) throw new RangeError('hashes must be sorted') + if (i > 0 && hashes[i - 1] >= hashes[i]) + throw new RangeError("hashes must be sorted") const bytes = hexStringToBytes(hashes[i]) - if (bytes.byteLength !== HASH_SIZE) throw new TypeError('heads hashes must be 256 bits') + if (bytes.byteLength !== HASH_SIZE) + throw new TypeError("heads hashes must be 256 bits") encoder.appendRawBytes(bytes) } } @@ -143,7 +175,8 @@ function encodeHashes(encoder, hashes) { * array of hex strings. 
*/ function decodeHashes(decoder) { - let length = decoder.readUint32(), hashes = [] + let length = decoder.readUint32(), + hashes = [] for (let i = 0; i < length; i++) { hashes.push(bytesToHexString(decoder.readRawBytes(HASH_SIZE))) } @@ -183,11 +216,11 @@ function decodeSyncMessage(bytes) { const heads = decodeHashes(decoder) const need = decodeHashes(decoder) const haveCount = decoder.readUint32() - let message = {heads, need, have: [], changes: []} + let message = { heads, need, have: [], changes: [] } for (let i = 0; i < haveCount; i++) { const lastSync = decodeHashes(decoder) const bloom = decoder.readPrefixedBytes(decoder) - message.have.push({lastSync, bloom}) + message.have.push({ lastSync, bloom }) } const changeCount = decoder.readUint32() for (let i = 0; i < changeCount; i++) { @@ -234,7 +267,7 @@ function decodeSyncState(bytes) { function makeBloomFilter(backend, lastSync) { const newChanges = Backend.getChanges(backend, lastSync) const hashes = newChanges.map(change => decodeChangeMeta(change, true).hash) - return {lastSync, bloom: new BloomFilter(hashes).bytes} + return { lastSync, bloom: new BloomFilter(hashes).bytes } } /** @@ -245,20 +278,26 @@ function makeBloomFilter(backend, lastSync) { */ function getChangesToSend(backend, have, need) { if (have.length === 0) { - return need.map(hash => Backend.getChangeByHash(backend, hash)).filter(change => change !== undefined) + return need + .map(hash => Backend.getChangeByHash(backend, hash)) + .filter(change => change !== undefined) } - let lastSyncHashes = {}, bloomFilters = [] + let lastSyncHashes = {}, + bloomFilters = [] for (let h of have) { for (let hash of h.lastSync) lastSyncHashes[hash] = true bloomFilters.push(new BloomFilter(h.bloom)) } // Get all changes that were added since the last sync - const changes = Backend.getChanges(backend, Object.keys(lastSyncHashes)) - .map(change => decodeChangeMeta(change, true)) + const changes = Backend.getChanges(backend, Object.keys(lastSyncHashes)).map( + change => decodeChangeMeta(change, true) + ) - let changeHashes = {}, dependents = {}, hashesToSend = {} + let changeHashes = {}, + dependents = {}, + hashesToSend = {} for (let change of changes) { changeHashes[change.hash] = true @@ -292,7 +331,8 @@ function getChangesToSend(backend, have, need) { let changesToSend = [] for (let hash of need) { hashesToSend[hash] = true - if (!changeHashes[hash]) { // Change is not among those returned by getMissingChanges()? + if (!changeHashes[hash]) { + // Change is not among those returned by getMissingChanges()? 
const change = Backend.getChangeByHash(backend, hash) if (change) changesToSend.push(change) } @@ -317,7 +357,7 @@ function initSyncState() { } function compareArrays(a, b) { - return (a.length === b.length) && a.every((v, i) => v === b[i]) + return a.length === b.length && a.every((v, i) => v === b[i]) } /** @@ -329,10 +369,19 @@ function generateSyncMessage(backend, syncState) { throw new Error("generateSyncMessage called with no Automerge document") } if (!syncState) { - throw new Error("generateSyncMessage requires a syncState, which can be created with initSyncState()") + throw new Error( + "generateSyncMessage requires a syncState, which can be created with initSyncState()" + ) } - let { sharedHeads, lastSentHeads, theirHeads, theirNeed, theirHave, sentHashes } = syncState + let { + sharedHeads, + lastSentHeads, + theirHeads, + theirNeed, + theirHave, + sentHashes, + } = syncState const ourHeads = Backend.getHeads(backend) // Hashes to explicitly request from the remote peer: any missing dependencies of unapplied @@ -356,18 +405,28 @@ function generateSyncMessage(backend, syncState) { const lastSync = theirHave[0].lastSync if (!lastSync.every(hash => Backend.getChangeByHash(backend, hash))) { // we need to queue them to send us a fresh sync message, the one they sent is uninteligible so we don't know what they need - const resetMsg = {heads: ourHeads, need: [], have: [{ lastSync: [], bloom: new Uint8Array(0) }], changes: []} + const resetMsg = { + heads: ourHeads, + need: [], + have: [{ lastSync: [], bloom: new Uint8Array(0) }], + changes: [], + } return [syncState, encodeSyncMessage(resetMsg)] } } // XXX: we should limit ourselves to only sending a subset of all the messages, probably limited by a total message size // these changes should ideally be RLE encoded but we haven't implemented that yet. - let changesToSend = Array.isArray(theirHave) && Array.isArray(theirNeed) ? getChangesToSend(backend, theirHave, theirNeed) : [] + let changesToSend = + Array.isArray(theirHave) && Array.isArray(theirNeed) + ? getChangesToSend(backend, theirHave, theirNeed) + : [] // If the heads are equal, we're in sync and don't need to do anything further - const headsUnchanged = Array.isArray(lastSentHeads) && compareArrays(ourHeads, lastSentHeads) - const headsEqual = Array.isArray(theirHeads) && compareArrays(ourHeads, theirHeads) + const headsUnchanged = + Array.isArray(lastSentHeads) && compareArrays(ourHeads, lastSentHeads) + const headsEqual = + Array.isArray(theirHeads) && compareArrays(ourHeads, theirHeads) if (headsUnchanged && headsEqual && changesToSend.length === 0) { // no need to send a sync message if we know we're synced! return [syncState, null] @@ -375,12 +434,19 @@ function generateSyncMessage(backend, syncState) { // TODO: this recomputes the SHA-256 hash of each change; we should restructure this to avoid the // unnecessary recomputation - changesToSend = changesToSend.filter(change => !sentHashes[decodeChangeMeta(change, true).hash]) + changesToSend = changesToSend.filter( + change => !sentHashes[decodeChangeMeta(change, true).hash] + ) // Regular response to a sync message: send any changes that the other node // doesn't have. We leave the "have" field empty because the previous message // generated by `syncStart` already indicated what changes we have. 
- const syncMessage = {heads: ourHeads, have: ourHave, need: ourNeed, changes: changesToSend} + const syncMessage = { + heads: ourHeads, + have: ourHave, + need: ourNeed, + changes: changesToSend, + } if (changesToSend.length > 0) { sentHashes = copyObject(sentHashes) for (const change of changesToSend) { @@ -388,7 +454,10 @@ function generateSyncMessage(backend, syncState) { } } - syncState = Object.assign({}, syncState, {lastSentHeads: ourHeads, sentHashes}) + syncState = Object.assign({}, syncState, { + lastSentHeads: ourHeads, + sentHashes, + }) return [syncState, encodeSyncMessage(syncMessage)] } @@ -406,13 +475,14 @@ function generateSyncMessage(backend, syncState) { * another peer, that means that peer had those changes, and therefore we now both know about them. */ function advanceHeads(myOldHeads, myNewHeads, ourOldSharedHeads) { - const newHeads = myNewHeads.filter((head) => !myOldHeads.includes(head)) - const commonHeads = ourOldSharedHeads.filter((head) => myNewHeads.includes(head)) + const newHeads = myNewHeads.filter(head => !myOldHeads.includes(head)) + const commonHeads = ourOldSharedHeads.filter(head => + myNewHeads.includes(head) + ) const advancedHeads = [...new Set([...newHeads, ...commonHeads])].sort() return advancedHeads } - /** * Given a backend, a message message and the state of our peer, apply any changes, update what * we believe about the peer, and (if there were applied changes) produce a patch for the frontend @@ -422,10 +492,13 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { throw new Error("generateSyncMessage called with no Automerge document") } if (!oldSyncState) { - throw new Error("generateSyncMessage requires a syncState, which can be created with initSyncState()") + throw new Error( + "generateSyncMessage requires a syncState, which can be created with initSyncState()" + ) } - let { sharedHeads, lastSentHeads, sentHashes } = oldSyncState, patch = null + let { sharedHeads, lastSentHeads, sentHashes } = oldSyncState, + patch = null const message = decodeSyncMessage(binaryMessage) const beforeHeads = Backend.getHeads(backend) @@ -434,18 +507,27 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { // changes without applying them. The set of changes may also be incomplete if the sender decided // to break a large set of changes into chunks. if (message.changes.length > 0) { - [backend, patch] = Backend.applyChanges(backend, message.changes) - sharedHeads = advanceHeads(beforeHeads, Backend.getHeads(backend), sharedHeads) + ;[backend, patch] = Backend.applyChanges(backend, message.changes) + sharedHeads = advanceHeads( + beforeHeads, + Backend.getHeads(backend), + sharedHeads + ) } // If heads are equal, indicate we don't need to send a response message - if (message.changes.length === 0 && compareArrays(message.heads, beforeHeads)) { + if ( + message.changes.length === 0 && + compareArrays(message.heads, beforeHeads) + ) { lastSentHeads = message.heads } // If all of the remote heads are known to us, that means either our heads are equal, or we are // ahead of the remote peer. In this case, take the remote heads to be our shared heads. 
- const knownHeads = message.heads.filter(head => Backend.getChangeByHash(backend, head)) + const knownHeads = message.heads.filter(head => + Backend.getChangeByHash(backend, head) + ) if (knownHeads.length === message.heads.length) { sharedHeads = message.heads // If the remote peer has lost all its data, reset our state to perform a full resync @@ -467,14 +549,18 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { theirHave: message.have, // the information we need to calculate the changes they need theirHeads: message.heads, theirNeed: message.need, - sentHashes + sentHashes, } return [backend, syncState, patch] } module.exports = { - receiveSyncMessage, generateSyncMessage, - encodeSyncMessage, decodeSyncMessage, - initSyncState, encodeSyncState, decodeSyncState, - BloomFilter // BloomFilter is a private API, exported only for testing purposes + receiveSyncMessage, + generateSyncMessage, + encodeSyncMessage, + decodeSyncMessage, + initSyncState, + encodeSyncState, + decodeSyncState, + BloomFilter, // BloomFilter is a private API, exported only for testing purposes } diff --git a/javascript/test/legacy_tests.ts b/javascript/test/legacy_tests.ts index c5c88275..477a5545 100644 --- a/javascript/test/legacy_tests.ts +++ b/javascript/test/legacy_tests.ts @@ -1,7 +1,7 @@ -import * as assert from 'assert' -import * as Automerge from '../src' -import { assertEqualsOneOf } from './helpers' -import { decodeChange } from './legacy/columnar' +import * as assert from "assert" +import * as Automerge from "../src" +import { assertEqualsOneOf } from "./helpers" +import { decodeChange } from "./legacy/columnar" const UUID_PATTERN = /^[0-9a-f]{32}$/ const OPID_PATTERN = /^[0-9]+@([0-9a-f][0-9a-f])*$/ @@ -13,61 +13,60 @@ const OPID_PATTERN = /^[0-9]+@([0-9a-f][0-9a-f])*$/ // TODO - on-pass load() & reconstruct change from opset // TODO - micro-patches (needed for fully hydrated object in js) // TODO - valueAt(heads) / GC -// +// // AUTOMERGE UNSUPPORTED // // TODO - patchCallback - -describe('Automerge', () => { - describe('initialization ', () => { - it('should initially be an empty map', () => { +describe("Automerge", () => { + describe("initialization ", () => { + it("should initially be an empty map", () => { const doc = Automerge.init() assert.deepStrictEqual(doc, {}) }) - it('should allow instantiating from an existing object', () => { + it("should allow instantiating from an existing object", () => { const initialState = { birds: { wrens: 3, magpies: 4 } } const doc = Automerge.from(initialState) assert.deepStrictEqual(doc, initialState) }) - it('should allow merging of an object initialized with `from`', () => { + it("should allow merging of an object initialized with `from`", () => { let doc1 = Automerge.from({ cards: [] }) let doc2 = Automerge.merge(Automerge.init(), doc1) assert.deepStrictEqual(doc2, { cards: [] }) }) - it('should allow passing an actorId when instantiating from an existing object', () => { - const actorId = '1234' + it("should allow passing an actorId when instantiating from an existing object", () => { + const actorId = "1234" let doc = Automerge.from({ foo: 1 }, actorId) - assert.strictEqual(Automerge.getActorId(doc), '1234') + assert.strictEqual(Automerge.getActorId(doc), "1234") }) - it('accepts an empty object as initial state', () => { + it("accepts an empty object as initial state", () => { const doc = Automerge.from({}) assert.deepStrictEqual(doc, {}) }) - it('accepts an array as initial state, but converts it to an object', () => { + it("accepts an 
array as initial state, but converts it to an object", () => { // @ts-ignore - const doc = Automerge.from(['a', 'b', 'c']) - assert.deepStrictEqual(doc, { '0': 'a', '1': 'b', '2': 'c' }) + const doc = Automerge.from(["a", "b", "c"]) + assert.deepStrictEqual(doc, { "0": "a", "1": "b", "2": "c" }) }) - it('accepts strings as initial values, but treats them as an array of characters', () => { + it("accepts strings as initial values, but treats them as an array of characters", () => { // @ts-ignore - const doc = Automerge.from('abc') - assert.deepStrictEqual(doc, { '0': 'a', '1': 'b', '2': 'c' }) + const doc = Automerge.from("abc") + assert.deepStrictEqual(doc, { "0": "a", "1": "b", "2": "c" }) }) - it('ignores numbers provided as initial values', () => { + it("ignores numbers provided as initial values", () => { // @ts-ignore const doc = Automerge.from(123) assert.deepStrictEqual(doc, {}) }) - it('ignores booleans provided as initial values', () => { + it("ignores booleans provided as initial values", () => { // @ts-ignore const doc1 = Automerge.from(false) assert.deepStrictEqual(doc1, {}) @@ -77,550 +76,701 @@ describe('Automerge', () => { }) }) - describe('sequential use', () => { + describe("sequential use", () => { let s1: Automerge.Doc, s2: Automerge.Doc beforeEach(() => { s1 = Automerge.init("aabbcc") }) - it('should not mutate objects', () => { - s2 = Automerge.change(s1, doc => doc.foo = 'bar') + it("should not mutate objects", () => { + s2 = Automerge.change(s1, doc => (doc.foo = "bar")) assert.strictEqual(s1.foo, undefined) - assert.strictEqual(s2.foo, 'bar') + assert.strictEqual(s2.foo, "bar") }) - it('changes should be retrievable', () => { + it("changes should be retrievable", () => { const change1 = Automerge.getLastLocalChange(s1) - s2 = Automerge.change(s1, doc => doc.foo = 'bar') + s2 = Automerge.change(s1, doc => (doc.foo = "bar")) const change2 = Automerge.getLastLocalChange(s2) assert.strictEqual(change1, undefined) const change = Automerge.decodeChange(change2!) 
assert.deepStrictEqual(change, { - actor: change.actor, deps: [], seq: 1, startOp: 1, - hash: change.hash, message: null, time: change.time, + actor: change.actor, + deps: [], + seq: 1, + startOp: 1, + hash: change.hash, + message: null, + time: change.time, ops: [ - {obj: '_root', key: 'foo', action: 'makeText', pred: []}, - {action: 'set', elemId: '_head', insert: true, obj: '1@aabbcc', pred: [], value: 'b' }, - {action: 'set', elemId: '2@aabbcc', insert: true, obj: '1@aabbcc', pred: [], value: 'a' }, - {action: 'set', elemId: '3@aabbcc', insert: true, obj: '1@aabbcc', pred: [], value: 'r' }] + { obj: "_root", key: "foo", action: "makeText", pred: [] }, + { + action: "set", + elemId: "_head", + insert: true, + obj: "1@aabbcc", + pred: [], + value: "b", + }, + { + action: "set", + elemId: "2@aabbcc", + insert: true, + obj: "1@aabbcc", + pred: [], + value: "a", + }, + { + action: "set", + elemId: "3@aabbcc", + insert: true, + obj: "1@aabbcc", + pred: [], + value: "r", + }, + ], }) }) - it('should not register any conflicts on repeated assignment', () => { - assert.strictEqual(Automerge.getConflicts(s1, 'foo'), undefined) - s1 = Automerge.change(s1, 'change', doc => doc.foo = 'one') - assert.strictEqual(Automerge.getConflicts(s1, 'foo'), undefined) - s1 = Automerge.change(s1, 'change', doc => doc.foo = 'two') - assert.strictEqual(Automerge.getConflicts(s1, 'foo'), undefined) + it("should not register any conflicts on repeated assignment", () => { + assert.strictEqual(Automerge.getConflicts(s1, "foo"), undefined) + s1 = Automerge.change(s1, "change", doc => (doc.foo = "one")) + assert.strictEqual(Automerge.getConflicts(s1, "foo"), undefined) + s1 = Automerge.change(s1, "change", doc => (doc.foo = "two")) + assert.strictEqual(Automerge.getConflicts(s1, "foo"), undefined) }) - describe('changes', () => { - it('should group several changes', () => { - s2 = Automerge.change(s1, 'change message', doc => { - doc.first = 'one' - assert.strictEqual(doc.first, 'one') - doc.second = 'two' + describe("changes", () => { + it("should group several changes", () => { + s2 = Automerge.change(s1, "change message", doc => { + doc.first = "one" + assert.strictEqual(doc.first, "one") + doc.second = "two" assert.deepStrictEqual(doc, { - first: 'one', second: 'two' + first: "one", + second: "two", }) }) assert.deepStrictEqual(s1, {}) - assert.deepStrictEqual(s2, {first: 'one', second: 'two'}) + assert.deepStrictEqual(s2, { first: "one", second: "two" }) }) - it('should freeze objects if desired', () => { - s1 = Automerge.init({freeze: true}) - s2 = Automerge.change(s1, doc => doc.foo = 'bar') + it("should freeze objects if desired", () => { + s1 = Automerge.init({ freeze: true }) + s2 = Automerge.change(s1, doc => (doc.foo = "bar")) try { // @ts-ignore - s2.foo = 'lemon' - } catch (e) { } - assert.strictEqual(s2.foo, 'bar') + s2.foo = "lemon" + } catch (e) {} + assert.strictEqual(s2.foo, "bar") let deleted = false try { // @ts-ignore deleted = delete s2.foo - } catch (e) { } - assert.strictEqual(s2.foo, 'bar') + } catch (e) {} + assert.strictEqual(s2.foo, "bar") assert.strictEqual(deleted, false) Automerge.change(s2, () => { try { // @ts-ignore - s2.foo = 'lemon' - } catch (e) { } - assert.strictEqual(s2.foo, 'bar') + s2.foo = "lemon" + } catch (e) {} + assert.strictEqual(s2.foo, "bar") }) - assert.throws(() => { Object.assign(s2, {x: 4}) }) + assert.throws(() => { + Object.assign(s2, { x: 4 }) + }) assert.strictEqual(s2.x, undefined) }) - it('should allow repeated reading and writing of values', () => { - s2 = 
Automerge.change(s1, 'change message', doc => { - doc.value = 'a' - assert.strictEqual(doc.value, 'a') - doc.value = 'b' - doc.value = 'c' - assert.strictEqual(doc.value, 'c') + it("should allow repeated reading and writing of values", () => { + s2 = Automerge.change(s1, "change message", doc => { + doc.value = "a" + assert.strictEqual(doc.value, "a") + doc.value = "b" + doc.value = "c" + assert.strictEqual(doc.value, "c") }) assert.deepStrictEqual(s1, {}) - assert.deepStrictEqual(s2, {value: 'c'}) + assert.deepStrictEqual(s2, { value: "c" }) }) - it('should not record conflicts when writing the same field several times within one change', () => { - s1 = Automerge.change(s1, 'change message', doc => { - doc.value = 'a' - doc.value = 'b' - doc.value = 'c' + it("should not record conflicts when writing the same field several times within one change", () => { + s1 = Automerge.change(s1, "change message", doc => { + doc.value = "a" + doc.value = "b" + doc.value = "c" }) - assert.strictEqual(s1.value, 'c') - assert.strictEqual(Automerge.getConflicts(s1, 'value'), undefined) + assert.strictEqual(s1.value, "c") + assert.strictEqual(Automerge.getConflicts(s1, "value"), undefined) }) - it('should return the unchanged state object if nothing changed', () => { + it("should return the unchanged state object if nothing changed", () => { s2 = Automerge.change(s1, () => {}) assert.strictEqual(s2, s1) }) - it('should ignore field updates that write the existing value', () => { - s1 = Automerge.change(s1, doc => doc.field = 123) - s2 = Automerge.change(s1, doc => doc.field = 123) + it("should ignore field updates that write the existing value", () => { + s1 = Automerge.change(s1, doc => (doc.field = 123)) + s2 = Automerge.change(s1, doc => (doc.field = 123)) assert.strictEqual(s2, s1) }) - it('should not ignore field updates that resolve a conflict', () => { + it("should not ignore field updates that resolve a conflict", () => { s2 = Automerge.merge(Automerge.init(), s1) - s1 = Automerge.change(s1, doc => doc.field = 123) - s2 = Automerge.change(s2, doc => doc.field = 321) + s1 = Automerge.change(s1, doc => (doc.field = 123)) + s2 = Automerge.change(s2, doc => (doc.field = 321)) s1 = Automerge.merge(s1, s2) - assert.strictEqual(Object.keys(Automerge.getConflicts(s1, 'field')!).length, 2) - const resolved = Automerge.change(s1, doc => doc.field = s1.field) + assert.strictEqual( + Object.keys(Automerge.getConflicts(s1, "field")!).length, + 2 + ) + const resolved = Automerge.change(s1, doc => (doc.field = s1.field)) assert.notStrictEqual(resolved, s1) - assert.deepStrictEqual(resolved, {field: s1.field}) - assert.strictEqual(Automerge.getConflicts(resolved, 'field'), undefined) + assert.deepStrictEqual(resolved, { field: s1.field }) + assert.strictEqual(Automerge.getConflicts(resolved, "field"), undefined) }) - it('should ignore list element updates that write the existing value', () => { - s1 = Automerge.change(s1, doc => doc.list = [123]) - s2 = Automerge.change(s1, doc => doc.list[0] = 123) + it("should ignore list element updates that write the existing value", () => { + s1 = Automerge.change(s1, doc => (doc.list = [123])) + s2 = Automerge.change(s1, doc => (doc.list[0] = 123)) assert.strictEqual(s2, s1) }) - it('should not ignore list element updates that resolve a conflict', () => { - s1 = Automerge.change(s1, doc => doc.list = [1]) + it("should not ignore list element updates that resolve a conflict", () => { + s1 = Automerge.change(s1, doc => (doc.list = [1])) s2 = Automerge.merge(Automerge.init(), 
s1) - s1 = Automerge.change(s1, doc => doc.list[0] = 123) - s2 = Automerge.change(s2, doc => doc.list[0] = 321) + s1 = Automerge.change(s1, doc => (doc.list[0] = 123)) + s2 = Automerge.change(s2, doc => (doc.list[0] = 321)) s1 = Automerge.merge(s1, s2) assert.deepStrictEqual(Automerge.getConflicts(s1.list, 0), { [`3@${Automerge.getActorId(s1)}`]: 123, - [`3@${Automerge.getActorId(s2)}`]: 321 + [`3@${Automerge.getActorId(s2)}`]: 321, }) - const resolved = Automerge.change(s1, doc => doc.list[0] = s1.list[0]) + const resolved = Automerge.change(s1, doc => (doc.list[0] = s1.list[0])) assert.deepStrictEqual(resolved, s1) assert.notStrictEqual(resolved, s1) assert.strictEqual(Automerge.getConflicts(resolved.list, 0), undefined) }) - it('should sanity-check arguments', () => { - s1 = Automerge.change(s1, doc => doc.nested = {}) - // @ts-ignore - assert.throws(() => { Automerge.change({}, doc => doc.foo = 'bar') }, /must be the document root/) - // @ts-ignore - assert.throws(() => { Automerge.change(s1.nested, doc => doc.foo = 'bar') }, /must be the document root/) + it("should sanity-check arguments", () => { + s1 = Automerge.change(s1, doc => (doc.nested = {})) + assert.throws(() => { + // @ts-ignore + Automerge.change({}, doc => (doc.foo = "bar")) + }, /must be the document root/) + assert.throws(() => { + // @ts-ignore + Automerge.change(s1.nested, doc => (doc.foo = "bar")) + }, /must be the document root/) }) - it('should not allow nested change blocks', () => { + it("should not allow nested change blocks", () => { assert.throws(() => { Automerge.change(s1, doc1 => { Automerge.change(doc1, doc2 => { // @ts-ignore - doc2.foo = 'bar' + doc2.foo = "bar" }) }) }, /Calls to Automerge.change cannot be nested/) assert.throws(() => { s1 = Automerge.change(s1, doc1 => { - s2 = Automerge.change(s1, doc2 => doc2.two = 2) + s2 = Automerge.change(s1, doc2 => (doc2.two = 2)) doc1.one = 1 }) }, /Attempting to change an outdated document/) }) - it('should not allow the same base document to be used for multiple changes', () => { + it("should not allow the same base document to be used for multiple changes", () => { assert.throws(() => { - Automerge.change(s1, doc => doc.one = 1) - Automerge.change(s1, doc => doc.two = 2) + Automerge.change(s1, doc => (doc.one = 1)) + Automerge.change(s1, doc => (doc.two = 2)) }, /Attempting to change an outdated document/) }) - it('should allow a document to be cloned', () => { - s1 = Automerge.change(s1, doc => doc.zero = 0) + it("should allow a document to be cloned", () => { + s1 = Automerge.change(s1, doc => (doc.zero = 0)) s2 = Automerge.clone(s1) - s1 = Automerge.change(s1, doc => doc.one = 1) - s2 = Automerge.change(s2, doc => doc.two = 2) - assert.deepStrictEqual(s1, {zero: 0, one: 1}) - assert.deepStrictEqual(s2, {zero: 0, two: 2}) + s1 = Automerge.change(s1, doc => (doc.one = 1)) + s2 = Automerge.change(s2, doc => (doc.two = 2)) + assert.deepStrictEqual(s1, { zero: 0, one: 1 }) + assert.deepStrictEqual(s2, { zero: 0, two: 2 }) Automerge.free(s1) Automerge.free(s2) }) - it('should work with Object.assign merges', () => { + it("should work with Object.assign merges", () => { s1 = Automerge.change(s1, doc1 => { - doc1.stuff = {foo: 'bar', baz: 'blur'} + doc1.stuff = { foo: "bar", baz: "blur" } }) s1 = Automerge.change(s1, doc1 => { - doc1.stuff = Object.assign({}, doc1.stuff, {baz: 'updated!'}) + doc1.stuff = Object.assign({}, doc1.stuff, { baz: "updated!" 
}) }) - assert.deepStrictEqual(s1, {stuff: {foo: 'bar', baz: 'updated!'}}) + assert.deepStrictEqual(s1, { stuff: { foo: "bar", baz: "updated!" } }) }) - it('should support Date objects in maps', () => { + it("should support Date objects in maps", () => { const now = new Date() - s1 = Automerge.change(s1, doc => doc.now = now) + s1 = Automerge.change(s1, doc => (doc.now = now)) let changes = Automerge.getAllChanges(s1) ;[s2] = Automerge.applyChanges(Automerge.init(), changes) assert.strictEqual(s2.now instanceof Date, true) assert.strictEqual(s2.now.getTime(), now.getTime()) }) - it('should support Date objects in lists', () => { + it("should support Date objects in lists", () => { const now = new Date() - s1 = Automerge.change(s1, doc => doc.list = [now]) + s1 = Automerge.change(s1, doc => (doc.list = [now])) let changes = Automerge.getAllChanges(s1) ;[s2] = Automerge.applyChanges(Automerge.init(), changes) assert.strictEqual(s2.list[0] instanceof Date, true) assert.strictEqual(s2.list[0].getTime(), now.getTime()) }) - it('should call patchCallback if supplied', () => { - const callbacks: Array<{patches: Array, before: Automerge.Doc, after: Automerge.Doc}> = [] - const s2 = Automerge.change(s1, { - patchCallback: (patches, before, after) => callbacks.push({patches, before, after}) - }, doc => { - doc.birds = ['Goldfinch'] - }) + it("should call patchCallback if supplied", () => { + const callbacks: Array<{ + patches: Array + before: Automerge.Doc + after: Automerge.Doc + }> = [] + const s2 = Automerge.change( + s1, + { + patchCallback: (patches, before, after) => + callbacks.push({ patches, before, after }), + }, + doc => { + doc.birds = ["Goldfinch"] + } + ) assert.strictEqual(callbacks.length, 1) - assert.deepStrictEqual(callbacks[0].patches[0], { action: "put", path: ["birds"], value: [] }) - assert.deepStrictEqual(callbacks[0].patches[1], { action: "insert", path: ["birds",0], values: [""] }) - assert.deepStrictEqual(callbacks[0].patches[2], { action: "splice", path: ["birds",0, 0], value: "Goldfinch" }) + assert.deepStrictEqual(callbacks[0].patches[0], { + action: "put", + path: ["birds"], + value: [], + }) + assert.deepStrictEqual(callbacks[0].patches[1], { + action: "insert", + path: ["birds", 0], + values: [""], + }) + assert.deepStrictEqual(callbacks[0].patches[2], { + action: "splice", + path: ["birds", 0, 0], + value: "Goldfinch", + }) assert.strictEqual(callbacks[0].before, s1) assert.strictEqual(callbacks[0].after, s2) }) - it('should call a patchCallback set up on document initialisation', () => { - const callbacks: Array<{patches: Array, before: Automerge.Doc, after: Automerge.Doc}> = [] + it("should call a patchCallback set up on document initialisation", () => { + const callbacks: Array<{ + patches: Array + before: Automerge.Doc + after: Automerge.Doc + }> = [] s1 = Automerge.init({ - patchCallback: (patches, before, after) => callbacks.push({patches, before, after }) + patchCallback: (patches, before, after) => + callbacks.push({ patches, before, after }), }) - const s2 = Automerge.change(s1, doc => doc.bird = 'Goldfinch') + const s2 = Automerge.change(s1, doc => (doc.bird = "Goldfinch")) assert.strictEqual(callbacks.length, 1) assert.deepStrictEqual(callbacks[0].patches[0], { - action: "put", path: ["bird"], value: "" + action: "put", + path: ["bird"], + value: "", }) assert.deepStrictEqual(callbacks[0].patches[1], { - action: "splice", path: ["bird", 0], value: "Goldfinch" + action: "splice", + path: ["bird", 0], + value: "Goldfinch", }) 
assert.strictEqual(callbacks[0].before, s1) assert.strictEqual(callbacks[0].after, s2) }) }) - describe('emptyChange()', () => { - it('should append an empty change to the history', () => { - s1 = Automerge.change(s1, 'first change', doc => doc.field = 123) - s2 = Automerge.emptyChange(s1, 'empty change') + describe("emptyChange()", () => { + it("should append an empty change to the history", () => { + s1 = Automerge.change(s1, "first change", doc => (doc.field = 123)) + s2 = Automerge.emptyChange(s1, "empty change") assert.notStrictEqual(s2, s1) assert.deepStrictEqual(s2, s1) - assert.deepStrictEqual(Automerge.getHistory(s2).map(state => state.change.message), ['first change', 'empty change']) + assert.deepStrictEqual( + Automerge.getHistory(s2).map(state => state.change.message), + ["first change", "empty change"] + ) }) - it('should reference dependencies', () => { - s1 = Automerge.change(s1, doc => doc.field = 123) + it("should reference dependencies", () => { + s1 = Automerge.change(s1, doc => (doc.field = 123)) s2 = Automerge.merge(Automerge.init(), s1) - s2 = Automerge.change(s2, doc => doc.other = 'hello') + s2 = Automerge.change(s2, doc => (doc.other = "hello")) s1 = Automerge.emptyChange(Automerge.merge(s1, s2)) const history = Automerge.getHistory(s1) const emptyChange = history[2].change - assert.deepStrictEqual(emptyChange.deps, [history[0].change.hash, history[1].change.hash].sort()) + assert.deepStrictEqual( + emptyChange.deps, + [history[0].change.hash, history[1].change.hash].sort() + ) assert.deepStrictEqual(emptyChange.ops, []) }) }) - describe('root object', () => { - it('should handle single-property assignment', () => { - s1 = Automerge.change(s1, 'set bar', doc => doc.foo = 'bar') - s1 = Automerge.change(s1, 'set zap', doc => doc.zip = 'zap') - assert.strictEqual(s1.foo, 'bar') - assert.strictEqual(s1.zip, 'zap') - assert.deepStrictEqual(s1, {foo: 'bar', zip: 'zap'}) + describe("root object", () => { + it("should handle single-property assignment", () => { + s1 = Automerge.change(s1, "set bar", doc => (doc.foo = "bar")) + s1 = Automerge.change(s1, "set zap", doc => (doc.zip = "zap")) + assert.strictEqual(s1.foo, "bar") + assert.strictEqual(s1.zip, "zap") + assert.deepStrictEqual(s1, { foo: "bar", zip: "zap" }) }) - it('should allow floating-point values', () => { - s1 = Automerge.change(s1, doc => doc.number = 1589032171.1) + it("should allow floating-point values", () => { + s1 = Automerge.change(s1, doc => (doc.number = 1589032171.1)) assert.strictEqual(s1.number, 1589032171.1) }) - it('should handle multi-property assignment', () => { - s1 = Automerge.change(s1, 'multi-assign', doc => { - Object.assign(doc, {foo: 'bar', answer: 42}) + it("should handle multi-property assignment", () => { + s1 = Automerge.change(s1, "multi-assign", doc => { + Object.assign(doc, { foo: "bar", answer: 42 }) }) - assert.strictEqual(s1.foo, 'bar') + assert.strictEqual(s1.foo, "bar") assert.strictEqual(s1.answer, 42) - assert.deepStrictEqual(s1, {foo: 'bar', answer: 42}) + assert.deepStrictEqual(s1, { foo: "bar", answer: 42 }) }) - it('should handle root property deletion', () => { - s1 = Automerge.change(s1, 'set foo', doc => { doc.foo = 'bar'; doc.something = null }) - s1 = Automerge.change(s1, 'del foo', doc => { delete doc.foo }) + it("should handle root property deletion", () => { + s1 = Automerge.change(s1, "set foo", doc => { + doc.foo = "bar" + doc.something = null + }) + s1 = Automerge.change(s1, "del foo", doc => { + delete doc.foo + }) assert.strictEqual(s1.foo, undefined) 
assert.strictEqual(s1.something, null) - assert.deepStrictEqual(s1, {something: null}) + assert.deepStrictEqual(s1, { something: null }) }) - it('should follow JS delete behavior', () => { - s1 = Automerge.change(s1, 'set foo', doc => { doc.foo = 'bar' }) + it("should follow JS delete behavior", () => { + s1 = Automerge.change(s1, "set foo", doc => { + doc.foo = "bar" + }) let deleted - s1 = Automerge.change(s1, 'del foo', doc => { + s1 = Automerge.change(s1, "del foo", doc => { deleted = delete doc.foo }) assert.strictEqual(deleted, true) let deleted2 assert.doesNotThrow(() => { - s1 = Automerge.change(s1, 'del baz', doc => { + s1 = Automerge.change(s1, "del baz", doc => { deleted2 = delete doc.baz }) }) assert.strictEqual(deleted2, true) }) - it('should allow the type of a property to be changed', () => { - s1 = Automerge.change(s1, 'set number', doc => doc.prop = 123) + it("should allow the type of a property to be changed", () => { + s1 = Automerge.change(s1, "set number", doc => (doc.prop = 123)) assert.strictEqual(s1.prop, 123) - s1 = Automerge.change(s1, 'set string', doc => doc.prop = '123') - assert.strictEqual(s1.prop, '123') - s1 = Automerge.change(s1, 'set null', doc => doc.prop = null) + s1 = Automerge.change(s1, "set string", doc => (doc.prop = "123")) + assert.strictEqual(s1.prop, "123") + s1 = Automerge.change(s1, "set null", doc => (doc.prop = null)) assert.strictEqual(s1.prop, null) - s1 = Automerge.change(s1, 'set bool', doc => doc.prop = true) + s1 = Automerge.change(s1, "set bool", doc => (doc.prop = true)) assert.strictEqual(s1.prop, true) }) - it('should require property names to be valid', () => { + it("should require property names to be valid", () => { assert.throws(() => { - Automerge.change(s1, 'foo', doc => doc[''] = 'x') + Automerge.change(s1, "foo", doc => (doc[""] = "x")) }, /must not be an empty string/) }) - it('should not allow assignment of unsupported datatypes', () => { + it("should not allow assignment of unsupported datatypes", () => { Automerge.change(s1, doc => { - assert.throws(() => { doc.foo = undefined }, /Unsupported type of value: undefined/) - assert.throws(() => { doc.foo = {prop: undefined} }, /Unsupported type of value: undefined/) - assert.throws(() => { doc.foo = () => {} }, /Unsupported type of value: function/) - assert.throws(() => { doc.foo = Symbol('foo') }, /Unsupported type of value: symbol/) + assert.throws(() => { + doc.foo = undefined + }, /Unsupported type of value: undefined/) + assert.throws(() => { + doc.foo = { prop: undefined } + }, /Unsupported type of value: undefined/) + assert.throws(() => { + doc.foo = () => {} + }, /Unsupported type of value: function/) + assert.throws(() => { + doc.foo = Symbol("foo") + }, /Unsupported type of value: symbol/) }) }) }) - describe('nested maps', () => { - it('should assign an objectId to nested maps', () => { - s1 = Automerge.change(s1, doc => { doc.nested = {} }) + describe("nested maps", () => { + it("should assign an objectId to nested maps", () => { + s1 = Automerge.change(s1, doc => { + doc.nested = {} + }) let id = Automerge.getObjectId(s1.nested) - assert.strictEqual(OPID_PATTERN.test(Automerge.getObjectId(s1.nested)!), true) - assert.notEqual(Automerge.getObjectId(s1.nested), '_root') + assert.strictEqual( + OPID_PATTERN.test(Automerge.getObjectId(s1.nested)!), + true + ) + assert.notEqual(Automerge.getObjectId(s1.nested), "_root") }) - it('should handle assignment of a nested property', () => { - s1 = Automerge.change(s1, 'first change', doc => { + it("should handle 
assignment of a nested property", () => { + s1 = Automerge.change(s1, "first change", doc => { doc.nested = {} - doc.nested.foo = 'bar' + doc.nested.foo = "bar" }) - s1 = Automerge.change(s1, 'second change', doc => { + s1 = Automerge.change(s1, "second change", doc => { doc.nested.one = 1 }) - assert.deepStrictEqual(s1, {nested: {foo: 'bar', one: 1}}) - assert.deepStrictEqual(s1.nested, {foo: 'bar', one: 1}) - assert.strictEqual(s1.nested.foo, 'bar') + assert.deepStrictEqual(s1, { nested: { foo: "bar", one: 1 } }) + assert.deepStrictEqual(s1.nested, { foo: "bar", one: 1 }) + assert.strictEqual(s1.nested.foo, "bar") assert.strictEqual(s1.nested.one, 1) }) - it('should handle assignment of an object literal', () => { + it("should handle assignment of an object literal", () => { s1 = Automerge.change(s1, doc => { - doc.textStyle = {bold: false, fontSize: 12} + doc.textStyle = { bold: false, fontSize: 12 } }) - assert.deepStrictEqual(s1, {textStyle: {bold: false, fontSize: 12}}) - assert.deepStrictEqual(s1.textStyle, {bold: false, fontSize: 12}) + assert.deepStrictEqual(s1, { + textStyle: { bold: false, fontSize: 12 }, + }) + assert.deepStrictEqual(s1.textStyle, { bold: false, fontSize: 12 }) assert.strictEqual(s1.textStyle.bold, false) assert.strictEqual(s1.textStyle.fontSize, 12) }) - it('should handle assignment of multiple nested properties', () => { + it("should handle assignment of multiple nested properties", () => { s1 = Automerge.change(s1, doc => { - doc.textStyle = {bold: false, fontSize: 12} - Object.assign(doc.textStyle, {typeface: 'Optima', fontSize: 14}) + doc.textStyle = { bold: false, fontSize: 12 } + Object.assign(doc.textStyle, { typeface: "Optima", fontSize: 14 }) }) - assert.strictEqual(s1.textStyle.typeface, 'Optima') + assert.strictEqual(s1.textStyle.typeface, "Optima") assert.strictEqual(s1.textStyle.bold, false) assert.strictEqual(s1.textStyle.fontSize, 14) - assert.deepStrictEqual(s1.textStyle, {typeface: 'Optima', bold: false, fontSize: 14}) + assert.deepStrictEqual(s1.textStyle, { + typeface: "Optima", + bold: false, + fontSize: 14, + }) }) - it('should handle arbitrary-depth nesting', () => { + it("should handle arbitrary-depth nesting", () => { s1 = Automerge.change(s1, doc => { - doc.a = {b: {c: {d: {e: {f: {g: 'h'}}}}}} + doc.a = { b: { c: { d: { e: { f: { g: "h" } } } } } } }) s1 = Automerge.change(s1, doc => { - doc.a.b.c.d.e.f.i = 'j' + doc.a.b.c.d.e.f.i = "j" }) - assert.deepStrictEqual(s1, {a: { b: { c: { d: { e: { f: { g: 'h', i: 'j'}}}}}}}) - assert.strictEqual(s1.a.b.c.d.e.f.g, 'h') - assert.strictEqual(s1.a.b.c.d.e.f.i, 'j') + assert.deepStrictEqual(s1, { + a: { b: { c: { d: { e: { f: { g: "h", i: "j" } } } } } }, + }) + assert.strictEqual(s1.a.b.c.d.e.f.g, "h") + assert.strictEqual(s1.a.b.c.d.e.f.i, "j") }) - it('should allow an old object to be replaced with a new one', () => { - s1 = Automerge.change(s1, 'change 1', doc => { - doc.myPet = {species: 'dog', legs: 4, breed: 'dachshund'} + it("should allow an old object to be replaced with a new one", () => { + s1 = Automerge.change(s1, "change 1", doc => { + doc.myPet = { species: "dog", legs: 4, breed: "dachshund" } }) - let s2 = Automerge.change(s1, 'change 2', doc => { - doc.myPet = {species: 'koi', variety: '紅白', colors: {red: true, white: true, black: false}} + let s2 = Automerge.change(s1, "change 2", doc => { + doc.myPet = { + species: "koi", + variety: "紅白", + colors: { red: true, white: true, black: false }, + } }) assert.deepStrictEqual(s1.myPet, { - species: 'dog', legs: 4, breed: 
'dachshund' + species: "dog", + legs: 4, + breed: "dachshund", }) - assert.strictEqual(s1.myPet.breed, 'dachshund') + assert.strictEqual(s1.myPet.breed, "dachshund") assert.deepStrictEqual(s2.myPet, { - species: 'koi', variety: '紅白', - colors: {red: true, white: true, black: false} + species: "koi", + variety: "紅白", + colors: { red: true, white: true, black: false }, }) // @ts-ignore assert.strictEqual(s2.myPet.breed, undefined) - assert.strictEqual(s2.myPet.variety, '紅白') + assert.strictEqual(s2.myPet.variety, "紅白") }) - it('should allow fields to be changed between primitive and nested map', () => { - s1 = Automerge.change(s1, doc => doc.color = '#ff7f00') - assert.strictEqual(s1.color, '#ff7f00') - s1 = Automerge.change(s1, doc => doc.color = {red: 255, green: 127, blue: 0}) - assert.deepStrictEqual(s1.color, {red: 255, green: 127, blue: 0}) - s1 = Automerge.change(s1, doc => doc.color = '#ff7f00') - assert.strictEqual(s1.color, '#ff7f00') + it("should allow fields to be changed between primitive and nested map", () => { + s1 = Automerge.change(s1, doc => (doc.color = "#ff7f00")) + assert.strictEqual(s1.color, "#ff7f00") + s1 = Automerge.change( + s1, + doc => (doc.color = { red: 255, green: 127, blue: 0 }) + ) + assert.deepStrictEqual(s1.color, { red: 255, green: 127, blue: 0 }) + s1 = Automerge.change(s1, doc => (doc.color = "#ff7f00")) + assert.strictEqual(s1.color, "#ff7f00") }) - it('should not allow several references to the same map object', () => { - s1 = Automerge.change(s1, doc => doc.object = {}) + it("should not allow several references to the same map object", () => { + s1 = Automerge.change(s1, doc => (doc.object = {})) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = doc.object }) + Automerge.change(s1, doc => { + doc.x = doc.object + }) }, /Cannot create a reference to an existing document object/) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = s1.object }) + Automerge.change(s1, doc => { + doc.x = s1.object + }) }, /Cannot create a reference to an existing document object/) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = {}; doc.y = doc.x }) + Automerge.change(s1, doc => { + doc.x = {} + doc.y = doc.x + }) }, /Cannot create a reference to an existing document object/) }) - it('should not allow object-copying idioms', () => { + it("should not allow object-copying idioms", () => { s1 = Automerge.change(s1, doc => { - doc.items = [{id: 'id1', name: 'one'}, {id: 'id2', name: 'two'}] + doc.items = [ + { id: "id1", name: "one" }, + { id: "id2", name: "two" }, + ] }) // People who have previously worked with immutable state in JavaScript may be tempted // to use idioms like this, which don't work well with Automerge -- see e.g. 
// https://github.com/automerge/automerge/issues/260 assert.throws(() => { Automerge.change(s1, doc => { - doc.items = [...doc.items, {id: 'id3', name: 'three'}] + doc.items = [...doc.items, { id: "id3", name: "three" }] }) }, /Cannot create a reference to an existing document object/) }) - it('should handle deletion of properties within a map', () => { - s1 = Automerge.change(s1, 'set style', doc => { - doc.textStyle = {typeface: 'Optima', bold: false, fontSize: 12} + it("should handle deletion of properties within a map", () => { + s1 = Automerge.change(s1, "set style", doc => { + doc.textStyle = { typeface: "Optima", bold: false, fontSize: 12 } }) - s1 = Automerge.change(s1, 'non-bold', doc => delete doc.textStyle.bold) + s1 = Automerge.change(s1, "non-bold", doc => delete doc.textStyle.bold) assert.strictEqual(s1.textStyle.bold, undefined) - assert.deepStrictEqual(s1.textStyle, {typeface: 'Optima', fontSize: 12}) + assert.deepStrictEqual(s1.textStyle, { + typeface: "Optima", + fontSize: 12, + }) }) - it('should handle deletion of references to a map', () => { - s1 = Automerge.change(s1, 'make rich text doc', doc => { - Object.assign(doc, {title: 'Hello', textStyle: {typeface: 'Optima', fontSize: 12}}) + it("should handle deletion of references to a map", () => { + s1 = Automerge.change(s1, "make rich text doc", doc => { + Object.assign(doc, { + title: "Hello", + textStyle: { typeface: "Optima", fontSize: 12 }, + }) }) s1 = Automerge.change(s1, doc => delete doc.textStyle) assert.strictEqual(s1.textStyle, undefined) - assert.deepStrictEqual(s1, {title: 'Hello'}) + assert.deepStrictEqual(s1, { title: "Hello" }) }) - it('should validate field names', () => { - s1 = Automerge.change(s1, doc => doc.nested = {}) - assert.throws(() => { Automerge.change(s1, doc => doc.nested[''] = 'x') }, /must not be an empty string/) - assert.throws(() => { Automerge.change(s1, doc => doc.nested = {'': 'x'}) }, /must not be an empty string/) + it("should validate field names", () => { + s1 = Automerge.change(s1, doc => (doc.nested = {})) + assert.throws(() => { + Automerge.change(s1, doc => (doc.nested[""] = "x")) + }, /must not be an empty string/) + assert.throws(() => { + Automerge.change(s1, doc => (doc.nested = { "": "x" })) + }, /must not be an empty string/) }) }) - describe('lists', () => { - it('should allow elements to be inserted', () => { - s1 = Automerge.change(s1, doc => doc.noodles = []) - s1 = Automerge.change(s1, doc => doc.noodles.insertAt(0, 'udon', 'soba')) - s1 = Automerge.change(s1, doc => doc.noodles.insertAt(1, 'ramen')) - assert.deepStrictEqual(s1, {noodles: ['udon', 'ramen', 'soba']}) - assert.deepStrictEqual(s1.noodles, ['udon', 'ramen', 'soba']) - assert.strictEqual(s1.noodles[0], 'udon') - assert.strictEqual(s1.noodles[1], 'ramen') - assert.strictEqual(s1.noodles[2], 'soba') + describe("lists", () => { + it("should allow elements to be inserted", () => { + s1 = Automerge.change(s1, doc => (doc.noodles = [])) + s1 = Automerge.change(s1, doc => + doc.noodles.insertAt(0, "udon", "soba") + ) + s1 = Automerge.change(s1, doc => doc.noodles.insertAt(1, "ramen")) + assert.deepStrictEqual(s1, { noodles: ["udon", "ramen", "soba"] }) + assert.deepStrictEqual(s1.noodles, ["udon", "ramen", "soba"]) + assert.strictEqual(s1.noodles[0], "udon") + assert.strictEqual(s1.noodles[1], "ramen") + assert.strictEqual(s1.noodles[2], "soba") assert.strictEqual(s1.noodles.length, 3) }) - it('should handle assignment of a list literal', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 
'ramen', 'soba']) - assert.deepStrictEqual(s1, {noodles: ['udon', 'ramen', 'soba']}) - assert.deepStrictEqual(s1.noodles, ['udon', 'ramen', 'soba']) - assert.strictEqual(s1.noodles[0], 'udon') - assert.strictEqual(s1.noodles[1], 'ramen') - assert.strictEqual(s1.noodles[2], 'soba') + it("should handle assignment of a list literal", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "ramen", "soba"]) + ) + assert.deepStrictEqual(s1, { noodles: ["udon", "ramen", "soba"] }) + assert.deepStrictEqual(s1.noodles, ["udon", "ramen", "soba"]) + assert.strictEqual(s1.noodles[0], "udon") + assert.strictEqual(s1.noodles[1], "ramen") + assert.strictEqual(s1.noodles[2], "soba") assert.strictEqual(s1.noodles[3], undefined) assert.strictEqual(s1.noodles.length, 3) }) - it('should only allow numeric indexes', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 'ramen', 'soba']) - s1 = Automerge.change(s1, doc => doc.noodles[1] = 'Ramen!') - assert.strictEqual(s1.noodles[1], 'Ramen!') - s1 = Automerge.change(s1, doc => doc.noodles['1'] = 'RAMEN!!!') - assert.strictEqual(s1.noodles[1], 'RAMEN!!!') - assert.throws(() => { Automerge.change(s1, doc => doc.noodles.favourite = 'udon') }, /list index must be a number/) - assert.throws(() => { Automerge.change(s1, doc => doc.noodles[''] = 'udon') }, /list index must be a number/) - assert.throws(() => { Automerge.change(s1, doc => doc.noodles['1e6'] = 'udon') }, /list index must be a number/) + it("should only allow numeric indexes", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "ramen", "soba"]) + ) + s1 = Automerge.change(s1, doc => (doc.noodles[1] = "Ramen!")) + assert.strictEqual(s1.noodles[1], "Ramen!") + s1 = Automerge.change(s1, doc => (doc.noodles["1"] = "RAMEN!!!")) + assert.strictEqual(s1.noodles[1], "RAMEN!!!") + assert.throws(() => { + Automerge.change(s1, doc => (doc.noodles.favourite = "udon")) + }, /list index must be a number/) + assert.throws(() => { + Automerge.change(s1, doc => (doc.noodles[""] = "udon")) + }, /list index must be a number/) + assert.throws(() => { + Automerge.change(s1, doc => (doc.noodles["1e6"] = "udon")) + }, /list index must be a number/) }) - it('should handle deletion of list elements', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 'ramen', 'soba']) + it("should handle deletion of list elements", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "ramen", "soba"]) + ) s1 = Automerge.change(s1, doc => delete doc.noodles[1]) - assert.deepStrictEqual(s1.noodles, ['udon', 'soba']) + assert.deepStrictEqual(s1.noodles, ["udon", "soba"]) s1 = Automerge.change(s1, doc => doc.noodles.deleteAt(1)) - assert.deepStrictEqual(s1.noodles, ['udon']) - assert.strictEqual(s1.noodles[0], 'udon') + assert.deepStrictEqual(s1.noodles, ["udon"]) + assert.strictEqual(s1.noodles[0], "udon") assert.strictEqual(s1.noodles[1], undefined) assert.strictEqual(s1.noodles[2], undefined) assert.strictEqual(s1.noodles.length, 1) }) - it('should handle assignment of individual list indexes', () => { - s1 = Automerge.change(s1, doc => doc.japaneseFood = ['udon', 'ramen', 'soba']) - s1 = Automerge.change(s1, doc => doc.japaneseFood[1] = 'sushi') - assert.deepStrictEqual(s1.japaneseFood, ['udon', 'sushi', 'soba']) - assert.strictEqual(s1.japaneseFood[0], 'udon') - assert.strictEqual(s1.japaneseFood[1], 'sushi') - assert.strictEqual(s1.japaneseFood[2], 'soba') + it("should handle assignment of individual list indexes", () => { + s1 = Automerge.change( + s1, + doc 
=> (doc.japaneseFood = ["udon", "ramen", "soba"]) + ) + s1 = Automerge.change(s1, doc => (doc.japaneseFood[1] = "sushi")) + assert.deepStrictEqual(s1.japaneseFood, ["udon", "sushi", "soba"]) + assert.strictEqual(s1.japaneseFood[0], "udon") + assert.strictEqual(s1.japaneseFood[1], "sushi") + assert.strictEqual(s1.japaneseFood[2], "soba") assert.strictEqual(s1.japaneseFood[3], undefined) assert.strictEqual(s1.japaneseFood.length, 3) }) - it('concurrent edits insert in reverse actorid order if counters equal', () => { - s1 = Automerge.init('aaaa') - s2 = Automerge.init('bbbb') - s1 = Automerge.change(s1, doc => doc.list = []) + it("concurrent edits insert in reverse actorid order if counters equal", () => { + s1 = Automerge.init("aaaa") + s2 = Automerge.init("bbbb") + s1 = Automerge.change(s1, doc => (doc.list = [])) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.list.splice(0, 0, "2@aaaa")) s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "2@bbbb")) @@ -628,75 +778,112 @@ describe('Automerge', () => { assert.deepStrictEqual(Automerge.toJS(s2).list, ["2@bbbb", "2@aaaa"]) }) - it('concurrent edits insert in reverse counter order if different', () => { - s1 = Automerge.init('aaaa') - s2 = Automerge.init('bbbb') - s1 = Automerge.change(s1, doc => doc.list = []) + it("concurrent edits insert in reverse counter order if different", () => { + s1 = Automerge.init("aaaa") + s2 = Automerge.init("bbbb") + s1 = Automerge.change(s1, doc => (doc.list = [])) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.list.splice(0, 0, "2@aaaa")) - s2 = Automerge.change(s2, doc => doc.foo = "2@bbbb") - s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "3@bbbb")) - s2 = Automerge.merge(s2, s1) - assert.deepStrictEqual(s2.list, ["3@bbbb", "2@aaaa"]) + s2 = Automerge.change(s2, doc => (doc.foo = "2@bbbb")) + s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "3@bbbb")) + s2 = Automerge.merge(s2, s1) + assert.deepStrictEqual(s2.list, ["3@bbbb", "2@aaaa"]) }) - it('should treat out-by-one assignment as insertion', () => { - s1 = Automerge.change(s1, doc => doc.japaneseFood = ['udon']) - s1 = Automerge.change(s1, doc => doc.japaneseFood[1] = 'sushi') - assert.deepStrictEqual(s1.japaneseFood, ['udon', 'sushi']) - assert.strictEqual(s1.japaneseFood[0], 'udon') - assert.strictEqual(s1.japaneseFood[1], 'sushi') + it("should treat out-by-one assignment as insertion", () => { + s1 = Automerge.change(s1, doc => (doc.japaneseFood = ["udon"])) + s1 = Automerge.change(s1, doc => (doc.japaneseFood[1] = "sushi")) + assert.deepStrictEqual(s1.japaneseFood, ["udon", "sushi"]) + assert.strictEqual(s1.japaneseFood[0], "udon") + assert.strictEqual(s1.japaneseFood[1], "sushi") assert.strictEqual(s1.japaneseFood[2], undefined) assert.strictEqual(s1.japaneseFood.length, 2) }) - it('should not allow out-of-range assignment', () => { - s1 = Automerge.change(s1, doc => doc.japaneseFood = ['udon']) - assert.throws(() => { Automerge.change(s1, doc => doc.japaneseFood[4] = 'ramen') }, /is out of bounds/) + it("should not allow out-of-range assignment", () => { + s1 = Automerge.change(s1, doc => (doc.japaneseFood = ["udon"])) + assert.throws(() => { + Automerge.change(s1, doc => (doc.japaneseFood[4] = "ramen")) + }, /is out of bounds/) }) - it('should allow bulk assignment of multiple list indexes', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 'ramen', 'soba']) - s1 = Automerge.change(s1, doc => Object.assign(doc.noodles, {0: 'うどん', 2: 'そば'})) - assert.deepStrictEqual(s1.noodles, 
['うどん', 'ramen', 'そば']) - assert.strictEqual(s1.noodles[0], 'うどん') - assert.strictEqual(s1.noodles[1], 'ramen') - assert.strictEqual(s1.noodles[2], 'そば') + it("should allow bulk assignment of multiple list indexes", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "ramen", "soba"]) + ) + s1 = Automerge.change(s1, doc => + Object.assign(doc.noodles, { 0: "うどん", 2: "そば" }) + ) + assert.deepStrictEqual(s1.noodles, ["うどん", "ramen", "そば"]) + assert.strictEqual(s1.noodles[0], "うどん") + assert.strictEqual(s1.noodles[1], "ramen") + assert.strictEqual(s1.noodles[2], "そば") assert.strictEqual(s1.noodles.length, 3) }) - it('should handle nested objects', () => { - s1 = Automerge.change(s1, doc => doc.noodles = [{type: 'ramen', dishes: ['tonkotsu', 'shoyu']}]) - s1 = Automerge.change(s1, doc => doc.noodles.push({type: 'udon', dishes: ['tempura udon']})) - s1 = Automerge.change(s1, doc => doc.noodles[0].dishes.push('miso')) - assert.deepStrictEqual(s1, {noodles: [ - {type: 'ramen', dishes: ['tonkotsu', 'shoyu', 'miso']}, - {type: 'udon', dishes: ['tempura udon']} - ]}) + it("should handle nested objects", () => { + s1 = Automerge.change( + s1, + doc => + (doc.noodles = [{ type: "ramen", dishes: ["tonkotsu", "shoyu"] }]) + ) + s1 = Automerge.change(s1, doc => + doc.noodles.push({ type: "udon", dishes: ["tempura udon"] }) + ) + s1 = Automerge.change(s1, doc => doc.noodles[0].dishes.push("miso")) + assert.deepStrictEqual(s1, { + noodles: [ + { type: "ramen", dishes: ["tonkotsu", "shoyu", "miso"] }, + { type: "udon", dishes: ["tempura udon"] }, + ], + }) assert.deepStrictEqual(s1.noodles[0], { - type: 'ramen', dishes: ['tonkotsu', 'shoyu', 'miso'] + type: "ramen", + dishes: ["tonkotsu", "shoyu", "miso"], }) assert.deepStrictEqual(s1.noodles[1], { - type: 'udon', dishes: ['tempura udon'] + type: "udon", + dishes: ["tempura udon"], }) }) - it('should handle nested lists', () => { - s1 = Automerge.change(s1, doc => doc.noodleMatrix = [['ramen', 'tonkotsu', 'shoyu']]) - s1 = Automerge.change(s1, doc => doc.noodleMatrix.push(['udon', 'tempura udon'])) - s1 = Automerge.change(s1, doc => doc.noodleMatrix[0].push('miso')) - assert.deepStrictEqual(s1.noodleMatrix, [['ramen', 'tonkotsu', 'shoyu', 'miso'], ['udon', 'tempura udon']]) - assert.deepStrictEqual(s1.noodleMatrix[0], ['ramen', 'tonkotsu', 'shoyu', 'miso']) - assert.deepStrictEqual(s1.noodleMatrix[1], ['udon', 'tempura udon']) + it("should handle nested lists", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodleMatrix = [["ramen", "tonkotsu", "shoyu"]]) + ) + s1 = Automerge.change(s1, doc => + doc.noodleMatrix.push(["udon", "tempura udon"]) + ) + s1 = Automerge.change(s1, doc => doc.noodleMatrix[0].push("miso")) + assert.deepStrictEqual(s1.noodleMatrix, [ + ["ramen", "tonkotsu", "shoyu", "miso"], + ["udon", "tempura udon"], + ]) + assert.deepStrictEqual(s1.noodleMatrix[0], [ + "ramen", + "tonkotsu", + "shoyu", + "miso", + ]) + assert.deepStrictEqual(s1.noodleMatrix[1], ["udon", "tempura udon"]) }) - it('should handle deep nesting', () => { - s1 = Automerge.change(s1, doc => doc.nesting = { - maps: { m1: { m2: { foo: "bar", baz: {} }, m2a: { } } }, - lists: [ [ 1, 2, 3 ], [ [ 3, 4, 5, [6]], 7 ] ], - mapsinlists: [ { foo: "bar" }, [ { bar: "baz" } ] ], - listsinmaps: { foo: [1, 2, 3], bar: [ [ { baz: "123" } ] ] } - }) + it("should handle deep nesting", () => { + s1 = Automerge.change( + s1, + doc => + (doc.nesting = { + maps: { m1: { m2: { foo: "bar", baz: {} }, m2a: {} } }, + lists: [ + [1, 2, 3], + [[3, 4, 5, [6]], 7], + ], + 
mapsinlists: [{ foo: "bar" }, [{ bar: "baz" }]], + listsinmaps: { foo: [1, 2, 3], bar: [[{ baz: "123" }]] }, + }) + ) s1 = Automerge.change(s1, doc => { doc.nesting.maps.m1a = "123" doc.nesting.maps.m1.m2.baz.xxx = "123" @@ -711,97 +898,151 @@ describe('Automerge', () => { doc.nesting.listsinmaps.bar[0][0].baz = "456" delete doc.nesting.listsinmaps.bar }) - assert.deepStrictEqual(s1, { nesting: { - maps: { m1: { m2: { foo: "bar", baz: { xxx: "123" } } }, m1a: "123" }, - lists: [ [ [ 3, 4, 5, 100 ], 7 ] ], - mapsinlists: [ { foo: "baz" } ], - listsinmaps: { foo: [1, 2, 3, 4] } - }}) + assert.deepStrictEqual(s1, { + nesting: { + maps: { + m1: { m2: { foo: "bar", baz: { xxx: "123" } } }, + m1a: "123", + }, + lists: [[[3, 4, 5, 100], 7]], + mapsinlists: [{ foo: "baz" }], + listsinmaps: { foo: [1, 2, 3, 4] }, + }, + }) }) - it('should handle replacement of the entire list', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 'soba', 'ramen']) - s1 = Automerge.change(s1, doc => doc.japaneseNoodles = doc.noodles.slice()) - s1 = Automerge.change(s1, doc => doc.noodles = ['wonton', 'pho']) + it("should handle replacement of the entire list", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "soba", "ramen"]) + ) + s1 = Automerge.change( + s1, + doc => (doc.japaneseNoodles = doc.noodles.slice()) + ) + s1 = Automerge.change(s1, doc => (doc.noodles = ["wonton", "pho"])) assert.deepStrictEqual(s1, { - noodles: ['wonton', 'pho'], - japaneseNoodles: ['udon', 'soba', 'ramen'] + noodles: ["wonton", "pho"], + japaneseNoodles: ["udon", "soba", "ramen"], }) - assert.deepStrictEqual(s1.noodles, ['wonton', 'pho']) - assert.strictEqual(s1.noodles[0], 'wonton') - assert.strictEqual(s1.noodles[1], 'pho') + assert.deepStrictEqual(s1.noodles, ["wonton", "pho"]) + assert.strictEqual(s1.noodles[0], "wonton") + assert.strictEqual(s1.noodles[1], "pho") assert.strictEqual(s1.noodles[2], undefined) assert.strictEqual(s1.noodles.length, 2) }) - it('should allow assignment to change the type of a list element', () => { - s1 = Automerge.change(s1, doc => doc.noodles = ['udon', 'soba', 'ramen']) - assert.deepStrictEqual(s1.noodles, ['udon', 'soba', 'ramen']) - s1 = Automerge.change(s1, doc => doc.noodles[1] = {type: 'soba', options: ['hot', 'cold']}) - assert.deepStrictEqual(s1.noodles, ['udon', {type: 'soba', options: ['hot', 'cold']}, 'ramen']) - s1 = Automerge.change(s1, doc => doc.noodles[1] = ['hot soba', 'cold soba']) - assert.deepStrictEqual(s1.noodles, ['udon', ['hot soba', 'cold soba'], 'ramen']) - s1 = Automerge.change(s1, doc => doc.noodles[1] = 'soba is the best') - assert.deepStrictEqual(s1.noodles, ['udon', 'soba is the best', 'ramen']) + it("should allow assignment to change the type of a list element", () => { + s1 = Automerge.change( + s1, + doc => (doc.noodles = ["udon", "soba", "ramen"]) + ) + assert.deepStrictEqual(s1.noodles, ["udon", "soba", "ramen"]) + s1 = Automerge.change( + s1, + doc => (doc.noodles[1] = { type: "soba", options: ["hot", "cold"] }) + ) + assert.deepStrictEqual(s1.noodles, [ + "udon", + { type: "soba", options: ["hot", "cold"] }, + "ramen", + ]) + s1 = Automerge.change( + s1, + doc => (doc.noodles[1] = ["hot soba", "cold soba"]) + ) + assert.deepStrictEqual(s1.noodles, [ + "udon", + ["hot soba", "cold soba"], + "ramen", + ]) + s1 = Automerge.change(s1, doc => (doc.noodles[1] = "soba is the best")) + assert.deepStrictEqual(s1.noodles, [ + "udon", + "soba is the best", + "ramen", + ]) }) - it('should allow list creation and assignment in the same 
change callback', () => { + it("should allow list creation and assignment in the same change callback", () => { s1 = Automerge.change(Automerge.init(), doc => { - doc.letters = ['a', 'b', 'c'] - doc.letters[1] = 'd' + doc.letters = ["a", "b", "c"] + doc.letters[1] = "d" }) - assert.strictEqual(s1.letters[1], 'd') + assert.strictEqual(s1.letters[1], "d") }) - it('should allow adding and removing list elements in the same change callback', () => { - let s1 = Automerge.change(Automerge.init<{noodles: Array}>(), doc => doc.noodles = []) + it("should allow adding and removing list elements in the same change callback", () => { + let s1 = Automerge.change( + Automerge.init<{ noodles: Array }>(), + doc => (doc.noodles = []) + ) s1 = Automerge.change(s1, doc => { - doc.noodles.push('udon') + doc.noodles.push("udon") // @ts-ignore doc.noodles.deleteAt(0) }) - assert.deepStrictEqual(s1, {noodles: []}) + assert.deepStrictEqual(s1, { noodles: [] }) // do the add-remove cycle twice, test for #151 (https://github.com/automerge/automerge/issues/151) s1 = Automerge.change(s1, doc => { // @ts-ignore - doc.noodles.push('soba') + doc.noodles.push("soba") // @ts-ignore doc.noodles.deleteAt(0) }) - assert.deepStrictEqual(s1, {noodles: []}) + assert.deepStrictEqual(s1, { noodles: [] }) }) - it('should handle arbitrary-depth nesting', () => { - s1 = Automerge.change(s1, doc => doc.maze = [[[[[[[['noodles', ['here']]]]]]]]]) - s1 = Automerge.change(s1, doc => doc.maze[0][0][0][0][0][0][0][1].unshift('found')) - assert.deepStrictEqual(s1.maze, [[[[[[[['noodles', ['found', 'here']]]]]]]]]) - assert.deepStrictEqual(s1.maze[0][0][0][0][0][0][0][1][1], 'here') + it("should handle arbitrary-depth nesting", () => { + s1 = Automerge.change( + s1, + doc => (doc.maze = [[[[[[[["noodles", ["here"]]]]]]]]]) + ) + s1 = Automerge.change(s1, doc => + doc.maze[0][0][0][0][0][0][0][1].unshift("found") + ) + assert.deepStrictEqual(s1.maze, [ + [[[[[[["noodles", ["found", "here"]]]]]]]], + ]) + assert.deepStrictEqual(s1.maze[0][0][0][0][0][0][0][1][1], "here") s2 = Automerge.load(Automerge.save(s1)) - assert.deepStrictEqual(s1,s2) + assert.deepStrictEqual(s1, s2) }) - it('should not allow several references to the same list object', () => { - s1 = Automerge.change(s1, doc => doc.list = []) + it("should not allow several references to the same list object", () => { + s1 = Automerge.change(s1, doc => (doc.list = [])) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = doc.list }) + Automerge.change(s1, doc => { + doc.x = doc.list + }) }, /Cannot create a reference to an existing document object/) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = s1.list }) + Automerge.change(s1, doc => { + doc.x = s1.list + }) }, /Cannot create a reference to an existing document object/) assert.throws(() => { - Automerge.change(s1, doc => { doc.x = []; doc.y = doc.x }) + Automerge.change(s1, doc => { + doc.x = [] + doc.y = doc.x + }) }, /Cannot create a reference to an existing document object/) }) }) - describe('counters', () => { + describe("counters", () => { // counter - it('should allow deleting counters from maps', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = {wrens: new Automerge.Counter(1)}) + it("should allow deleting counters from maps", () => { + const s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = { wrens: new Automerge.Counter(1) }) + ) const s2 = Automerge.change(s1, doc => doc.birds.wrens.increment(2)) const s3 = Automerge.change(s2, doc => delete doc.birds.wrens) - 
assert.deepStrictEqual(s2, {birds: {wrens: new Automerge.Counter(3)}}) - assert.deepStrictEqual(s3, {birds: {}}) + assert.deepStrictEqual(s2, { + birds: { wrens: new Automerge.Counter(3) }, + }) + assert.deepStrictEqual(s3, { birds: {} }) }) // counter @@ -816,8 +1057,11 @@ describe('Automerge', () => { }) }) - describe('concurrent use', () => { - let s1: Automerge.Doc, s2: Automerge.Doc, s3: Automerge.Doc, s4: Automerge.Doc + describe("concurrent use", () => { + let s1: Automerge.Doc, + s2: Automerge.Doc, + s3: Automerge.Doc, + s4: Automerge.Doc beforeEach(() => { s1 = Automerge.init() s2 = Automerge.init() @@ -825,21 +1069,21 @@ describe('Automerge', () => { s4 = Automerge.init() }) - it('should merge concurrent updates of different properties', () => { - s1 = Automerge.change(s1, doc => doc.foo = 'bar') - s2 = Automerge.change(s2, doc => doc.hello = 'world') + it("should merge concurrent updates of different properties", () => { + s1 = Automerge.change(s1, doc => (doc.foo = "bar")) + s2 = Automerge.change(s2, doc => (doc.hello = "world")) s3 = Automerge.merge(s1, s2) - assert.strictEqual(s3.foo, 'bar') - assert.strictEqual(s3.hello, 'world') - assert.deepStrictEqual(s3, {foo: 'bar', hello: 'world'}) - assert.strictEqual(Automerge.getConflicts(s3, 'foo'), undefined) - assert.strictEqual(Automerge.getConflicts(s3, 'hello'), undefined) + assert.strictEqual(s3.foo, "bar") + assert.strictEqual(s3.hello, "world") + assert.deepStrictEqual(s3, { foo: "bar", hello: "world" }) + assert.strictEqual(Automerge.getConflicts(s3, "foo"), undefined) + assert.strictEqual(Automerge.getConflicts(s3, "hello"), undefined) s4 = Automerge.load(Automerge.save(s3)) - assert.deepEqual(s3,s4) + assert.deepEqual(s3, s4) }) - it('should add concurrent increments of the same property', () => { - s1 = Automerge.change(s1, doc => doc.counter = new Automerge.Counter()) + it("should add concurrent increments of the same property", () => { + s1 = Automerge.change(s1, doc => (doc.counter = new Automerge.Counter())) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.counter.increment()) s2 = Automerge.change(s2, doc => doc.counter.increment(2)) @@ -847,391 +1091,523 @@ describe('Automerge', () => { assert.strictEqual(s1.counter.value, 1) assert.strictEqual(s2.counter.value, 2) assert.strictEqual(s3.counter.value, 3) - assert.strictEqual(Automerge.getConflicts(s3, 'counter'), undefined) + assert.strictEqual(Automerge.getConflicts(s3, "counter"), undefined) s4 = Automerge.load(Automerge.save(s3)) - assert.deepEqual(s3,s4) + assert.deepEqual(s3, s4) }) - it('should add increments only to the values they precede', () => { - s1 = Automerge.change(s1, doc => doc.counter = new Automerge.Counter(0)) + it("should add increments only to the values they precede", () => { + s1 = Automerge.change(s1, doc => (doc.counter = new Automerge.Counter(0))) s1 = Automerge.change(s1, doc => doc.counter.increment()) - s2 = Automerge.change(s2, doc => doc.counter = new Automerge.Counter(100)) + s2 = Automerge.change( + s2, + doc => (doc.counter = new Automerge.Counter(100)) + ) s2 = Automerge.change(s2, doc => doc.counter.increment(3)) s3 = Automerge.merge(s1, s2) if (Automerge.getActorId(s1) > Automerge.getActorId(s2)) { - assert.deepStrictEqual(s3, {counter: new Automerge.Counter(1)}) + assert.deepStrictEqual(s3, { counter: new Automerge.Counter(1) }) } else { - assert.deepStrictEqual(s3, {counter: new Automerge.Counter(103)}) + assert.deepStrictEqual(s3, { counter: new Automerge.Counter(103) }) } - 
assert.deepStrictEqual(Automerge.getConflicts(s3, 'counter'), { + assert.deepStrictEqual(Automerge.getConflicts(s3, "counter"), { [`1@${Automerge.getActorId(s1)}`]: new Automerge.Counter(1), - [`1@${Automerge.getActorId(s2)}`]: new Automerge.Counter(103) + [`1@${Automerge.getActorId(s2)}`]: new Automerge.Counter(103), }) s4 = Automerge.load(Automerge.save(s3)) - assert.deepEqual(s3,s4) + assert.deepEqual(s3, s4) }) - it('should detect concurrent updates of the same field', () => { - s1 = Automerge.change(s1, doc => doc.field = 'one') - s2 = Automerge.change(s2, doc => doc.field = 'two') + it("should detect concurrent updates of the same field", () => { + s1 = Automerge.change(s1, doc => (doc.field = "one")) + s2 = Automerge.change(s2, doc => (doc.field = "two")) s3 = Automerge.merge(s1, s2) if (Automerge.getActorId(s1) > Automerge.getActorId(s2)) { - assert.deepStrictEqual(s3, {field: 'one'}) + assert.deepStrictEqual(s3, { field: "one" }) } else { - assert.deepStrictEqual(s3, {field: 'two'}) + assert.deepStrictEqual(s3, { field: "two" }) } - assert.deepStrictEqual(Automerge.getConflicts(s3, 'field'), { - [`1@${Automerge.getActorId(s1)}`]: 'one', - [`1@${Automerge.getActorId(s2)}`]: 'two' + assert.deepStrictEqual(Automerge.getConflicts(s3, "field"), { + [`1@${Automerge.getActorId(s1)}`]: "one", + [`1@${Automerge.getActorId(s2)}`]: "two", }) }) - it('should detect concurrent updates of the same list element', () => { - s1 = Automerge.change(s1, doc => doc.birds = ['finch']) + it("should detect concurrent updates of the same list element", () => { + s1 = Automerge.change(s1, doc => (doc.birds = ["finch"])) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.birds[0] = 'greenfinch') - s2 = Automerge.change(s2, doc => doc.birds[0] = 'goldfinch_') + s1 = Automerge.change(s1, doc => (doc.birds[0] = "greenfinch")) + s2 = Automerge.change(s2, doc => (doc.birds[0] = "goldfinch_")) s3 = Automerge.merge(s1, s2) if (Automerge.getActorId(s1) > Automerge.getActorId(s2)) { - assert.deepStrictEqual(s3.birds, ['greenfinch']) + assert.deepStrictEqual(s3.birds, ["greenfinch"]) } else { - assert.deepStrictEqual(s3.birds, ['goldfinch_']) + assert.deepStrictEqual(s3.birds, ["goldfinch_"]) } assert.deepStrictEqual(Automerge.getConflicts(s3.birds, 0), { - [`8@${Automerge.getActorId(s1)}`]: 'greenfinch', - [`8@${Automerge.getActorId(s2)}`]: 'goldfinch_' + [`8@${Automerge.getActorId(s1)}`]: "greenfinch", + [`8@${Automerge.getActorId(s2)}`]: "goldfinch_", }) }) - it('should handle assignment conflicts of different types', () => { - s1 = Automerge.change(s1, doc => doc.field = 'string') - s2 = Automerge.change(s2, doc => doc.field = ['list']) - s3 = Automerge.change(s3, doc => doc.field = {thing: 'map'}) + it("should handle assignment conflicts of different types", () => { + s1 = Automerge.change(s1, doc => (doc.field = "string")) + s2 = Automerge.change(s2, doc => (doc.field = ["list"])) + s3 = Automerge.change(s3, doc => (doc.field = { thing: "map" })) s1 = Automerge.merge(Automerge.merge(s1, s2), s3) - assertEqualsOneOf(s1.field, 'string', ['list'], {thing: 'map'}) - assert.deepStrictEqual(Automerge.getConflicts(s1, 'field'), { - [`1@${Automerge.getActorId(s1)}`]: 'string', - [`1@${Automerge.getActorId(s2)}`]: ['list'], - [`1@${Automerge.getActorId(s3)}`]: {thing: 'map'} + assertEqualsOneOf(s1.field, "string", ["list"], { thing: "map" }) + assert.deepStrictEqual(Automerge.getConflicts(s1, "field"), { + [`1@${Automerge.getActorId(s1)}`]: "string", + [`1@${Automerge.getActorId(s2)}`]: ["list"], + 
[`1@${Automerge.getActorId(s3)}`]: { thing: "map" }, }) }) - it('should handle changes within a conflicting map field', () => { - s1 = Automerge.change(s1, doc => doc.field = 'string') - s2 = Automerge.change(s2, doc => doc.field = {}) - s2 = Automerge.change(s2, doc => doc.field.innerKey = 42) + it("should handle changes within a conflicting map field", () => { + s1 = Automerge.change(s1, doc => (doc.field = "string")) + s2 = Automerge.change(s2, doc => (doc.field = {})) + s2 = Automerge.change(s2, doc => (doc.field.innerKey = 42)) s3 = Automerge.merge(s1, s2) - assertEqualsOneOf(s3.field, 'string', {innerKey: 42}) - assert.deepStrictEqual(Automerge.getConflicts(s3, 'field'), { - [`1@${Automerge.getActorId(s1)}`]: 'string', - [`1@${Automerge.getActorId(s2)}`]: {innerKey: 42} + assertEqualsOneOf(s3.field, "string", { innerKey: 42 }) + assert.deepStrictEqual(Automerge.getConflicts(s3, "field"), { + [`1@${Automerge.getActorId(s1)}`]: "string", + [`1@${Automerge.getActorId(s2)}`]: { innerKey: 42 }, }) }) - it('should handle changes within a conflicting list element', () => { - s1 = Automerge.change(s1, doc => doc.list = ['hello']) + it("should handle changes within a conflicting list element", () => { + s1 = Automerge.change(s1, doc => (doc.list = ["hello"])) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.list[0] = {map1: true}) - s1 = Automerge.change(s1, doc => doc.list[0].key = 1) - s2 = Automerge.change(s2, doc => doc.list[0] = {map2: true}) - s2 = Automerge.change(s2, doc => doc.list[0].key = 2) + s1 = Automerge.change(s1, doc => (doc.list[0] = { map1: true })) + s1 = Automerge.change(s1, doc => (doc.list[0].key = 1)) + s2 = Automerge.change(s2, doc => (doc.list[0] = { map2: true })) + s2 = Automerge.change(s2, doc => (doc.list[0].key = 2)) s3 = Automerge.merge(s1, s2) if (Automerge.getActorId(s1) > Automerge.getActorId(s2)) { - assert.deepStrictEqual(s3.list, [{map1: true, key: 1}]) + assert.deepStrictEqual(s3.list, [{ map1: true, key: 1 }]) } else { - assert.deepStrictEqual(s3.list, [{map2: true, key: 2}]) + assert.deepStrictEqual(s3.list, [{ map2: true, key: 2 }]) } assert.deepStrictEqual(Automerge.getConflicts(s3.list, 0), { - [`8@${Automerge.getActorId(s1)}`]: {map1: true, key: 1}, - [`8@${Automerge.getActorId(s2)}`]: {map2: true, key: 2} + [`8@${Automerge.getActorId(s1)}`]: { map1: true, key: 1 }, + [`8@${Automerge.getActorId(s2)}`]: { map2: true, key: 2 }, }) }) - it('should not merge concurrently assigned nested maps', () => { - s1 = Automerge.change(s1, doc => doc.config = {background: 'blue'}) - s2 = Automerge.change(s2, doc => doc.config = {logo_url: 'logo.png'}) + it("should not merge concurrently assigned nested maps", () => { + s1 = Automerge.change(s1, doc => (doc.config = { background: "blue" })) + s2 = Automerge.change(s2, doc => (doc.config = { logo_url: "logo.png" })) s3 = Automerge.merge(s1, s2) - assertEqualsOneOf(s3.config, {background: 'blue'}, {logo_url: 'logo.png'}) - assert.deepStrictEqual(Automerge.getConflicts(s3, 'config'), { - [`1@${Automerge.getActorId(s1)}`]: {background: 'blue'}, - [`1@${Automerge.getActorId(s2)}`]: {logo_url: 'logo.png'} + assertEqualsOneOf( + s3.config, + { background: "blue" }, + { logo_url: "logo.png" } + ) + assert.deepStrictEqual(Automerge.getConflicts(s3, "config"), { + [`1@${Automerge.getActorId(s1)}`]: { background: "blue" }, + [`1@${Automerge.getActorId(s2)}`]: { logo_url: "logo.png" }, }) }) - it('should clear conflicts after assigning a new value', () => { - s1 = Automerge.change(s1, doc => doc.field = 
'one') - s2 = Automerge.change(s2, doc => doc.field = 'two') + it("should clear conflicts after assigning a new value", () => { + s1 = Automerge.change(s1, doc => (doc.field = "one")) + s2 = Automerge.change(s2, doc => (doc.field = "two")) s3 = Automerge.merge(s1, s2) - s3 = Automerge.change(s3, doc => doc.field = 'three') - assert.deepStrictEqual(s3, {field: 'three'}) - assert.strictEqual(Automerge.getConflicts(s3, 'field'), undefined) + s3 = Automerge.change(s3, doc => (doc.field = "three")) + assert.deepStrictEqual(s3, { field: "three" }) + assert.strictEqual(Automerge.getConflicts(s3, "field"), undefined) s2 = Automerge.merge(s2, s3) - assert.deepStrictEqual(s2, {field: 'three'}) - assert.strictEqual(Automerge.getConflicts(s2, 'field'), undefined) + assert.deepStrictEqual(s2, { field: "three" }) + assert.strictEqual(Automerge.getConflicts(s2, "field"), undefined) }) - it('should handle concurrent insertions at different list positions', () => { - s1 = Automerge.change(s1, doc => doc.list = ['one', 'three']) + it("should handle concurrent insertions at different list positions", () => { + s1 = Automerge.change(s1, doc => (doc.list = ["one", "three"])) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.list.splice(1, 0, 'two')) - s2 = Automerge.change(s2, doc => doc.list.push('four')) + s1 = Automerge.change(s1, doc => doc.list.splice(1, 0, "two")) + s2 = Automerge.change(s2, doc => doc.list.push("four")) s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s3, {list: ['one', 'two', 'three', 'four']}) - assert.strictEqual(Automerge.getConflicts(s3, 'list'), undefined) + assert.deepStrictEqual(s3, { list: ["one", "two", "three", "four"] }) + assert.strictEqual(Automerge.getConflicts(s3, "list"), undefined) }) - it('should handle concurrent insertions at the same list position', () => { - s1 = Automerge.change(s1, doc => doc.birds = ['parakeet']) + it("should handle concurrent insertions at the same list position", () => { + s1 = Automerge.change(s1, doc => (doc.birds = ["parakeet"])) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.birds.push('starling')) - s2 = Automerge.change(s2, doc => doc.birds.push('chaffinch')) + s1 = Automerge.change(s1, doc => doc.birds.push("starling")) + s2 = Automerge.change(s2, doc => doc.birds.push("chaffinch")) s3 = Automerge.merge(s1, s2) - assertEqualsOneOf(s3.birds, ['parakeet', 'starling', 'chaffinch'], ['parakeet', 'chaffinch', 'starling']) + assertEqualsOneOf( + s3.birds, + ["parakeet", "starling", "chaffinch"], + ["parakeet", "chaffinch", "starling"] + ) s2 = Automerge.merge(s2, s3) assert.deepStrictEqual(s2, s3) }) - it('should handle concurrent assignment and deletion of a map entry', () => { + it("should handle concurrent assignment and deletion of a map entry", () => { // Add-wins semantics - s1 = Automerge.change(s1, doc => doc.bestBird = 'robin') + s1 = Automerge.change(s1, doc => (doc.bestBird = "robin")) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => delete doc.bestBird) - s2 = Automerge.change(s2, doc => doc.bestBird = 'magpie') + s2 = Automerge.change(s2, doc => (doc.bestBird = "magpie")) s3 = Automerge.merge(s1, s2) assert.deepStrictEqual(s1, {}) - assert.deepStrictEqual(s2, {bestBird: 'magpie'}) - assert.deepStrictEqual(s3, {bestBird: 'magpie'}) - assert.strictEqual(Automerge.getConflicts(s3, 'bestBird'), undefined) + assert.deepStrictEqual(s2, { bestBird: "magpie" }) + assert.deepStrictEqual(s3, { bestBird: "magpie" }) + assert.strictEqual(Automerge.getConflicts(s3, "bestBird"), 
undefined) }) - it('should handle concurrent assignment and deletion of a list element', () => { + it("should handle concurrent assignment and deletion of a list element", () => { // Concurrent assignment ressurects a deleted list element. Perhaps a little // surprising, but consistent with add-wins semantics of maps (see test above) - s1 = Automerge.change(s1, doc => doc.birds = ['blackbird', 'thrush', 'goldfinch']) + s1 = Automerge.change( + s1, + doc => (doc.birds = ["blackbird", "thrush", "goldfinch"]) + ) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.birds[1] = 'starling') + s1 = Automerge.change(s1, doc => (doc.birds[1] = "starling")) s2 = Automerge.change(s2, doc => doc.birds.splice(1, 1)) s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s1.birds, ['blackbird', 'starling', 'goldfinch']) - assert.deepStrictEqual(s2.birds, ['blackbird', 'goldfinch']) - assert.deepStrictEqual(s3.birds, ['blackbird', 'starling', 'goldfinch']) + assert.deepStrictEqual(s1.birds, ["blackbird", "starling", "goldfinch"]) + assert.deepStrictEqual(s2.birds, ["blackbird", "goldfinch"]) + assert.deepStrictEqual(s3.birds, ["blackbird", "starling", "goldfinch"]) s4 = Automerge.load(Automerge.save(s3)) - assert.deepStrictEqual(s3, s4); + assert.deepStrictEqual(s3, s4) }) - it('should handle insertion after a deleted list element', () => { - s1 = Automerge.change(s1, doc => doc.birds = ['blackbird', 'thrush', 'goldfinch']) + it("should handle insertion after a deleted list element", () => { + s1 = Automerge.change( + s1, + doc => (doc.birds = ["blackbird", "thrush", "goldfinch"]) + ) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.birds.splice(1, 2)) - s2 = Automerge.change(s2, doc => doc.birds.splice(2, 0, 'starling')) + s2 = Automerge.change(s2, doc => doc.birds.splice(2, 0, "starling")) s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s3, {birds: ['blackbird', 'starling']}) - assert.deepStrictEqual(Automerge.merge(s2, s3), {birds: ['blackbird', 'starling']}) + assert.deepStrictEqual(s3, { birds: ["blackbird", "starling"] }) + assert.deepStrictEqual(Automerge.merge(s2, s3), { + birds: ["blackbird", "starling"], + }) }) - it('should handle concurrent deletion of the same element', () => { - s1 = Automerge.change(s1, doc => doc.birds = ['albatross', 'buzzard', 'cormorant']) + it("should handle concurrent deletion of the same element", () => { + s1 = Automerge.change( + s1, + doc => (doc.birds = ["albatross", "buzzard", "cormorant"]) + ) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.birds.deleteAt(1)) // buzzard s2 = Automerge.change(s2, doc => doc.birds.deleteAt(1)) // buzzard s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s3.birds, ['albatross', 'cormorant']) + assert.deepStrictEqual(s3.birds, ["albatross", "cormorant"]) }) - it('should handle concurrent deletion of different elements', () => { - s1 = Automerge.change(s1, doc => doc.birds = ['albatross', 'buzzard', 'cormorant']) + it("should handle concurrent deletion of different elements", () => { + s1 = Automerge.change( + s1, + doc => (doc.birds = ["albatross", "buzzard", "cormorant"]) + ) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => doc.birds.deleteAt(0)) // albatross s2 = Automerge.change(s2, doc => doc.birds.deleteAt(1)) // buzzard s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s3.birds, ['cormorant']) + assert.deepStrictEqual(s3.birds, ["cormorant"]) }) - it('should handle concurrent updates at different levels of the tree', () => { + it("should handle 
concurrent updates at different levels of the tree", () => { // A delete higher up in the tree overrides an update in a subtree - s1 = Automerge.change(s1, doc => doc.animals = {birds: {pink: 'flamingo', black: 'starling'}, mammals: ['badger']}) + s1 = Automerge.change( + s1, + doc => + (doc.animals = { + birds: { pink: "flamingo", black: "starling" }, + mammals: ["badger"], + }) + ) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.animals.birds.brown = 'sparrow') + s1 = Automerge.change(s1, doc => (doc.animals.birds.brown = "sparrow")) s2 = Automerge.change(s2, doc => delete doc.animals.birds) s3 = Automerge.merge(s1, s2) assert.deepStrictEqual(s1.animals, { birds: { - pink: 'flamingo', brown: 'sparrow', black: 'starling' + pink: "flamingo", + brown: "sparrow", + black: "starling", }, - mammals: ['badger'] + mammals: ["badger"], }) - assert.deepStrictEqual(s2.animals, {mammals: ['badger']}) - assert.deepStrictEqual(s3.animals, {mammals: ['badger']}) + assert.deepStrictEqual(s2.animals, { mammals: ["badger"] }) + assert.deepStrictEqual(s3.animals, { mammals: ["badger"] }) }) - it('should handle updates of concurrently deleted objects', () => { - s1 = Automerge.change(s1, doc => doc.birds = {blackbird: {feathers: 'black'}}) + it("should handle updates of concurrently deleted objects", () => { + s1 = Automerge.change( + s1, + doc => (doc.birds = { blackbird: { feathers: "black" } }) + ) s2 = Automerge.merge(s2, s1) s1 = Automerge.change(s1, doc => delete doc.birds.blackbird) - s2 = Automerge.change(s2, doc => doc.birds.blackbird.beak = 'orange') + s2 = Automerge.change(s2, doc => (doc.birds.blackbird.beak = "orange")) s3 = Automerge.merge(s1, s2) - assert.deepStrictEqual(s1, {birds: {}}) + assert.deepStrictEqual(s1, { birds: {} }) }) - it('should not interleave sequence insertions at the same position', () => { - s1 = Automerge.change(s1, doc => doc.wisdom = []) + it("should not interleave sequence insertions at the same position", () => { + s1 = Automerge.change(s1, doc => (doc.wisdom = [])) s2 = Automerge.merge(s2, s1) - s1 = Automerge.change(s1, doc => doc.wisdom.push('to', 'be', 'is', 'to', 'do')) - s2 = Automerge.change(s2, doc => doc.wisdom.push('to', 'do', 'is', 'to', 'be')) + s1 = Automerge.change(s1, doc => + doc.wisdom.push("to", "be", "is", "to", "do") + ) + s2 = Automerge.change(s2, doc => + doc.wisdom.push("to", "do", "is", "to", "be") + ) s3 = Automerge.merge(s1, s2) - assertEqualsOneOf(s3.wisdom, - ['to', 'be', 'is', 'to', 'do', 'to', 'do', 'is', 'to', 'be'], - ['to', 'do', 'is', 'to', 'be', 'to', 'be', 'is', 'to', 'do']) + assertEqualsOneOf( + s3.wisdom, + ["to", "be", "is", "to", "do", "to", "do", "is", "to", "be"], + ["to", "do", "is", "to", "be", "to", "be", "is", "to", "do"] + ) // In case you're wondering: http://quoteinvestigator.com/2013/09/16/do-be-do/ }) - describe('multiple insertions at the same list position', () => { - it('should handle insertion by greater actor ID', () => { - s1 = Automerge.init('aaaa') - s2 = Automerge.init('bbbb') - s1 = Automerge.change(s1, doc => doc.list = ['two']) + describe("multiple insertions at the same list position", () => { + it("should handle insertion by greater actor ID", () => { + s1 = Automerge.init("aaaa") + s2 = Automerge.init("bbbb") + s1 = Automerge.change(s1, doc => (doc.list = ["two"])) s2 = Automerge.merge(s2, s1) - s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, 'one')) - assert.deepStrictEqual(s2.list, ['one', 'two']) + s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "one")) + 
assert.deepStrictEqual(s2.list, ["one", "two"]) }) - it('should handle insertion by lesser actor ID', () => { - s1 = Automerge.init('bbbb') - s2 = Automerge.init('aaaa') - s1 = Automerge.change(s1, doc => doc.list = ['two']) + it("should handle insertion by lesser actor ID", () => { + s1 = Automerge.init("bbbb") + s2 = Automerge.init("aaaa") + s1 = Automerge.change(s1, doc => (doc.list = ["two"])) s2 = Automerge.merge(s2, s1) - s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, 'one')) - assert.deepStrictEqual(s2.list, ['one', 'two']) + s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "one")) + assert.deepStrictEqual(s2.list, ["one", "two"]) }) - it('should handle insertion regardless of actor ID', () => { - s1 = Automerge.change(s1, doc => doc.list = ['two']) + it("should handle insertion regardless of actor ID", () => { + s1 = Automerge.change(s1, doc => (doc.list = ["two"])) s2 = Automerge.merge(s2, s1) - s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, 'one')) - assert.deepStrictEqual(s2.list, ['one', 'two']) + s2 = Automerge.change(s2, doc => doc.list.splice(0, 0, "one")) + assert.deepStrictEqual(s2.list, ["one", "two"]) }) - it('should make insertion order consistent with causality', () => { - s1 = Automerge.change(s1, doc => doc.list = ['four']) + it("should make insertion order consistent with causality", () => { + s1 = Automerge.change(s1, doc => (doc.list = ["four"])) s2 = Automerge.merge(s2, s1) - s2 = Automerge.change(s2, doc => doc.list.unshift('three')) + s2 = Automerge.change(s2, doc => doc.list.unshift("three")) s1 = Automerge.merge(s1, s2) - s1 = Automerge.change(s1, doc => doc.list.unshift('two')) + s1 = Automerge.change(s1, doc => doc.list.unshift("two")) s2 = Automerge.merge(s2, s1) - s2 = Automerge.change(s2, doc => doc.list.unshift('one')) - assert.deepStrictEqual(s2.list, ['one', 'two', 'three', 'four']) + s2 = Automerge.change(s2, doc => doc.list.unshift("one")) + assert.deepStrictEqual(s2.list, ["one", "two", "three", "four"]) }) }) }) - describe('saving and loading', () => { - it('should save and restore an empty document', () => { + describe("saving and loading", () => { + it("should save and restore an empty document", () => { let s = Automerge.load(Automerge.save(Automerge.init())) assert.deepStrictEqual(s, {}) }) - it('should generate a new random actor ID', () => { + it("should generate a new random actor ID", () => { let s1 = Automerge.init() let s2 = Automerge.load(Automerge.save(s1)) - assert.strictEqual(UUID_PATTERN.test(Automerge.getActorId(s1).toString()), true) - assert.strictEqual(UUID_PATTERN.test(Automerge.getActorId(s2).toString()), true) + assert.strictEqual( + UUID_PATTERN.test(Automerge.getActorId(s1).toString()), + true + ) + assert.strictEqual( + UUID_PATTERN.test(Automerge.getActorId(s2).toString()), + true + ) assert.notEqual(Automerge.getActorId(s1), Automerge.getActorId(s2)) }) - it('should allow a custom actor ID to be set', () => { - let s = Automerge.load(Automerge.save(Automerge.init()), '333333') - assert.strictEqual(Automerge.getActorId(s), '333333') + it("should allow a custom actor ID to be set", () => { + let s = Automerge.load(Automerge.save(Automerge.init()), "333333") + assert.strictEqual(Automerge.getActorId(s), "333333") }) - it('should reconstitute complex datatypes', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.todos = [{title: 'water plants', done: false}]) + it("should reconstitute complex datatypes", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.todos = [{ 
title: "water plants", done: false }]) + ) let s2 = Automerge.load(Automerge.save(s1)) - assert.deepStrictEqual(s2, {todos: [{title: 'water plants', done: false}]}) + assert.deepStrictEqual(s2, { + todos: [{ title: "water plants", done: false }], + }) }) - it('should save and load maps with @ symbols in the keys', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc["123@4567"] = "hello") + it("should save and load maps with @ symbols in the keys", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc["123@4567"] = "hello") + ) let s2 = Automerge.load(Automerge.save(s1)) assert.deepStrictEqual(s2, { "123@4567": "hello" }) }) - it('should reconstitute conflicts', () => { - let s1 = Automerge.change(Automerge.init('111111'), doc => doc.x = 3) - let s2 = Automerge.change(Automerge.init('222222'), doc => doc.x = 5) + it("should reconstitute conflicts", () => { + let s1 = Automerge.change( + Automerge.init("111111"), + doc => (doc.x = 3) + ) + let s2 = Automerge.change( + Automerge.init("222222"), + doc => (doc.x = 5) + ) s1 = Automerge.merge(s1, s2) let s3 = Automerge.load(Automerge.save(s1)) assert.strictEqual(s1.x, 5) assert.strictEqual(s3.x, 5) - assert.deepStrictEqual(Automerge.getConflicts(s1, 'x'), {'1@111111': 3, '1@222222': 5}) - assert.deepStrictEqual(Automerge.getConflicts(s3, 'x'), {'1@111111': 3, '1@222222': 5}) - }) - - it('should reconstitute element ID counters', () => { - const s1 = Automerge.init('01234567') - const s2 = Automerge.change(s1, doc => doc.list = ['a']) - const listId = Automerge.getObjectId(s2.list) - const changes12 = Automerge.getAllChanges(s2).map(Automerge.decodeChange) - assert.deepStrictEqual(changes12, [{ - hash: changes12[0].hash, actor: '01234567', seq: 1, startOp: 1, - time: changes12[0].time, message: null, deps: [], ops: [ - {obj: '_root', action: 'makeList', key: 'list', pred: []}, - {obj: listId, action: 'makeText', elemId: '_head', insert: true, pred: []}, - {obj: "2@01234567", action: 'set', elemId: '_head', insert: true, value: 'a', pred: []} - ] - }]) - const s3 = Automerge.change(s2, doc => doc.list.deleteAt(0)) - const s4 = Automerge.load(Automerge.save(s3), '01234567') - const s5 = Automerge.change(s4, doc => doc.list.push('b')) - const changes45 = Automerge.getAllChanges(s5).map(Automerge.decodeChange) - assert.deepStrictEqual(s5, {list: ['b']}) - assert.deepStrictEqual(changes45[2], { - hash: changes45[2].hash, actor: '01234567', seq: 3, startOp: 5, - time: changes45[2].time, message: null, deps: [changes45[1].hash], ops: [ - {obj: listId, action: 'makeText', elemId: '_head', insert: true, pred: []}, - {obj: "5@01234567", action: 'set', elemId: '_head', insert: true, value: 'b', pred: []} - ] + assert.deepStrictEqual(Automerge.getConflicts(s1, "x"), { + "1@111111": 3, + "1@222222": 5, + }) + assert.deepStrictEqual(Automerge.getConflicts(s3, "x"), { + "1@111111": 3, + "1@222222": 5, }) }) - it('should allow a reloaded list to be mutated', () => { - let doc = Automerge.change(Automerge.init(), doc => doc.foo = []) + it("should reconstitute element ID counters", () => { + const s1 = Automerge.init("01234567") + const s2 = Automerge.change(s1, doc => (doc.list = ["a"])) + const listId = Automerge.getObjectId(s2.list) + const changes12 = Automerge.getAllChanges(s2).map(Automerge.decodeChange) + assert.deepStrictEqual(changes12, [ + { + hash: changes12[0].hash, + actor: "01234567", + seq: 1, + startOp: 1, + time: changes12[0].time, + message: null, + deps: [], + ops: [ + { obj: "_root", action: "makeList", key: 
"list", pred: [] }, + { + obj: listId, + action: "makeText", + elemId: "_head", + insert: true, + pred: [], + }, + { + obj: "2@01234567", + action: "set", + elemId: "_head", + insert: true, + value: "a", + pred: [], + }, + ], + }, + ]) + const s3 = Automerge.change(s2, doc => doc.list.deleteAt(0)) + const s4 = Automerge.load(Automerge.save(s3), "01234567") + const s5 = Automerge.change(s4, doc => doc.list.push("b")) + const changes45 = Automerge.getAllChanges(s5).map(Automerge.decodeChange) + assert.deepStrictEqual(s5, { list: ["b"] }) + assert.deepStrictEqual(changes45[2], { + hash: changes45[2].hash, + actor: "01234567", + seq: 3, + startOp: 5, + time: changes45[2].time, + message: null, + deps: [changes45[1].hash], + ops: [ + { + obj: listId, + action: "makeText", + elemId: "_head", + insert: true, + pred: [], + }, + { + obj: "5@01234567", + action: "set", + elemId: "_head", + insert: true, + value: "b", + pred: [], + }, + ], + }) + }) + + it("should allow a reloaded list to be mutated", () => { + let doc = Automerge.change(Automerge.init(), doc => (doc.foo = [])) doc = Automerge.load(Automerge.save(doc)) - doc = Automerge.change(doc, 'add', doc => doc.foo.push(1)) + doc = Automerge.change(doc, "add", doc => doc.foo.push(1)) doc = Automerge.load(Automerge.save(doc)) assert.deepStrictEqual(doc.foo, [1]) }) - it('should reload a document containing deflated columns', () => { + it("should reload a document containing deflated columns", () => { // In this test, the keyCtr column is long enough for deflate compression to kick in, but the // keyStr column is short. Thus, the deflate bit gets set for keyCtr but not for keyStr. // When checking whether the columns appear in ascending order, we must ignore the deflate bit. let doc = Automerge.change(Automerge.init(), doc => { doc.list = [] - for (let i = 0; i < 200; i++) doc.list.insertAt(Math.floor(Math.random() * i), 'a') + for (let i = 0; i < 200; i++) + doc.list.insertAt(Math.floor(Math.random() * i), "a") }) Automerge.load(Automerge.save(doc)) let expected: Array = [] - for (let i = 0; i < 200; i++) expected.push('a') - assert.deepStrictEqual(doc, {list: expected}) + for (let i = 0; i < 200; i++) expected.push("a") + assert.deepStrictEqual(doc, { list: expected }) }) - it.skip('should call patchCallback if supplied to load', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) - const s2 = Automerge.change(s1, doc => doc.birds.push('Chaffinch')) - const callbacks: Array = [], actor = Automerge.getActorId(s1) + it.skip("should call patchCallback if supplied to load", () => { + const s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Goldfinch"]) + ) + const s2 = Automerge.change(s1, doc => doc.birds.push("Chaffinch")) + const callbacks: Array = [], + actor = Automerge.getActorId(s1) const reloaded = Automerge.load(Automerge.save(s2), { patchCallback(patch, before, after) { - callbacks.push({patch, before, after}) - } + callbacks.push({ patch, before, after }) + }, }) assert.strictEqual(callbacks.length, 1) assert.deepStrictEqual(callbacks[0].patch, { - maxOp: 3, deps: [decodeChange(Automerge.getAllChanges(s2)[1]).hash], clock: {[actor]: 2}, pendingChanges: 0, - diffs: {objectId: '_root', type: 'map', props: {birds: {[`1@${actor}`]: { - objectId: `1@${actor}`, type: 'list', edits: [ - {action: 'multi-insert', index: 0, elemId: `2@${actor}`, values: ['Goldfinch', 'Chaffinch']} - ] - }}}} + maxOp: 3, + deps: [decodeChange(Automerge.getAllChanges(s2)[1]).hash], + clock: { [actor]: 2 }, + 
pendingChanges: 0, + diffs: { + objectId: "_root", + type: "map", + props: { + birds: { + [`1@${actor}`]: { + objectId: `1@${actor}`, + type: "list", + edits: [ + { + action: "multi-insert", + index: 0, + elemId: `2@${actor}`, + values: ["Goldfinch", "Chaffinch"], + }, + ], + }, + }, + }, + }, }) assert.deepStrictEqual(callbacks[0].before, {}) assert.strictEqual(callbacks[0].after, reloaded) @@ -1239,99 +1615,155 @@ describe('Automerge', () => { }) }) - describe('history API', () => { - it('should return an empty history for an empty document', () => { + describe("history API", () => { + it("should return an empty history for an empty document", () => { assert.deepStrictEqual(Automerge.getHistory(Automerge.init()), []) }) - it('should make past document states accessible', () => { + it("should make past document states accessible", () => { let s = Automerge.init() - s = Automerge.change(s, doc => doc.config = {background: 'blue'}) - s = Automerge.change(s, doc => doc.birds = ['mallard']) - s = Automerge.change(s, doc => doc.birds.unshift('oystercatcher')) - assert.deepStrictEqual(Automerge.getHistory(s).map(state => state.snapshot), [ - {config: {background: 'blue'}}, - {config: {background: 'blue'}, birds: ['mallard']}, - {config: {background: 'blue'}, birds: ['oystercatcher', 'mallard']} - ]) + s = Automerge.change(s, doc => (doc.config = { background: "blue" })) + s = Automerge.change(s, doc => (doc.birds = ["mallard"])) + s = Automerge.change(s, doc => doc.birds.unshift("oystercatcher")) + assert.deepStrictEqual( + Automerge.getHistory(s).map(state => state.snapshot), + [ + { config: { background: "blue" } }, + { config: { background: "blue" }, birds: ["mallard"] }, + { + config: { background: "blue" }, + birds: ["oystercatcher", "mallard"], + }, + ] + ) }) - it('should make change messages accessible', () => { + it("should make change messages accessible", () => { let s = Automerge.init() - s = Automerge.change(s, 'Empty Bookshelf', doc => doc.books = []) - s = Automerge.change(s, 'Add Orwell', doc => doc.books.push('Nineteen Eighty-Four')) - s = Automerge.change(s, 'Add Huxley', doc => doc.books.push('Brave New World')) - assert.deepStrictEqual(s.books, ['Nineteen Eighty-Four', 'Brave New World']) - assert.deepStrictEqual(Automerge.getHistory(s).map(state => state.change.message), - ['Empty Bookshelf', 'Add Orwell', 'Add Huxley']) + s = Automerge.change(s, "Empty Bookshelf", doc => (doc.books = [])) + s = Automerge.change(s, "Add Orwell", doc => + doc.books.push("Nineteen Eighty-Four") + ) + s = Automerge.change(s, "Add Huxley", doc => + doc.books.push("Brave New World") + ) + assert.deepStrictEqual(s.books, [ + "Nineteen Eighty-Four", + "Brave New World", + ]) + assert.deepStrictEqual( + Automerge.getHistory(s).map(state => state.change.message), + ["Empty Bookshelf", "Add Orwell", "Add Huxley"] + ) }) }) - describe('changes API', () => { - it('should return an empty list on an empty document', () => { + describe("changes API", () => { + it("should return an empty list on an empty document", () => { let changes = Automerge.getAllChanges(Automerge.init()) assert.deepStrictEqual(changes, []) }) - it('should return an empty list when nothing changed', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) + it("should return an empty list when nothing changed", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Chaffinch"]) + ) assert.deepStrictEqual(Automerge.getChanges(s1, s1), []) }) - it('should do nothing when applying 
an empty list of changes', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Chaffinch']) + it("should do nothing when applying an empty list of changes", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Chaffinch"]) + ) assert.deepStrictEqual(Automerge.applyChanges(s1, [])[0], s1) }) - it('should return all changes when compared to an empty document', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) - let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) + it("should return all changes when compared to an empty document", () => { + let s1 = Automerge.change( + Automerge.init(), + "Add Chaffinch", + doc => (doc.birds = ["Chaffinch"]) + ) + let s2 = Automerge.change(s1, "Add Bullfinch", doc => + doc.birds.push("Bullfinch") + ) let changes = Automerge.getChanges(Automerge.init(), s2) assert.strictEqual(changes.length, 2) }) - it('should allow a document copy to be reconstructed from scratch', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) - let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) + it("should allow a document copy to be reconstructed from scratch", () => { + let s1 = Automerge.change( + Automerge.init(), + "Add Chaffinch", + doc => (doc.birds = ["Chaffinch"]) + ) + let s2 = Automerge.change(s1, "Add Bullfinch", doc => + doc.birds.push("Bullfinch") + ) let changes = Automerge.getAllChanges(s2) let [s3] = Automerge.applyChanges(Automerge.init(), changes) - assert.deepStrictEqual(s3.birds, ['Chaffinch', 'Bullfinch']) + assert.deepStrictEqual(s3.birds, ["Chaffinch", "Bullfinch"]) }) - it('should return changes since the last given version', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + it("should return changes since the last given version", () => { + let s1 = Automerge.change( + Automerge.init(), + "Add Chaffinch", + doc => (doc.birds = ["Chaffinch"]) + ) let changes1 = Automerge.getAllChanges(s1) - let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) + let s2 = Automerge.change(s1, "Add Bullfinch", doc => + doc.birds.push("Bullfinch") + ) let changes2 = Automerge.getChanges(s1, s2) assert.strictEqual(changes1.length, 1) // Add Chaffinch assert.strictEqual(changes2.length, 1) // Add Bullfinch }) - it('should incrementally apply changes since the last given version', () => { - let s1 = Automerge.change(Automerge.init(), 'Add Chaffinch', doc => doc.birds = ['Chaffinch']) + it("should incrementally apply changes since the last given version", () => { + let s1 = Automerge.change( + Automerge.init(), + "Add Chaffinch", + doc => (doc.birds = ["Chaffinch"]) + ) let changes1 = Automerge.getAllChanges(s1) - let s2 = Automerge.change(s1, 'Add Bullfinch', doc => doc.birds.push('Bullfinch')) + let s2 = Automerge.change(s1, "Add Bullfinch", doc => + doc.birds.push("Bullfinch") + ) let changes2 = Automerge.getChanges(s1, s2) let [s3] = Automerge.applyChanges(Automerge.init(), changes1) let [s4] = Automerge.applyChanges(s3, changes2) - assert.deepStrictEqual(s3.birds, ['Chaffinch']) - assert.deepStrictEqual(s4.birds, ['Chaffinch', 'Bullfinch']) + assert.deepStrictEqual(s3.birds, ["Chaffinch"]) + assert.deepStrictEqual(s4.birds, ["Chaffinch", "Bullfinch"]) }) - it('should handle updates to a list element', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.birds = 
['Chaffinch', 'Bullfinch']) - let s2 = Automerge.change(s1, doc => doc.birds[0] = 'Goldfinch') - let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) - assert.deepStrictEqual(s3.birds, ['Goldfinch', 'Bullfinch']) + it("should handle updates to a list element", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Chaffinch", "Bullfinch"]) + ) + let s2 = Automerge.change(s1, doc => (doc.birds[0] = "Goldfinch")) + let [s3] = Automerge.applyChanges( + Automerge.init(), + Automerge.getAllChanges(s2) + ) + assert.deepStrictEqual(s3.birds, ["Goldfinch", "Bullfinch"]) assert.strictEqual(Automerge.getConflicts(s3.birds, 0), undefined) }) // TEXT - it('should handle updates to a text object', () => { - let s1 = Automerge.change(Automerge.init(), doc => doc.text = 'ab') - let s2 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 1, "A")) - let [s3] = Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2)) - assert.deepStrictEqual([...s3.text], ['A', 'b']) + it("should handle updates to a text object", () => { + let s1 = Automerge.change(Automerge.init(), doc => (doc.text = "ab")) + let s2 = Automerge.change(s1, doc => + Automerge.splice(doc, "text", 0, 1, "A") + ) + let [s3] = Automerge.applyChanges( + Automerge.init(), + Automerge.getAllChanges(s2) + ) + assert.deepStrictEqual([...s3.text], ["A", "b"]) }) /* @@ -1352,60 +1784,90 @@ describe('Automerge', () => { }) */ - it('should report missing dependencies with out-of-order applyChanges', () => { + it("should report missing dependencies with out-of-order applyChanges", () => { let s0 = Automerge.init() - let s1 = Automerge.change(s0, doc => doc.test = ['a']) + let s1 = Automerge.change(s0, doc => (doc.test = ["a"])) let changes01 = Automerge.getAllChanges(s1) - let s2 = Automerge.change(s1, doc => doc.test = ['b']) + let s2 = Automerge.change(s1, doc => (doc.test = ["b"])) let changes12 = Automerge.getChanges(s1, s2) - let s3 = Automerge.change(s2, doc => doc.test = ['c']) + let s3 = Automerge.change(s2, doc => (doc.test = ["c"])) let changes23 = Automerge.getChanges(s2, s3) let s4 = Automerge.init() let [s5] = Automerge.applyChanges(s4, changes23) let [s6] = Automerge.applyChanges(s5, changes12) - assert.deepStrictEqual(Automerge.getMissingDeps(s6, []), [decodeChange(changes01[0]).hash]) + assert.deepStrictEqual(Automerge.getMissingDeps(s6, []), [ + decodeChange(changes01[0]).hash, + ]) }) - it('should call patchCallback if supplied when applying changes', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) + it("should call patchCallback if supplied when applying changes", () => { + const s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Goldfinch"]) + ) const callbacks: Array = [] const before = Automerge.init() - const [after] = Automerge.applyChanges(before, Automerge.getAllChanges(s1), { - patchCallback(patch, before, after) { - callbacks.push({patch, before, after}) + const [after] = Automerge.applyChanges( + before, + Automerge.getAllChanges(s1), + { + patchCallback(patch, before, after) { + callbacks.push({ patch, before, after }) + }, } - }) + ) assert.strictEqual(callbacks.length, 1) - assert.deepStrictEqual(callbacks[0].patch[0], { action: 'put', path: ["birds"], value: [] }) - assert.deepStrictEqual(callbacks[0].patch[1], { action: 'insert', path: ["birds",0], values: [""] }) - assert.deepStrictEqual(callbacks[0].patch[2], { action: 'splice', path: ["birds",0,0], value: "Goldfinch" }) + 
assert.deepStrictEqual(callbacks[0].patch[0], { + action: "put", + path: ["birds"], + value: [], + }) + assert.deepStrictEqual(callbacks[0].patch[1], { + action: "insert", + path: ["birds", 0], + values: [""], + }) + assert.deepStrictEqual(callbacks[0].patch[2], { + action: "splice", + path: ["birds", 0, 0], + value: "Goldfinch", + }) assert.strictEqual(callbacks[0].before, before) assert.strictEqual(callbacks[0].after, after) }) - it('should merge multiple applied changes into one patch', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.birds = ['Goldfinch']) - const s2 = Automerge.change(s1, doc => doc.birds.push('Chaffinch')) + it("should merge multiple applied changes into one patch", () => { + const s1 = Automerge.change( + Automerge.init(), + doc => (doc.birds = ["Goldfinch"]) + ) + const s2 = Automerge.change(s1, doc => doc.birds.push("Chaffinch")) const patches: Array = [] - Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2), - {patchCallback: p => patches.push(... p)}) + Automerge.applyChanges(Automerge.init(), Automerge.getAllChanges(s2), { + patchCallback: p => patches.push(...p), + }) assert.deepStrictEqual(patches, [ - { action: 'put', path: [ 'birds' ], value: [] }, - { action: "insert", path: [ "birds", 0 ], values: [ "" ] }, - { action: "splice", path: [ "birds", 0, 0 ], value: "Goldfinch" }, - { action: "insert", path: [ "birds", 1 ], values: [ "" ] }, - { action: "splice", path: [ "birds", 1, 0 ], value: "Chaffinch" } + { action: "put", path: ["birds"], value: [] }, + { action: "insert", path: ["birds", 0], values: [""] }, + { action: "splice", path: ["birds", 0, 0], value: "Goldfinch" }, + { action: "insert", path: ["birds", 1], values: [""] }, + { action: "splice", path: ["birds", 1, 0], value: "Chaffinch" }, ]) }) - it('should call a patchCallback registered on doc initialisation', () => { - const s1 = Automerge.change(Automerge.init(), doc => doc.bird = 'Goldfinch') + it("should call a patchCallback registered on doc initialisation", () => { + const s1 = Automerge.change( + Automerge.init(), + doc => (doc.bird = "Goldfinch") + ) const patches: Array = [] - const before = Automerge.init({patchCallback: p => patches.push(... 
p)}) + const before = Automerge.init({ + patchCallback: p => patches.push(...p), + }) Automerge.applyChanges(before, Automerge.getAllChanges(s1)) assert.deepStrictEqual(patches, [ - { action: "put", path: [ "bird" ], value: "" }, - { action: "splice", path: [ "bird", 0 ], value: "Goldfinch" } + { action: "put", path: ["bird"], value: "" }, + { action: "splice", path: ["bird", 0], value: "Goldfinch" }, ]) }) }) diff --git a/javascript/test/sync_test.ts b/javascript/test/sync_test.ts index 8e03c18a..5724985c 100644 --- a/javascript/test/sync_test.ts +++ b/javascript/test/sync_test.ts @@ -1,7 +1,13 @@ -import * as assert from 'assert' -import * as Automerge from '../src' -import { BloomFilter } from './legacy/sync' -import { decodeSyncMessage, encodeSyncMessage, decodeSyncState, encodeSyncState, initSyncState } from "../src" +import * as assert from "assert" +import * as Automerge from "../src" +import { BloomFilter } from "./legacy/sync" +import { + decodeSyncMessage, + encodeSyncMessage, + decodeSyncState, + encodeSyncState, + initSyncState, +} from "../src" function getHeads(doc) { return Automerge.getHeads(doc) @@ -11,32 +17,41 @@ function getMissingDeps(doc) { return Automerge.getMissingDeps(doc, []) } -function sync(a, b, aSyncState = initSyncState(), bSyncState = initSyncState()) { +function sync( + a, + b, + aSyncState = initSyncState(), + bSyncState = initSyncState() +) { const MAX_ITER = 10 - let aToBmsg: Automerge.SyncMessage | null = null, bToAmsg: Automerge.SyncMessage | null = null, i = 0 + let aToBmsg: Automerge.SyncMessage | null = null, + bToAmsg: Automerge.SyncMessage | null = null, + i = 0 do { - [aSyncState, aToBmsg] = Automerge.generateSyncMessage(a, aSyncState) + ;[aSyncState, aToBmsg] = Automerge.generateSyncMessage(a, aSyncState) ;[bSyncState, bToAmsg] = Automerge.generateSyncMessage(b, bSyncState) if (aToBmsg) { - [b, bSyncState] = Automerge.receiveSyncMessage(b, bSyncState, aToBmsg) + ;[b, bSyncState] = Automerge.receiveSyncMessage(b, bSyncState, aToBmsg) } if (bToAmsg) { - [a, aSyncState] = Automerge.receiveSyncMessage(a, aSyncState, bToAmsg) + ;[a, aSyncState] = Automerge.receiveSyncMessage(a, aSyncState, bToAmsg) } if (i++ > MAX_ITER) { - throw new Error(`Did not synchronize within ${MAX_ITER} iterations. Do you have a bug causing an infinite loop?`) + throw new Error( + `Did not synchronize within ${MAX_ITER} iterations. 
Do you have a bug causing an infinite loop?` + ) } } while (aToBmsg || bToAmsg) return [a, b, aSyncState, bSyncState] } -describe('Data sync protocol', () => { - describe('with docs already in sync', () => { - describe('an empty local doc', () => { - it('should send a sync message implying no local data', () => { +describe("Data sync protocol", () => { + describe("with docs already in sync", () => { + describe("an empty local doc", () => { + it("should send a sync message implying no local data", () => { let n1 = Automerge.init() let s1 = initSyncState() let m1 @@ -50,28 +65,35 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(message.changes, []) }) - it('should not reply if we have no data as well', () => { - let n1 = Automerge.init(), n2 = Automerge.init() - let s1 = initSyncState(), s2 = initSyncState() - let m1: Automerge.SyncMessage | null = null, m2: Automerge.SyncMessage | null = null + it("should not reply if we have no data as well", () => { + let n1 = Automerge.init(), + n2 = Automerge.init() + let s1 = initSyncState(), + s2 = initSyncState() + let m1: Automerge.SyncMessage | null = null, + m2: Automerge.SyncMessage | null = null ;[s1, m1] = Automerge.generateSyncMessage(n1, s1) if (m1 != null) { - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) } ;[s2, m2] = Automerge.generateSyncMessage(n2, s2) assert.deepStrictEqual(m2, null) }) }) - describe('documents with data', () => { - it('repos with equal heads do not need a reply message', () => { - let n1 = Automerge.init(), n2 = Automerge.init() - let s1 = initSyncState(), s2 = initSyncState() - let m1: Automerge.SyncMessage | null = null, m2: Automerge.SyncMessage | null = null + describe("documents with data", () => { + it("repos with equal heads do not need a reply message", () => { + let n1 = Automerge.init(), + n2 = Automerge.init() + let s1 = initSyncState(), + s2 = initSyncState() + let m1: Automerge.SyncMessage | null = null, + m2: Automerge.SyncMessage | null = null // make two nodes with the same changes - n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.n.push(i)) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.n = [])) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.n.push(i)) ;[n2] = Automerge.applyChanges(n2, Automerge.getAllChanges(n1)) assert.deepStrictEqual(n1, n2) @@ -81,83 +103,95 @@ describe('Data sync protocol', () => { // heads are equal so this message should be null if (m1 != null) { - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, m1) } ;[s2, m2] = Automerge.generateSyncMessage(n2, s2) assert.strictEqual(m2, null) }) - it('n1 should offer all changes to n2 when starting from nothing', () => { - let n1 = Automerge.init(), n2 = Automerge.init() + it("n1 should offer all changes to n2 when starting from nothing", () => { + let n1 = Automerge.init(), + n2 = Automerge.init() // make changes for n1 that n2 should request - n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.n.push(i)) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.n = [])) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.n.push(i)) assert.notDeepStrictEqual(n1, n2) const [after1, after2] = sync(n1, n2) assert.deepStrictEqual(after1, after2) }) - it('should sync peers where 
one has commits the other does not', () => { - let n1 = Automerge.init(), n2 = Automerge.init() + it("should sync peers where one has commits the other does not", () => { + let n1 = Automerge.init(), + n2 = Automerge.init() // make changes for n1 that n2 should request - n1 = Automerge.change(n1, {time: 0}, doc => doc.n = []) - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.n.push(i)) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.n = [])) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.n.push(i)) assert.notDeepStrictEqual(n1, n2) ;[n1, n2] = sync(n1, n2) assert.deepStrictEqual(n1, n2) }) - it('should work with prior sync state', () => { + it("should work with prior sync state", () => { // create & synchronize two nodes - let n1 = Automerge.init(), n2 = Automerge.init() - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init(), + n2 = Automerge.init() + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) // modify the first node further - for (let i = 5; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 5; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) assert.notDeepStrictEqual(n1, n2) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) assert.deepStrictEqual(n1, n2) }) - it('should not generate messages once synced', () => { + it("should not generate messages once synced", () => { // create & synchronize two nodes - let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("abc123"), + n2 = Automerge.init("def456") + let s1 = initSyncState(), + s2 = initSyncState() let message - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - for (let i = 0; i < 5; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.y = i) + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) + for (let i = 0; i < 5; i++) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.y = i)) - // n1 reports what it has + // n1 reports what it has ;[s1, message] = Automerge.generateSyncMessage(n1, s1) // n2 receives that message and sends changes along with what it has - ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, message) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, message) ;[s2, message] = Automerge.generateSyncMessage(n2, s2) assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 5) //assert.deepStrictEqual(patch, null) // no changes arrived // n1 receives the changes and replies with the changes it now knows n2 needs - ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, message) + ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, message) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 5) //assert.deepStrictEqual(patch.diffs.props, {y: {'5@def456': {type: 'value', value: 4, datatype: 'int'}}}) // changes arrived // n2 applies the changes and sends confirmation ending the exchange - ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, message) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, message) ;[s2, message] = Automerge.generateSyncMessage(n2, s2) //assert.deepStrictEqual(patch.diffs.props, {x: {'5@abc123': {type: 'value', value: 4, 
datatype: 'int'}}}) // changes arrived // n1 receives the message and has nothing more to say - ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, message) + ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, message) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) assert.deepStrictEqual(message, null) //assert.deepStrictEqual(patch, null) // no changes arrived @@ -167,27 +201,38 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(message, null) }) - it('should allow simultaneous messages during synchronization', () => { + it("should allow simultaneous messages during synchronization", () => { // create & synchronize two nodes - let n1 = Automerge.init('abc123'), n2 = Automerge.init('def456') - let s1 = initSyncState(), s2 = initSyncState() - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - for (let i = 0; i < 5; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.y = i) - const head1 = getHeads(n1)[0], head2 = getHeads(n2)[0] + let n1 = Automerge.init("abc123"), + n2 = Automerge.init("def456") + let s1 = initSyncState(), + s2 = initSyncState() + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) + for (let i = 0; i < 5; i++) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.y = i)) + const head1 = getHeads(n1)[0], + head2 = getHeads(n2)[0] // both sides report what they have but have no shared peer state let msg1to2, msg2to1 ;[s1, msg1to2] = Automerge.generateSyncMessage(n1, s1) ;[s2, msg2to1] = Automerge.generateSyncMessage(n2, s2) assert.deepStrictEqual(decodeSyncMessage(msg1to2).changes.length, 0) - assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync.length, 0) + assert.deepStrictEqual( + decodeSyncMessage(msg1to2).have[0].lastSync.length, + 0 + ) assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 0) - assert.deepStrictEqual(decodeSyncMessage(msg2to1).have[0].lastSync.length, 0) + assert.deepStrictEqual( + decodeSyncMessage(msg2to1).have[0].lastSync.length, + 0 + ) // n1 and n2 receives that message and update sync state but make no patch - ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) //assert.deepStrictEqual(patch1, null) // no changes arrived, so no patch - ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) //assert.deepStrictEqual(patch2, null) // no changes arrived, so no patch // now both reply with their local changes the other lacks @@ -198,15 +243,14 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 5) // both should now apply the changes and update the frontend - ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) assert.deepStrictEqual(getMissingDeps(n1), []) //assert.notDeepStrictEqual(patch1, null) - assert.deepStrictEqual(n1, {x: 4, y: 4}) - - ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + assert.deepStrictEqual(n1, { x: 4, y: 4 }) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) assert.deepStrictEqual(getMissingDeps(n2), []) //assert.notDeepStrictEqual(patch2, null) - assert.deepStrictEqual(n2, {x: 4, y: 4}) + assert.deepStrictEqual(n2, { x: 4, y: 4 }) // The response acknowledges the changes received, and sends no further changes ;[s1, msg1to2] = Automerge.generateSyncMessage(n1, s1) @@ -215,8 +259,8 @@ describe('Data sync protocol', () => { 
assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 0) // After receiving acknowledgements, their shared heads should be equal - ;[n1, s1, ] = Automerge.receiveSyncMessage(n1, s1, msg2to1) - ;[n2, s2, ] = Automerge.receiveSyncMessage(n2, s2, msg1to2) + ;[n1, s1] = Automerge.receiveSyncMessage(n1, s1, msg2to1) + ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, msg1to2) assert.deepStrictEqual(s1.sharedHeads, [head1, head2].sort()) assert.deepStrictEqual(s2.sharedHeads, [head1, head2].sort()) //assert.deepStrictEqual(patch1, null) @@ -229,47 +273,56 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(msg2to1, null) // If we make one more change, and start another sync, its lastSync should be updated - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 5) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 5)) ;[s1, msg1to2] = Automerge.generateSyncMessage(n1, s1) - assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync, [head1, head2].sort()) + assert.deepStrictEqual( + decodeSyncMessage(msg1to2).have[0].lastSync, + [head1, head2].sort() + ) }) - it('should assume sent changes were recieved until we hear otherwise', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), message: Automerge.SyncMessage | null = null + it("should assume sent changes were recieved until we hear otherwise", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + message: Automerge.SyncMessage | null = null - n1 = Automerge.change(n1, {time: 0}, doc => doc.items = []) - ;[n1, n2, s1, ] = sync(n1, n2) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.items = [])) + ;[n1, n2, s1] = sync(n1, n2) - n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('x')) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.items.push("x")) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) if (message != null) { - assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) } - n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('y')) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.items.push("y")) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) if (message != null) { - assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) } - n1 = Automerge.change(n1, {time: 0}, doc => doc.items.push('z')) + n1 = Automerge.change(n1, { time: 0 }, doc => doc.items.push("z")) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) if (message != null) { - assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) + assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) } }) - it('should work regardless of who initiates the exchange', () => { + it("should work regardless of who initiates the exchange", () => { // create & synchronize two nodes - let n1 = Automerge.init(), n2 = Automerge.init() - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init(), + n2 = Automerge.init() + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) // modify the first node further - for (let i = 5; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 
5; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) assert.notDeepStrictEqual(n1, n2) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) @@ -278,21 +331,24 @@ describe('Data sync protocol', () => { }) }) - describe('with diverged documents', () => { - it('should work without prior sync state', () => { + describe("with diverged documents", () => { + it("should work without prior sync state", () => { // Scenario: ,-- c10 <-- c11 <-- c12 <-- c13 <-- c14 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- c15 <-- c16 <-- c17 // lastSync is undefined. // create two peers both with divergent commits - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2] = sync(n1, n2) - for (let i = 10; i < 15; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - for (let i = 15; i < 18; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.x = i) + for (let i = 10; i < 15; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) + for (let i = 15; i < 18; i++) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = i)) assert.notDeepStrictEqual(n1, n2) ;[n1, n2] = sync(n1, n2) @@ -300,21 +356,26 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(n1, n2) }) - it('should work with prior sync state', () => { + it("should work with prior sync state", () => { // Scenario: ,-- c10 <-- c11 <-- c12 <-- c13 <-- c14 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- c15 <-- c16 <-- c17 // lastSync is c9. 
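// The encodeSyncState/decodeSyncState round trip further down simulates
// persisting both peers' sync states across a disconnect before syncing again.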
// create two peers both with divergent commits - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) - for (let i = 10; i < 15; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - for (let i = 15; i < 18; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.x = i) + for (let i = 10; i < 15; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) + for (let i = 15; i < 18; i++) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = i)) s1 = decodeSyncState(encodeSyncState(s1)) s2 = decodeSyncState(encodeSyncState(s2)) @@ -324,27 +385,33 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(n1, n2) }) - it('should ensure non-empty state after sync', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + it("should ensure non-empty state after sync", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) assert.deepStrictEqual(s1.sharedHeads, getHeads(n1)) assert.deepStrictEqual(s2.sharedHeads, getHeads(n1)) }) - it('should re-sync after one node crashed with data loss', () => { + it("should re-sync after one node crashed with data loss", () => { // Scenario: (r) (n2) (n1) // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 // n2 has changes {c0, c1, c2}, n1's lastSync is c5, and n2's lastSync is c2. 
// we want to successfully sync (n1) with (r), even though (n1) believes it's talking to (n2) - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() // n1 makes three changes, which we sync to n2 - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) // save a copy of n2 as "r" to simulate recovering from crash @@ -352,38 +419,43 @@ describe('Data sync protocol', () => { ;[r, rSyncState] = [Automerge.clone(n2), s2] // sync another few commits - for (let i = 3; i < 6; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 3; i < 6; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) // everyone should be on the same page here assert.deepStrictEqual(getHeads(n1), getHeads(n2)) assert.deepStrictEqual(n1, n2) // now make a few more changes, then attempt to sync the fully-up-to-date n1 with the confused r - for (let i = 6; i < 9; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 6; i < 9; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) s1 = decodeSyncState(encodeSyncState(s1)) rSyncState = decodeSyncState(encodeSyncState(rSyncState)) assert.notDeepStrictEqual(getHeads(n1), getHeads(r)) assert.notDeepStrictEqual(n1, r) - assert.deepStrictEqual(n1, {x: 8}) - assert.deepStrictEqual(r, {x: 2}) + assert.deepStrictEqual(n1, { x: 8 }) + assert.deepStrictEqual(r, { x: 2 }) ;[n1, r, s1, rSyncState] = sync(n1, r, s1, rSyncState) assert.deepStrictEqual(getHeads(n1), getHeads(r)) assert.deepStrictEqual(n1, r) }) - it('should resync after one node experiences data loss without disconnecting', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + it("should resync after one node experiences data loss without disconnecting", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() // n1 makes three changes, which we sync to n2 - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) assert.deepStrictEqual(getHeads(n1), getHeads(n2)) assert.deepStrictEqual(n1, n2) - let n2AfterDataLoss = Automerge.init('89abcdef') + let n2AfterDataLoss = Automerge.init("89abcdef") // "n2" now has no data, but n1 still thinks it does. 
Note we don't do // decodeSyncState(encodeSyncState(s1)) in order to simulate data loss without disconnecting @@ -392,29 +464,35 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(n1, n2) }) - it('should handle changes concurrent to the last sync heads', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') - let s12 = initSyncState(), s21 = initSyncState(), s23 = initSyncState(), s32 = initSyncState() + it("should handle changes concurrent to the last sync heads", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef"), + n3 = Automerge.init("fedcba98") + let s12 = initSyncState(), + s21 = initSyncState(), + s23 = initSyncState(), + s32 = initSyncState() // Change 1 is known to all three nodes - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 1) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 1)) ;[n1, n2, s12, s21] = sync(n1, n2, s12, s21) ;[n2, n3, s23, s32] = sync(n2, n3, s23, s32) // Change 2 is known to n1 and n2 - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 2) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 2)) ;[n1, n2, s12, s21] = sync(n1, n2, s12, s21) // Each of the three nodes makes one change (changes 3, 4, 5) - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 3) - n2 = Automerge.change(n2, {time: 0}, doc => doc.x = 4) - n3 = Automerge.change(n3, {time: 0}, doc => doc.x = 5) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 3)) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = 4)) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = 5)) // Apply n3's latest change to n2. If running in Node, turn the Uint8Array into a Buffer, to // simulate transmission over a network (see https://github.com/automerge/automerge/pull/362) let change = Automerge.getLastLocalChange(n3) - if (typeof Buffer === 'function' && change != null) change = Buffer.from(change) - ;[n2] = change && Automerge.applyChanges(n2, [change]) || [n2] + if (typeof Buffer === "function" && change != null) + change = Buffer.from(change) + ;[n2] = (change && Automerge.applyChanges(n2, [change])) || [n2] // Now sync n1 and n2. n3's change is concurrent to n1 and n2's last sync heads ;[n1, n2, s12, s21] = sync(n1, n2, s12, s21) @@ -422,12 +500,14 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(n1, n2) }) - it('should handle histories with lots of branching and merging', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('fedcba98') - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 0) + it("should handle histories with lots of branching and merging", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef"), + n3 = Automerge.init("fedcba98") + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 0)) ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n1)!]) ;[n3] = Automerge.applyChanges(n3, [Automerge.getLastLocalChange(n1)!]) - n3 = Automerge.change(n3, {time: 0}, doc => doc.x = 1) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = 1)) // - n1c1 <------ n1c2 <------ n1c3 <-- etc. 
<-- n1c20 <------ n1c21 // / \/ \/ \/ @@ -436,29 +516,29 @@ describe('Data sync protocol', () => { // \ / // ---------------------------------------------- n3c1 <----- for (let i = 1; i < 20; i++) { - n1 = Automerge.change(n1, {time: 0}, doc => doc.n1 = i) - n2 = Automerge.change(n2, {time: 0}, doc => doc.n2 = i) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.n1 = i)) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.n2 = i)) const change1 = Automerge.getLastLocalChange(n1) const change2 = Automerge.getLastLocalChange(n2) ;[n1] = Automerge.applyChanges(n1, [change2!]) ;[n2] = Automerge.applyChanges(n2, [change1!]) } - let s1 = initSyncState(), s2 = initSyncState() + let s1 = initSyncState(), + s2 = initSyncState() ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) // Having n3's last change concurrent to the last sync heads forces us into the slower code path ;[n2] = Automerge.applyChanges(n2, [Automerge.getLastLocalChange(n3)!]) - n1 = Automerge.change(n1, {time: 0}, doc => doc.n1 = 'final') - n2 = Automerge.change(n2, {time: 0}, doc => doc.n2 = 'final') - + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.n1 = "final")) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.n2 = "final")) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) assert.deepStrictEqual(getHeads(n1), getHeads(n2)) assert.deepStrictEqual(n1, n2) }) }) - describe('with false positives', () => { + describe("with false positives", () => { // NOTE: the following tests use brute force to search for Bloom filter false positives. The // tests make change hashes deterministic by fixing the actorId and change timestamp to be // constants. The loop that searches for false positives is then initialised such that it finds @@ -467,22 +547,36 @@ describe('Data sync protocol', () => { // then the false positive will no longer be the first loop iteration. The tests should still // pass because the loop will run until a false positive is found, but they will be slower. - it('should handle a false-positive head', () => { + it("should handle a false-positive head", () => { // Scenario: ,-- n1 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- n2 // where n2 is a false positive in the Bloom filter containing {n1}. // lastSync is c9. 
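// A false positive here means n1's Bloom filter appears to already contain
// n2's new head even though n1 has never seen it, so n2 initially withholds
// that change. The sync should still converge, because n1 ends up seeing a
// head hash it cannot resolve and requests it explicitly (compare
// "should allow the false-positive hash to be explicitly requested" below).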
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) - for (let i = 1; ; i++) { // search for false positive; see comment above - const n1up = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2up = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + for (let i = 1; ; i++) { + // search for false positive; see comment above + const n1up = Automerge.change( + Automerge.clone(n1, { actor: "01234567" }), + { time: 0 }, + doc => (doc.x = `${i} @ n1`) + ) + const n2up = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + doc => (doc.x = `${i} @ n2`) + ) if (new BloomFilter(getHeads(n1up)).containsHash(getHeads(n2up)[0])) { - n1 = n1up; n2 = n2up; break + n1 = n1up + n2 = n2up + break } } const allHeads = [...getHeads(n1), ...getHeads(n2)].sort() @@ -493,7 +587,7 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(getHeads(n2), allHeads) }) - describe('with a false-positive dependency', () => { + describe("with a false-positive dependency", () => { let n1, n2, s1, s2, n1hash2, n2hash2 beforeEach(() => { @@ -502,35 +596,57 @@ describe('Data sync protocol', () => { // `-- n2c1 <-- n2c2 // where n2c1 is a false positive in the Bloom filter containing {n1c1, n1c2}. // lastSync is c9. - n1 = Automerge.init('01234567') - n2 = Automerge.init('89abcdef') + n1 = Automerge.init("01234567") + n2 = Automerge.init("89abcdef") s1 = initSyncState() s2 = initSyncState() - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, (doc: any) => doc.x = i) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, (doc: any) => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) let n1hash1, n2hash1 - for (let i = 29; ; i++) { // search for false positive; see comment above - const n1us1 = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, (doc: any) => doc.x = `${i} @ n1`) - const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, (doc: any) => doc.x = `${i} @ n2`) - n1hash1 = getHeads(n1us1)[0]; n2hash1 = getHeads(n2us1)[0] - const n1us2 = Automerge.change(n1us1, {time: 0}, (doc: any) => doc.x = 'final @ n1') - const n2us2 = Automerge.change(n2us1, {time: 0}, (doc: any) => doc.x = 'final @ n2') - n1hash2 = getHeads(n1us2)[0]; n2hash2 = getHeads(n2us2)[0] + for (let i = 29; ; i++) { + // search for false positive; see comment above + const n1us1 = Automerge.change( + Automerge.clone(n1, { actor: "01234567" }), + { time: 0 }, + (doc: any) => (doc.x = `${i} @ n1`) + ) + const n2us1 = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + (doc: any) => (doc.x = `${i} @ n2`) + ) + n1hash1 = getHeads(n1us1)[0] + n2hash1 = getHeads(n2us1)[0] + const n1us2 = Automerge.change( + n1us1, + { time: 0 }, + (doc: any) => (doc.x = "final @ n1") + ) + const n2us2 = Automerge.change( + n2us1, + { time: 0 }, + (doc: any) => (doc.x = "final @ n2") + ) + n1hash2 = getHeads(n1us2)[0] + n2hash2 = getHeads(n2us2)[0] if (new BloomFilter([n1hash1, n1hash2]).containsHash(n2hash1)) { - n1 = n1us2; n2 = n2us2; break 
+ n1 = n1us2 + n2 = n2us2 + break } } }) - it('should sync two nodes without connection reset', () => { - [n1, n2, s1, s2] = sync(n1, n2, s1, s2) + it("should sync two nodes without connection reset", () => { + ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) assert.deepStrictEqual(getHeads(n1), [n1hash2, n2hash2].sort()) assert.deepStrictEqual(getHeads(n2), [n1hash2, n2hash2].sort()) }) // FIXME - this has a periodic failure - it('should sync two nodes with connection reset', () => { + it("should sync two nodes with connection reset", () => { s1 = decodeSyncState(encodeSyncState(s1)) s2 = decodeSyncState(encodeSyncState(s2)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) @@ -538,7 +654,7 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(getHeads(n2), [n1hash2, n2hash2].sort()) }) - it.skip('should sync three nodes', () => { + it.skip("should sync three nodes", () => { s1 = decodeSyncState(encodeSyncState(s1)) s2 = decodeSyncState(encodeSyncState(s2)) @@ -558,37 +674,73 @@ describe('Data sync protocol', () => { assert.strictEqual(decodeSyncMessage(m2).changes.length, 1) // only n2c2; change n2c1 is not sent // n3 is a node that doesn't have the missing change. Nevertheless n1 is going to ask n3 for it - let n3 = Automerge.init('fedcba98'), s13 = initSyncState(), s31 = initSyncState() + let n3 = Automerge.init("fedcba98"), + s13 = initSyncState(), + s31 = initSyncState() ;[n1, n3, s13, s31] = sync(n1, n3, s13, s31) assert.deepStrictEqual(getHeads(n1), [n1hash2]) assert.deepStrictEqual(getHeads(n3), [n1hash2]) }) }) - it('should not require an additional request when a false-positive depends on a true-negative', () => { + it("should not require an additional request when a false-positive depends on a true-negative", () => { // Scenario: ,-- n1c1 <-- n1c2 <-- n1c3 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-+ // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c2 is a false positive in the Bloom filter containing {n1c1, n1c2, n1c3}. // lastSync is c4. 
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() let n1hash3, n2hash3 - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) - for (let i = 86; ; i++) { // search for false positive; see comment above - const n1us1 = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + for (let i = 86; ; i++) { + // search for false positive; see comment above + const n1us1 = Automerge.change( + Automerge.clone(n1, { actor: "01234567" }), + { time: 0 }, + doc => (doc.x = `${i} @ n1`) + ) + const n2us1 = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + doc => (doc.x = `${i} @ n2`) + ) const n1hash1 = getHeads(n1us1)[0] - const n1us2 = Automerge.change(n1us1, {time: 0}, doc => doc.x = `${i + 1} @ n1`) - const n2us2 = Automerge.change(n2us1, {time: 0}, doc => doc.x = `${i + 1} @ n2`) - const n1hash2 = getHeads(n1us2)[0], n2hash2 = getHeads(n2us2)[0] - const n1up3 = Automerge.change(n1us2, {time: 0}, doc => doc.x = 'final @ n1') - const n2up3 = Automerge.change(n2us2, {time: 0}, doc => doc.x = 'final @ n2') - n1hash3 = getHeads(n1up3)[0]; n2hash3 = getHeads(n2up3)[0] - if (new BloomFilter([n1hash1, n1hash2, n1hash3]).containsHash(n2hash2)) { - n1 = n1up3; n2 = n2up3; break + const n1us2 = Automerge.change( + n1us1, + { time: 0 }, + doc => (doc.x = `${i + 1} @ n1`) + ) + const n2us2 = Automerge.change( + n2us1, + { time: 0 }, + doc => (doc.x = `${i + 1} @ n2`) + ) + const n1hash2 = getHeads(n1us2)[0], + n2hash2 = getHeads(n2us2)[0] + const n1up3 = Automerge.change( + n1us2, + { time: 0 }, + doc => (doc.x = "final @ n1") + ) + const n2up3 = Automerge.change( + n2us2, + { time: 0 }, + doc => (doc.x = "final @ n2") + ) + n1hash3 = getHeads(n1up3)[0] + n2hash3 = getHeads(n2up3)[0] + if ( + new BloomFilter([n1hash1, n1hash2, n1hash3]).containsHash(n2hash2) + ) { + n1 = n1up3 + n2 = n2up3 + break } } const bothHeads = [n1hash3, n2hash3].sort() @@ -599,31 +751,46 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(getHeads(n2), bothHeads) }) - it('should handle chains of false-positives', () => { + it("should handle chains of false-positives", () => { // Scenario: ,-- c5 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-+ // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c1 and n2c2 are both false positives in the Bloom filter containing {c5}. // lastSync is c4. 
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() - for (let i = 0; i < 5; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 5; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2, s1, s2) - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 5) - for (let i = 2; ; i++) { // search for false positive; see comment above - const n2us1 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 5)) + for (let i = 2; ; i++) { + // search for false positive; see comment above + const n2us1 = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + doc => (doc.x = `${i} @ n2`) + ) if (new BloomFilter(getHeads(n1)).containsHash(getHeads(n2us1)[0])) { - n2 = n2us1; break + n2 = n2us1 + break } } - for (let i = 141; ; i++) { // search for false positive; see comment above - const n2us2 = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} again`) + for (let i = 141; ; i++) { + // search for false positive; see comment above + const n2us2 = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + doc => (doc.x = `${i} again`) + ) if (new BloomFilter(getHeads(n1)).containsHash(getHeads(n2us2)[0])) { - n2 = n2us2; break + n2 = n2us2 + break } } - n2 = Automerge.change(n2, {time: 0}, doc => doc.x = 'final @ n2') + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = "final @ n2")) const allHeads = [...getHeads(n1), ...getHeads(n2)].sort() s1 = decodeSyncState(encodeSyncState(s1)) @@ -633,32 +800,46 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(getHeads(n2), allHeads) }) - it('should allow the false-positive hash to be explicitly requested', () => { + it("should allow the false-positive hash to be explicitly requested", () => { // Scenario: ,-- n1 // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- n2 // where n2 causes a false positive in the Bloom filter containing {n1}. 
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() let message - for (let i = 0; i < 10; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 10; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) s1 = decodeSyncState(encodeSyncState(s1)) s2 = decodeSyncState(encodeSyncState(s2)) - for (let i = 1; ; i++) { // brute-force search for false positive; see comment above - const n1up = Automerge.change(Automerge.clone(n1, {actor: '01234567'}), {time: 0}, doc => doc.x = `${i} @ n1`) - const n2up = Automerge.change(Automerge.clone(n2, {actor: '89abcdef'}), {time: 0}, doc => doc.x = `${i} @ n2`) + for (let i = 1; ; i++) { + // brute-force search for false positive; see comment above + const n1up = Automerge.change( + Automerge.clone(n1, { actor: "01234567" }), + { time: 0 }, + doc => (doc.x = `${i} @ n1`) + ) + const n2up = Automerge.change( + Automerge.clone(n2, { actor: "89abcdef" }), + { time: 0 }, + doc => (doc.x = `${i} @ n2`) + ) // check if the bloom filter on n2 will believe n1 already has a particular hash // this will mean n2 won't offer that data to n2 by receiving a sync message from n1 if (new BloomFilter(getHeads(n1up)).containsHash(getHeads(n2up)[0])) { - n1 = n1up; n2 = n2up; break + n1 = n1up + n2 = n2up + break } } // n1 creates a sync message for n2 with an ill-fated bloom - [s1, message] = Automerge.generateSyncMessage(n1, s1) + ;[s1, message] = Automerge.generateSyncMessage(n1, s1) assert.strictEqual(decodeSyncMessage(message).changes.length, 0) // n2 receives it and DOESN'T send a change back @@ -682,32 +863,42 @@ describe('Data sync protocol', () => { }) }) - describe('protocol features', () => { - it('should allow multiple Bloom filters', () => { + describe("protocol features", () => { + it("should allow multiple Bloom filters", () => { // Scenario: ,-- n1c1 <-- n1c2 <-- n1c3 // c0 <-- c1 <-- c2 <-+--- n2c1 <-- n2c2 <-- n2c3 // `-- n3c1 <-- n3c2 <-- n3c3 // n1 has {c0, c1, c2, n1c1, n1c2, n1c3, n2c1, n2c2}; // n2 has {c0, c1, c2, n1c1, n1c2, n2c1, n2c2, n2c3}; // n3 has {c0, c1, c2, n3c1, n3c2, n3c3}. 
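// Each entry in a sync message's `have` field pairs a `lastSync` list of
// heads with a Bloom filter describing which changes the sender already has
// beyond them. Below, the `have` entry from n1's message is spliced into
// n3's message, so n2 must consult both filters and is expected to send only
// n2c3, the one change covered by neither filter.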
- let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef"), + n3 = Automerge.init("76543210") let s13 = initSyncState() - let s32 = initSyncState(), s31 = initSyncState(), s23 = initSyncState() + let s32 = initSyncState(), + s31 = initSyncState(), + s23 = initSyncState() let message1, message2, message3 - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) - // sync all 3 nodes - ;[n1, n2, , ] = sync(n1, n2) // eslint-disable-line no-unused-vars -- kept for consistency + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) + // sync all 3 nodes + ;[n1, n2, ,] = sync(n1, n2) // eslint-disable-line no-unused-vars -- kept for consistency ;[n1, n3, s13, s31] = sync(n1, n3) ;[n3, n2, s32, s23] = sync(n3, n2) - for (let i = 0; i < 2; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = `${i} @ n1`) - for (let i = 0; i < 2; i++) n2 = Automerge.change(n2, {time: 0}, doc => doc.x = `${i} @ n2`) + for (let i = 0; i < 2; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = `${i} @ n1`)) + for (let i = 0; i < 2; i++) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = `${i} @ n2`)) ;[n1] = Automerge.applyChanges(n1, Automerge.getAllChanges(n2)) ;[n2] = Automerge.applyChanges(n2, Automerge.getAllChanges(n1)) - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = `3 @ n1`) - n2 = Automerge.change(n2, {time: 0}, doc => doc.x = `3 @ n2`) - for (let i = 0; i < 3; i++) n3 = Automerge.change(n3, {time: 0}, doc => doc.x = `${i} @ n3`) - const n1c3 = getHeads(n1)[0], n2c3 = getHeads(n2)[0], n3c3 = getHeads(n3)[0] + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = `3 @ n1`)) + n2 = Automerge.change(n2, { time: 0 }, doc => (doc.x = `3 @ n2`)) + for (let i = 0; i < 3; i++) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = `${i} @ n3`)) + const n1c3 = getHeads(n1)[0], + n2c3 = getHeads(n2)[0], + n3c3 = getHeads(n3)[0] s13 = decodeSyncState(encodeSyncState(s13)) s31 = decodeSyncState(encodeSyncState(s31)) s23 = decodeSyncState(encodeSyncState(s23)) @@ -729,7 +920,11 @@ describe('Data sync protocol', () => { const modifiedMessage = decodeSyncMessage(message3) modifiedMessage.have.push(decodeSyncMessage(message1).have[0]) assert.strictEqual(modifiedMessage.changes.length, 0) - ;[n2, s23] = Automerge.receiveSyncMessage(n2, s23, encodeSyncMessage(modifiedMessage)) + ;[n2, s23] = Automerge.receiveSyncMessage( + n2, + s23, + encodeSyncMessage(modifiedMessage) + ) // n2 replies to n3, sending only n2c3 (the one change that n2 has but n1 doesn't) ;[s23, message2] = Automerge.generateSyncMessage(n2, s23) @@ -743,55 +938,76 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(getHeads(n3), [n1c3, n2c3, n3c3].sort()) }) - it('should allow any change to be requested', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + it("should allow any change to be requested", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() let message: Automerge.SyncMessage | null = null - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) const lastSync = getHeads(n1) - for (let i = 3; i < 6; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) 
- + for (let i = 3; i < 6; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n1, n2, s1, s2] = sync(n1, n2) s1.lastSentHeads = [] // force generateSyncMessage to return a message even though nothing changed ;[s1, message] = Automerge.generateSyncMessage(n1, s1) const modMsg = decodeSyncMessage(message!) modMsg.need = lastSync // re-request change 2 - ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, encodeSyncMessage(modMsg)) + ;[n2, s2] = Automerge.receiveSyncMessage( + n2, + s2, + encodeSyncMessage(modMsg) + ) ;[s1, message] = Automerge.generateSyncMessage(n2, s2) assert.strictEqual(decodeSyncMessage(message!).changes.length, 1) - assert.strictEqual(Automerge.decodeChange(decodeSyncMessage(message!).changes[0]).hash, lastSync[0]) + assert.strictEqual( + Automerge.decodeChange(decodeSyncMessage(message!).changes[0]).hash, + lastSync[0] + ) }) - it('should ignore requests for a nonexistent change', () => { - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef') - let s1 = initSyncState(), s2 = initSyncState() + it("should ignore requests for a nonexistent change", () => { + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef") + let s1 = initSyncState(), + s2 = initSyncState() let message: Automerge.SyncMessage | null = null - for (let i = 0; i < 3; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) + for (let i = 0; i < 3; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) ;[n2] = Automerge.applyChanges(n2, Automerge.getAllChanges(n1)) ;[s1, message] = Automerge.generateSyncMessage(n1, s1) const decoded = Automerge.decodeSyncMessage(message!) - decoded.need = ['0000000000000000000000000000000000000000000000000000000000000000'] + decoded.need = [ + "0000000000000000000000000000000000000000000000000000000000000000", + ] message = Automerge.encodeSyncMessage(decoded) ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, message!) 
;[s2, message] = Automerge.generateSyncMessage(n2, s2) assert.strictEqual(message, null) }) - it('should allow a subset of changes to be sent', () => { + it("should allow a subset of changes to be sent", () => { // ,-- c1 <-- c2 // c0 <-+ // `-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 - let n1 = Automerge.init('01234567'), n2 = Automerge.init('89abcdef'), n3 = Automerge.init('76543210') - let s1 = initSyncState(), s2 = initSyncState() + let n1 = Automerge.init("01234567"), + n2 = Automerge.init("89abcdef"), + n3 = Automerge.init("76543210") + let s1 = initSyncState(), + s2 = initSyncState() let msg, decodedMsg - n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 0) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = 0)) n3 = Automerge.merge(n3, n1) - for (let i = 1; i <= 2; i++) n1 = Automerge.change(n1, {time: 0}, doc => doc.x = i) // n1 has {c0, c1, c2} - for (let i = 3; i <= 4; i++) n3 = Automerge.change(n3, {time: 0}, doc => doc.x = i) // n3 has {c0, c3, c4} - const c2 = getHeads(n1)[0], c4 = getHeads(n3)[0] + for (let i = 1; i <= 2; i++) + n1 = Automerge.change(n1, { time: 0 }, doc => (doc.x = i)) // n1 has {c0, c1, c2} + for (let i = 3; i <= 4; i++) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = i)) // n3 has {c0, c3, c4} + const c2 = getHeads(n1)[0], + c4 = getHeads(n3)[0] n2 = Automerge.merge(n2, n3) // n2 has {c0, c3, c4} // Sync n1 and n2, so their shared heads are {c2, c4} @@ -802,11 +1018,13 @@ describe('Data sync protocol', () => { assert.deepStrictEqual(s2.sharedHeads, [c2, c4].sort()) // n2 and n3 apply {c5, c6, c7, c8} - n3 = Automerge.change(n3, {time: 0}, doc => doc.x = 5) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = 5)) const change5 = Automerge.getLastLocalChange(n3) - n3 = Automerge.change(n3, {time: 0}, doc => doc.x = 6) - const change6 = Automerge.getLastLocalChange(n3), c6 = getHeads(n3)[0] - for (let i = 7; i <= 8; i++) n3 = Automerge.change(n3, {time: 0}, doc => doc.x = i) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = 6)) + const change6 = Automerge.getLastLocalChange(n3), + c6 = getHeads(n3)[0] + for (let i = 7; i <= 8; i++) + n3 = Automerge.change(n3, { time: 0 }, doc => (doc.x = i)) const c8 = getHeads(n3)[0] n2 = Automerge.merge(n2, n3) @@ -829,7 +1047,10 @@ describe('Data sync protocol', () => { ;[s1, msg] = Automerge.generateSyncMessage(n1, s1) ;[n2, s2] = Automerge.receiveSyncMessage(n2, s2, msg) assert.deepStrictEqual(decodeSyncMessage(msg).need, [c8]) - assert.deepStrictEqual(decodeSyncMessage(msg).have[0].lastSync, [c2, c6].sort()) + assert.deepStrictEqual( + decodeSyncMessage(msg).have[0].lastSync, + [c2, c6].sort() + ) assert.deepStrictEqual(s1.sharedHeads, [c2, c6].sort()) assert.deepStrictEqual(s2.sharedHeads, [c2, c6].sort()) diff --git a/javascript/test/text_test.ts b/javascript/test/text_test.ts index dd66e108..076e20b2 100644 --- a/javascript/test/text_test.ts +++ b/javascript/test/text_test.ts @@ -1,34 +1,34 @@ -import * as assert from 'assert' -import * as Automerge from '../src' -import { assertEqualsOneOf } from './helpers' +import * as assert from "assert" +import * as Automerge from "../src" +import { assertEqualsOneOf } from "./helpers" type DocType = { text: string [key: string]: any } -describe('Automerge.Text', () => { +describe("Automerge.Text", () => { let s1: Automerge.Doc, s2: Automerge.Doc beforeEach(() => { - s1 = Automerge.change(Automerge.init(), doc => doc.text = "") + s1 = Automerge.change(Automerge.init(), doc => (doc.text = "")) s2 = Automerge.merge(Automerge.init(), s1) }) - it('should 
support insertion', () => { + it("should support insertion", () => { s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "a")) assert.strictEqual(s1.text.length, 1) - assert.strictEqual(s1.text[0], 'a') - assert.strictEqual(s1.text, 'a') + assert.strictEqual(s1.text[0], "a") + assert.strictEqual(s1.text, "a") //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`) }) - it('should support deletion', () => { + it("should support deletion", () => { s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 1, 1)) assert.strictEqual(s1.text.length, 2) - assert.strictEqual(s1.text[0], 'a') - assert.strictEqual(s1.text[1], 'c') - assert.strictEqual(s1.text, 'ac') + assert.strictEqual(s1.text[0], "a") + assert.strictEqual(s1.text[1], "c") + assert.strictEqual(s1.text, "ac") }) it("should support implicit and explicit deletion", () => { @@ -41,70 +41,71 @@ describe('Automerge.Text', () => { assert.strictEqual(s1.text, "ac") }) - it('should handle concurrent insertion', () => { + it("should handle concurrent insertion", () => { s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) s2 = Automerge.change(s2, doc => Automerge.splice(doc, "text", 0, 0, "xyz")) s1 = Automerge.merge(s1, s2) assert.strictEqual(s1.text.length, 6) - assertEqualsOneOf(s1.text, 'abcxyz', 'xyzabc') + assertEqualsOneOf(s1.text, "abcxyz", "xyzabc") }) - it('should handle text and other ops in the same change', () => { + it("should handle text and other ops in the same change", () => { s1 = Automerge.change(s1, doc => { - doc.foo = 'bar' - Automerge.splice(doc, "text", 0, 0, 'a') + doc.foo = "bar" + Automerge.splice(doc, "text", 0, 0, "a") }) - assert.strictEqual(s1.foo, 'bar') - assert.strictEqual(s1.text, 'a') - assert.strictEqual(s1.text, 'a') + assert.strictEqual(s1.foo, "bar") + assert.strictEqual(s1.text, "a") + assert.strictEqual(s1.text, "a") }) - it('should serialize to JSON as a simple string', () => { + it("should serialize to JSON as a simple string", () => { s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, 'a"b')) assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}') }) - it('should allow modification after an object is assigned to a document', () => { + it("should allow modification after an object is assigned to a document", () => { s1 = Automerge.change(Automerge.init(), doc => { doc.text = "" - Automerge.splice(doc ,"text", 0, 0, 'abcd') - Automerge.splice(doc ,"text", 2, 1) - assert.strictEqual(doc.text, 'abd') + Automerge.splice(doc, "text", 0, 0, "abcd") + Automerge.splice(doc, "text", 2, 1) + assert.strictEqual(doc.text, "abd") }) - assert.strictEqual(s1.text, 'abd') + assert.strictEqual(s1.text, "abd") }) - it('should not allow modification outside of a change callback', () => { - assert.throws(() => Automerge.splice(s1 ,"text", 0, 0, 'a'), /object cannot be modified outside of a change block/) + it("should not allow modification outside of a change callback", () => { + assert.throws( + () => Automerge.splice(s1, "text", 0, 0, "a"), + /object cannot be modified outside of a change block/ + ) }) - describe('with initial value', () => { - - it('should initialize text in Automerge.from()', () => { - let s1 = Automerge.from({text: 'init'}) + describe("with initial value", () => { + it("should initialize text in Automerge.from()", () => { + let s1 = Automerge.from({ text: "init" }) assert.strictEqual(s1.text.length, 4) - 
assert.strictEqual(s1.text[0], 'i') - assert.strictEqual(s1.text[1], 'n') - assert.strictEqual(s1.text[2], 'i') - assert.strictEqual(s1.text[3], 't') - assert.strictEqual(s1.text, 'init') + assert.strictEqual(s1.text[0], "i") + assert.strictEqual(s1.text[1], "n") + assert.strictEqual(s1.text[2], "i") + assert.strictEqual(s1.text[3], "t") + assert.strictEqual(s1.text, "init") }) - it('should encode the initial value as a change', () => { - const s1 = Automerge.from({text: 'init'}) + it("should encode the initial value as a change", () => { + const s1 = Automerge.from({ text: "init" }) const changes = Automerge.getAllChanges(s1) assert.strictEqual(changes.length, 1) const [s2] = Automerge.applyChanges(Automerge.init(), changes) - assert.strictEqual(s2.text, 'init') - assert.strictEqual(s2.text, 'init') + assert.strictEqual(s2.text, "init") + assert.strictEqual(s2.text, "init") }) - }) - it('should support unicode when creating text', () => { + it("should support unicode when creating text", () => { s1 = Automerge.from({ - text: '🐦' + text: "🐦", }) - assert.strictEqual(s1.text, '🐦') + assert.strictEqual(s1.text, "🐦") }) }) diff --git a/javascript/test/uuid_test.ts b/javascript/test/uuid_test.ts index 4182a8c4..f6a0bde4 100644 --- a/javascript/test/uuid_test.ts +++ b/javascript/test/uuid_test.ts @@ -1,20 +1,20 @@ -import * as assert from 'assert' -import * as Automerge from '../src' +import * as assert from "assert" +import * as Automerge from "../src" const uuid = Automerge.uuid -describe('uuid', () => { +describe("uuid", () => { afterEach(() => { uuid.reset() }) - describe('default implementation', () => { - it('generates unique values', () => { + describe("default implementation", () => { + it("generates unique values", () => { assert.notEqual(uuid(), uuid()) }) }) - describe('custom implementation', () => { + describe("custom implementation", () => { let counter function customUuid() { @@ -22,11 +22,11 @@ describe('uuid', () => { } before(() => uuid.setFactory(customUuid)) - beforeEach(() => counter = 0) + beforeEach(() => (counter = 0)) - it('invokes the custom factory', () => { - assert.equal(uuid(), 'custom-uuid-0') - assert.equal(uuid(), 'custom-uuid-1') + it("invokes the custom factory", () => { + assert.equal(uuid(), "custom-uuid-0") + assert.equal(uuid(), "custom-uuid-1") }) }) }) diff --git a/javascript/tsconfig.json b/javascript/tsconfig.json index 8e934416..c6684ca0 100644 --- a/javascript/tsconfig.json +++ b/javascript/tsconfig.json @@ -1,22 +1,19 @@ { - "compilerOptions": { - "target": "es2016", - "sourceMap": false, - "declaration": true, - "resolveJsonModule": true, - "module": "commonjs", - "moduleResolution": "node", - "noImplicitAny": false, - "allowSyntheticDefaultImports": true, - "forceConsistentCasingInFileNames": true, - "strict": true, - "noFallthroughCasesInSwitch": true, - "skipLibCheck": true, - "outDir": "./dist" - }, - "include": [ "src/**/*", "test/**/*" ], - "exclude": [ - "./dist/**/*", - "./node_modules" - ] + "compilerOptions": { + "target": "es2016", + "sourceMap": false, + "declaration": true, + "resolveJsonModule": true, + "module": "commonjs", + "moduleResolution": "node", + "noImplicitAny": false, + "allowSyntheticDefaultImports": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "noFallthroughCasesInSwitch": true, + "skipLibCheck": true, + "outDir": "./dist" + }, + "include": ["src/**/*", "test/**/*"], + "exclude": ["./dist/**/*", "./node_modules"] } diff --git a/javascript/typedoc-readme.md b/javascript/typedoc-readme.md index 
05025ac1..258b9e20 100644 --- a/javascript/typedoc-readme.md +++ b/javascript/typedoc-readme.md @@ -74,24 +74,32 @@ import * as automerge from "@automerge/automerge" import * as assert from "assert" let doc = automerge.from({ - "key1": "value1" + key1: "value1", }) // Make a clone of the document at this point, maybe this is actually on another // peer. -let doc2 = automerge.clone(doc) +let doc2 = automerge.clone < any > doc let heads = automerge.getHeads(doc) -doc = automerge.change(doc, d => { +doc = + automerge.change < + any > + (doc, + d => { d.key2 = "value2" -}) + }) -doc = automerge.change(doc, d => { +doc = + automerge.change < + any > + (doc, + d => { d.key3 = "value3" -}) + }) -// At this point we've generated two separate changes, now we want to send +// At this point we've generated two separate changes, now we want to send // just those changes to someone else // view is a cheap reference based copy of a document at a given set of heads @@ -99,18 +107,18 @@ let before = automerge.view(doc, heads) // This view doesn't show the last two changes in the document state assert.deepEqual(before, { - key1: "value1" + key1: "value1", }) // Get the changes to send to doc2 let changes = automerge.getChanges(before, doc) // Apply the changes at doc2 -doc2 = automerge.applyChanges(doc2, changes)[0] +doc2 = automerge.applyChanges < any > (doc2, changes)[0] assert.deepEqual(doc2, { - key1: "value1", - key2: "value2", - key3: "value3" + key1: "value1", + key2: "value2", + key3: "value3", }) ``` @@ -126,23 +134,22 @@ generateSyncMessage}. When we receive a message from the peer we call {@link receiveSyncMessage}. Here's a simple example of a loop which just keeps two peers in sync. - ```javascript let sync1 = automerge.initSyncState() let msg: Uint8Array | null -[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) +;[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) while (true) { - if (msg != null) { - network.send(msg) - } - let resp: Uint8Array = network.receive() - [doc1, sync1, _ignore] = automerge.receiveSyncMessage(doc1, sync1, resp) - [sync1, msg] = automerge.generateSyncMessage(doc1, sync1) + if (msg != null) { + network.send(msg) + } + let resp: Uint8Array = + (network.receive()[(doc1, sync1, _ignore)] = + automerge.receiveSyncMessage(doc1, sync1, resp)[(sync1, msg)] = + automerge.generateSyncMessage(doc1, sync1)) } ``` - ## Conflicts The only time conflicts occur in automerge documents is in concurrent @@ -187,8 +194,7 @@ By default automerge will generate a random actor ID for you, but most methods for creating a document allow you to set the actor ID. You can get the actor ID associated with the document by calling {@link getActorId}. Actor IDs must not be used in concurrent threads of executiong - all changes by a given actor ID -are expected to be sequential. - +are expected to be sequential. ## Listening to patches @@ -203,18 +209,18 @@ document which you have two pointers to. For example, in this code: ```javascript let doc1 = automerge.init() -let doc2 = automerge.change(doc1, d => d.key = "value") +let doc2 = automerge.change(doc1, d => (d.key = "value")) ``` `doc1` and `doc2` are both pointers to the same state. 
Any attempt to call mutating methods on `doc1` will now result in an error like Attempting to change an out of date document - + If you encounter this you need to clone the original document, the above sample would work as: ```javascript let doc1 = automerge.init() -let doc2 = automerge.change(automerge.clone(doc1), d => d.key = "value") +let doc2 = automerge.change(automerge.clone(doc1), d => (d.key = "value")) ``` diff --git a/scripts/ci/fmt_js b/scripts/ci/fmt_js new file mode 100755 index 00000000..acaf1e08 --- /dev/null +++ b/scripts/ci/fmt_js @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -eoux pipefail + +yarn --cwd javascript prettier -c . + diff --git a/scripts/ci/run b/scripts/ci/run index db3f1aaf..aebfe4c4 100755 --- a/scripts/ci/run +++ b/scripts/ci/run @@ -2,6 +2,7 @@ set -eou pipefail ./scripts/ci/fmt +./scripts/ci/fmt_js ./scripts/ci/lint ./scripts/ci/build-test ./scripts/ci/rust-docs From 0306ade93903800332fb539c5ba826b537b0cb00 Mon Sep 17 00:00:00 2001 From: Alex Currie-Clark Date: Fri, 6 Jan 2023 12:47:23 +0000 Subject: [PATCH 16/72] Update action name on `IncPatch` type --- rust/automerge-wasm/index.d.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/automerge-wasm/index.d.ts b/rust/automerge-wasm/index.d.ts index 0e0c38e6..06399f0a 100644 --- a/rust/automerge-wasm/index.d.ts +++ b/rust/automerge-wasm/index.d.ts @@ -104,7 +104,7 @@ export type PutPatch = { } export type IncPatch = { - action: 'put' + action: 'inc' path: Prop[], value: number } From 18a3f617043fd53bd05fdea96ff5d079a8654509 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 10 Jan 2023 12:14:30 +0000 Subject: [PATCH 17/72] Update rust toolchain to 1.66 --- .github/workflows/ci.yaml | 12 ++++++------ rust/automerge-c/build.rs | 2 +- rust/automerge-cli/src/examine_sync.rs | 2 +- rust/automerge-cli/src/export.rs | 2 +- rust/automerge-cli/src/main.rs | 6 +++--- rust/automerge-wasm/src/interop.rs | 4 ++-- rust/automerge-wasm/src/lib.rs | 3 --- rust/automerge/src/automerge/tests.rs | 2 +- rust/automerge/src/columnar/column_range/obj_id.rs | 2 +- rust/automerge/src/lib.rs | 1 - .../src/storage/change/change_op_columns.rs | 2 +- rust/automerge/src/storage/chunk.rs | 2 +- .../automerge/src/storage/document/doc_op_columns.rs | 2 +- rust/automerge/src/sync/bloom.rs | 2 +- 14 files changed, 20 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 361320a0..a5d42010 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.64.0 + toolchain: 1.66.0 default: true components: rustfmt - uses: Swatinem/rust-cache@v1 @@ -28,7 +28,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.64.0 + toolchain: 1.66.0 default: true components: clippy - uses: Swatinem/rust-cache@v1 @@ -42,7 +42,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.64.0 + toolchain: 1.66.0 default: true - uses: Swatinem/rust-cache@v1 - name: Build rust docs @@ -118,7 +118,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.64.0 + toolchain: 1.66.0 default: true - uses: Swatinem/rust-cache@v1 - name: Install CMocka @@ -157,7 +157,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.64.0 + toolchain: 1.66.0 default: true - uses: Swatinem/rust-cache@v1 - run: ./scripts/ci/build-test @@ -170,7 +170,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - 
toolchain: 1.64.0 + toolchain: 1.66.0 default: true - uses: Swatinem/rust-cache@v1 - run: ./scripts/ci/build-test diff --git a/rust/automerge-c/build.rs b/rust/automerge-c/build.rs index 00fd0f87..bf12a105 100644 --- a/rust/automerge-c/build.rs +++ b/rust/automerge-c/build.rs @@ -10,7 +10,7 @@ fn main() { let config = cbindgen::Config::from_file("cbindgen.toml") .expect("Unable to find cbindgen.toml configuration file"); - if let Ok(writer) = cbindgen::generate_with_config(&crate_dir, config) { + if let Ok(writer) = cbindgen::generate_with_config(crate_dir, config) { // \note CMake sets this environment variable before invoking Cargo so // that it can direct the generated header file into its // out-of-source build directory for post-processing. diff --git a/rust/automerge-cli/src/examine_sync.rs b/rust/automerge-cli/src/examine_sync.rs index ad6699d4..c0d5df97 100644 --- a/rust/automerge-cli/src/examine_sync.rs +++ b/rust/automerge-cli/src/examine_sync.rs @@ -28,7 +28,7 @@ pub(crate) fn examine_sync( .map_err(ExamineSyncError::ReadMessage)?; let message = automerge::sync::Message::decode(&buf)?; - let json = serde_json::to_value(&message).unwrap(); + let json = serde_json::to_value(message).unwrap(); if is_tty { print_colored_json(&json).map_err(ExamineSyncError::WriteMessage)?; } else { diff --git a/rust/automerge-cli/src/export.rs b/rust/automerge-cli/src/export.rs index 2a7b4130..45fd7b3b 100644 --- a/rust/automerge-cli/src/export.rs +++ b/rust/automerge-cli/src/export.rs @@ -30,7 +30,7 @@ fn list_to_json(doc: &am::Automerge, obj: &am::ObjId) -> serde_json::Value { let len = doc.length(obj); let mut array = Vec::new(); for i in 0..len { - let val = doc.get(obj, i as usize); + let val = doc.get(obj, i); match val { Ok(Some((am::Value::Object(o), exid))) if o == am::ObjType::Map || o == am::ObjType::Table => diff --git a/rust/automerge-cli/src/main.rs b/rust/automerge-cli/src/main.rs index b0b456c8..8f3f816d 100644 --- a/rust/automerge-cli/src/main.rs +++ b/rust/automerge-cli/src/main.rs @@ -132,7 +132,7 @@ enum Command { fn open_file_or_stdin(maybe_path: Option) -> Result> { if std::io::stdin().is_terminal() { if let Some(path) = maybe_path { - Ok(Box::new(File::open(&path).unwrap())) + Ok(Box::new(File::open(path).unwrap())) } else { Err(anyhow!( "Must provide file path if not providing input via stdin" @@ -146,7 +146,7 @@ fn open_file_or_stdin(maybe_path: Option) -> Result) -> Result> { if std::io::stdout().is_terminal() { if let Some(path) = maybe_path { - Ok(Box::new(File::create(&path).unwrap())) + Ok(Box::new(File::create(path).unwrap())) } else { Err(anyhow!("Must provide file path if not piping to stdout")) } @@ -166,7 +166,7 @@ fn main() -> Result<()> { skip_verifying_heads, } => { let output: Box = if let Some(output_file) = output_file { - Box::new(File::create(&output_file)?) + Box::new(File::create(output_file)?) 
} else { Box::new(std::io::stdout()) }; diff --git a/rust/automerge-wasm/src/interop.rs b/rust/automerge-wasm/src/interop.rs index 20b42bf1..540722df 100644 --- a/rust/automerge-wasm/src/interop.rs +++ b/rust/automerge-wasm/src/interop.rs @@ -589,9 +589,9 @@ impl Automerge { let array = Array::new(); for i in 0..len { let val_and_id = if let Some(heads) = heads { - self.doc.get_at(obj, i as usize, heads) + self.doc.get_at(obj, i, heads) } else { - self.doc.get(obj, i as usize) + self.doc.get(obj, i) }; if let Ok(Some((val, id))) = val_and_id { let subval = match val { diff --git a/rust/automerge-wasm/src/lib.rs b/rust/automerge-wasm/src/lib.rs index ce57f66f..e6f5bed8 100644 --- a/rust/automerge-wasm/src/lib.rs +++ b/rust/automerge-wasm/src/lib.rs @@ -9,7 +9,6 @@ rust_2018_idioms, unreachable_pub, bad_style, - const_err, dead_code, improper_ctypes, non_shorthand_field_patterns, @@ -264,7 +263,6 @@ impl Automerge { datatype: JsValue, ) -> Result<(), error::Insert> { let (obj, _) = self.import(obj)?; - let index = index as f64; let value = self .import_scalar(&value, &datatype.as_string()) .ok_or(error::Insert::ValueNotPrimitive)?; @@ -280,7 +278,6 @@ impl Automerge { value: JsValue, ) -> Result, error::InsertObject> { let (obj, _) = self.import(obj)?; - let index = index as f64; let imported_obj = import_obj(&value, &None)?; let opid = self .doc diff --git a/rust/automerge/src/automerge/tests.rs b/rust/automerge/src/automerge/tests.rs index 050b1fa9..7eadaedd 100644 --- a/rust/automerge/src/automerge/tests.rs +++ b/rust/automerge/src/automerge/tests.rs @@ -1368,7 +1368,7 @@ fn get_path_to_object() { ] ); assert_eq!( - doc.path_to_object(&text).unwrap(), + doc.path_to_object(text).unwrap(), vec![ (ROOT, Prop::Map("a".into())), (map, Prop::Map("b".into())), diff --git a/rust/automerge/src/columnar/column_range/obj_id.rs b/rust/automerge/src/columnar/column_range/obj_id.rs index 6a3e2ef0..d282563e 100644 --- a/rust/automerge/src/columnar/column_range/obj_id.rs +++ b/rust/automerge/src/columnar/column_range/obj_id.rs @@ -166,7 +166,7 @@ impl ObjIdEncoder { } convert::ObjId::Op(o) => { self.actor.append_value(o.actor() as u64); - self.counter.append_value(o.counter() as u64); + self.counter.append_value(o.counter()); } } } diff --git a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index b8604c95..97ff0650 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -8,7 +8,6 @@ rust_2018_idioms, unreachable_pub, bad_style, - const_err, dead_code, improper_ctypes, non_shorthand_field_patterns, diff --git a/rust/automerge/src/storage/change/change_op_columns.rs b/rust/automerge/src/storage/change/change_op_columns.rs index c50c67ae..7c3a65ec 100644 --- a/rust/automerge/src/storage/change/change_op_columns.rs +++ b/rust/automerge/src/storage/change/change_op_columns.rs @@ -177,7 +177,7 @@ impl ChangeOpsColumns { obj.append(op.obj()); key.append(op.key()); insert.append(op.insert()); - action.append_value(op.action() as u64); + action.append_value(op.action()); val.append(&op.val()); pred.append(op.pred()); } diff --git a/rust/automerge/src/storage/chunk.rs b/rust/automerge/src/storage/chunk.rs index 821c2c55..06e31973 100644 --- a/rust/automerge/src/storage/chunk.rs +++ b/rust/automerge/src/storage/chunk.rs @@ -258,7 +258,7 @@ impl Header { Header { checksum: checksum_bytes.into(), chunk_type, - data_len: data.len() as usize, + data_len: data.len(), header_size: header.len(), hash, }, diff --git a/rust/automerge/src/storage/document/doc_op_columns.rs 
b/rust/automerge/src/storage/document/doc_op_columns.rs index 5f61dff8..82de17eb 100644 --- a/rust/automerge/src/storage/document/doc_op_columns.rs +++ b/rust/automerge/src/storage/document/doc_op_columns.rs @@ -116,7 +116,7 @@ impl DocOpColumns { let key = KeyRange::encode(ops.clone().map(|o| o.key()), out); let id = OpIdRange::encode(ops.clone().map(|o| o.id()), out); let insert = BooleanRange::encode(ops.clone().map(|o| o.insert()), out); - let action = RleRange::encode(ops.clone().map(|o| Some(o.action() as u64)), out); + let action = RleRange::encode(ops.clone().map(|o| Some(o.action())), out); let val = ValueRange::encode(ops.clone().map(|o| o.val()), out); let succ = OpIdListRange::encode(ops.map(|o| o.succ()), out); Self { diff --git a/rust/automerge/src/sync/bloom.rs b/rust/automerge/src/sync/bloom.rs index c02acbc0..8523061e 100644 --- a/rust/automerge/src/sync/bloom.rs +++ b/rust/automerge/src/sync/bloom.rs @@ -126,7 +126,7 @@ impl BloomFilter { let num_entries = hashes.len() as u32; let num_bits_per_entry = BITS_PER_ENTRY; let num_probes = NUM_PROBES; - let bits = vec![0; bits_capacity(num_entries, num_bits_per_entry) as usize]; + let bits = vec![0; bits_capacity(num_entries, num_bits_per_entry)]; let mut filter = Self { num_entries, num_bits_per_entry, From 5763210b079edf2de53fd337590a26d6bb775f53 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Wed, 21 Dec 2022 17:42:33 +0000 Subject: [PATCH 18/72] wasm: Allow a choice of text representations The wasm codebase assumed that clients want to represent text as a string of characters. This is faster, but in order to enable backwards compatibility we add a `TextRepresentation` argument to `automerge_wasm::Automerge::new` to allow clients to choose between a `string` or `Array` representation. The `automerge_wasm::Observer` will consult this setting to determine what kind of diffs to generate. 
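A minimal sketch of how the new boolean flag is passed from the JS side, pieced together from the `index.d.ts` and test changes below; the import path and the `notes` field here are illustrative assumptions rather than part of the change:

```typescript
// Assumed entry point; the tests in this patch import the same functions from '..'.
import { create, load } from "@automerge/automerge-wasm"

// text_v2 = true: text objects are materialized as plain JS strings.
const doc1 = create(true, "aaaa")
const notes = doc1.putObject("_root", "notes", "Hello world")
doc1.splice(notes, 6, 5, "everyone")
doc1.text(notes)      // "Hello everyone"
doc1.materialize("/") // { notes: "Hello everyone" } with the string representation

// text_v2 = false: the older representation, text kept as a sequence of characters.
const doc2 = create(false, "bbbb")

// load() takes the same flag after the document bytes.
const doc3 = load(doc1.save(), true)
```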
--- javascript/src/index.ts | 4 +- javascript/src/low_level.ts | 4 +- javascript/test/basic_test.ts | 2 +- .../test/ported_wasm/basic_tests.c | 25 -- rust/automerge-wasm/deno-tests/deno.ts | 2 +- rust/automerge-wasm/index.d.ts | 8 +- rust/automerge-wasm/src/interop.rs | 116 +++++-- rust/automerge-wasm/src/lib.rs | 141 +++++++-- rust/automerge-wasm/src/observer.rs | 55 +++- rust/automerge-wasm/test/apply.ts | 22 +- rust/automerge-wasm/test/readme.ts | 34 +- rust/automerge-wasm/test/test.ts | 294 ++++++++++++------ rust/automerge/src/op_observer.rs | 7 + rust/automerge/src/transaction/inner.rs | 20 +- rust/automerge/tests/test.rs | 4 +- 15 files changed, 510 insertions(+), 228 deletions(-) diff --git a/javascript/src/index.ts b/javascript/src/index.ts index 23df47ce..a5b3a0bb 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -177,7 +177,7 @@ export function init(_opts?: ActorId | InitOptions): Doc { const opts = importOpts(_opts) const freeze = !!opts.freeze const patchCallback = opts.patchCallback - const handle = ApiHandler.create(opts.actor) + const handle = ApiHandler.create(true, opts.actor) handle.enablePatches(true) handle.enableFreeze(!!opts.freeze) handle.registerDatatype("counter", n => new Counter(n)) @@ -460,7 +460,7 @@ export function load( const opts = importOpts(_opts) const actor = opts.actor const patchCallback = opts.patchCallback - const handle = ApiHandler.load(data, actor) + const handle = ApiHandler.load(data, true, actor) handle.enablePatches(true) handle.enableFreeze(!!opts.freeze) handle.registerDatatype("counter", n => new Counter(n)) diff --git a/javascript/src/low_level.ts b/javascript/src/low_level.ts index 51017cb3..94ac63db 100644 --- a/javascript/src/low_level.ts +++ b/javascript/src/low_level.ts @@ -20,10 +20,10 @@ export function UseApi(api: API) { /* eslint-disable */ export const ApiHandler: API = { - create(actor?: Actor): Automerge { + create(textV2: boolean, actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called") }, - load(data: Uint8Array, actor?: Actor): Automerge { + load(data: Uint8Array, textV2: boolean, actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called (load)") }, encodeChange(change: ChangeToEncode): Change { diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index 8bf30914..c14c0e20 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -237,7 +237,7 @@ describe("Automerge", () => { }) it("handle non-text strings", () => { - let doc1 = WASM.create() + let doc1 = WASM.create(true) doc1.put("_root", "text", "hello world") let doc2 = Automerge.load(doc1.save()) assert.throws(() => { diff --git a/rust/automerge-c/test/ported_wasm/basic_tests.c b/rust/automerge-c/test/ported_wasm/basic_tests.c index 4b275300..e2659d62 100644 --- a/rust/automerge-c/test/ported_wasm/basic_tests.c +++ b/rust/automerge-c/test/ported_wasm/basic_tests.c @@ -757,30 +757,6 @@ static void test_should_be_able_to_splice_text(void** state) { assert_memory_equal(str.src, "?", str.count); } -/** - * \brief should NOT be able to insert objects into text - */ -static void test_should_be_unable_to_insert_objects_into_text(void** state) { - AMresultStack* stack = *state; - /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - /* const text = doc.putObject("/", "text", "Hello world"); */ - AMobjId const* const text = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), - 
AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMspliceText(doc, text, 0, 0, AMstr("Hello world"))); - /* assert.throws(() => { - doc.insertObject(text, 6, { hello: "world" }); - }) */ - AMpush(&stack, - AMlistPutObject(doc, text, 6, true, AM_OBJ_TYPE_MAP), - AM_VALUE_VOID, - NULL); - assert_int_not_equal(AMresultStatus(stack->result), AM_STATUS_OK); -} - /** * \brief should be able to save all or incrementally */ @@ -1848,7 +1824,6 @@ int run_ported_wasm_basic_tests(void) { cmocka_unit_test_setup_teardown(test_should_be_able_to_del, setup_stack, teardown_stack), cmocka_unit_test_setup_teardown(test_should_be_able_to_use_counters, setup_stack, teardown_stack), cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_unable_to_insert_objects_into_text, setup_stack, teardown_stack), cmocka_unit_test_setup_teardown(test_should_be_able_to_save_all_or_incrementally, setup_stack, teardown_stack), cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text_2, setup_stack, teardown_stack), cmocka_unit_test_setup_teardown(test_local_inc_increments_all_visible_counters_in_a_map, setup_stack, teardown_stack), diff --git a/rust/automerge-wasm/deno-tests/deno.ts b/rust/automerge-wasm/deno-tests/deno.ts index 1b4c2e07..b346435a 100644 --- a/rust/automerge-wasm/deno-tests/deno.ts +++ b/rust/automerge-wasm/deno-tests/deno.ts @@ -2,7 +2,7 @@ import { create } from '../deno/automerge_wasm.js' Deno.test("It should create, clone and free", () => { - const doc1 = create() + const doc1 = create(false) const doc2 = doc1.clone() doc2.free() }); diff --git a/rust/automerge-wasm/index.d.ts b/rust/automerge-wasm/index.d.ts index 06399f0a..29586b47 100644 --- a/rust/automerge-wasm/index.d.ts +++ b/rust/automerge-wasm/index.d.ts @@ -121,9 +121,9 @@ export type SplicePatch = { values: Value[], } -export function create(actor?: Actor): Automerge; -export function load(data: Uint8Array, actor?: Actor): Automerge; export function encodeChange(change: ChangeToEncode): Change; +export function create(text_v2: boolean, actor?: Actor): Automerge; +export function load(data: Uint8Array, text_v2: boolean, actor?: Actor): Automerge; export function decodeChange(change: Change): DecodedChange; export function initSyncState(): SyncState; export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage; @@ -134,8 +134,8 @@ export function exportSyncState(state: SyncState): JsSyncState; export function importSyncState(state: JsSyncState): SyncState; export interface API { - create(actor?: Actor): Automerge; - load(data: Uint8Array, actor?: Actor): Automerge; + create(text_v2: boolean, actor?: Actor): Automerge; + load(data: Uint8Array, text_v2: boolean, actor?: Actor): Automerge; encodeChange(change: ChangeToEncode): Change; decodeChange(change: Change): DecodedChange; initSyncState(): SyncState; diff --git a/rust/automerge-wasm/src/interop.rs b/rust/automerge-wasm/src/interop.rs index 540722df..2881209a 100644 --- a/rust/automerge-wasm/src/interop.rs +++ b/rust/automerge-wasm/src/interop.rs @@ -1,11 +1,12 @@ use crate::error::InsertObject; use crate::value::Datatype; -use crate::Automerge; +use crate::{Automerge, TextRepresentation}; use automerge as am; use automerge::transaction::Transactable; use automerge::ROOT; use automerge::{Change, ChangeHash, ObjType, Prop}; use js_sys::{Array, Function, JsString, Object, Reflect, Symbol, Uint8Array}; +use std::borrow::Cow; use std::collections::{BTreeSet, HashSet}; use 
std::fmt::Display; use wasm_bindgen::prelude::*; @@ -445,11 +446,32 @@ impl JsObjType { } } - pub(crate) fn subvals(&self) -> &[(Prop, JsValue)] { + pub(crate) fn subvals(&self) -> impl Iterator, JsValue)> + '_ + Clone { match self { - Self::Text(_) => &[], - Self::Map(sub) => sub.as_slice(), - Self::List(sub) => sub.as_slice(), + Self::Text(s) => SubValIter::Str(s.chars().enumerate()), + Self::Map(sub) => SubValIter::Slice(sub.as_slice().iter()), + Self::List(sub) => SubValIter::Slice(sub.as_slice().iter()), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) enum SubValIter<'a> { + Slice(std::slice::Iter<'a, (Prop, JsValue)>), + Str(std::iter::Enumerate>), +} + +impl<'a> Iterator for SubValIter<'a> { + type Item = (std::borrow::Cow<'a, Prop>, JsValue); + + fn next(&mut self) -> Option { + match self { + Self::Slice(i) => i + .next() + .map(|(p, v)| (std::borrow::Cow::Borrowed(p), v.clone())), + Self::Str(i) => i + .next() + .map(|(n, c)| (std::borrow::Cow::Owned(Prop::Seq(n)), c.to_string().into())), } } } @@ -536,13 +558,18 @@ impl Automerge { meta: &JsValue, ) -> Result { let result = match datatype { - Datatype::Text => { - if let Some(heads) = heads { - self.doc.text_at(obj, heads)?.into() - } else { - self.doc.text(obj)?.into() + Datatype::Text => match self.text_rep { + TextRepresentation::String => { + if let Some(heads) = heads { + self.doc.text_at(obj, heads)?.into() + } else { + self.doc.text(obj)?.into() + } } - } + TextRepresentation::Array => self + .wrap_object(self.export_list(obj, heads, meta)?, datatype, obj, meta)? + .into(), + }, Datatype::List => self .wrap_object(self.export_list(obj, heads, meta)?, datatype, obj, meta)? .into(), @@ -570,7 +597,7 @@ impl Automerge { if let Ok(Some((val, id))) = val_and_id { let subval = match val { Value::Object(o) => self.export_object(&id, o.into(), heads, meta)?, - Value::Scalar(_) => self.export_value(alloc(&val))?, + Value::Scalar(_) => self.export_value(alloc(&val, self.text_rep))?, }; js_set(&map, &k, &subval)?; }; @@ -596,7 +623,7 @@ impl Automerge { if let Ok(Some((val, id))) = val_and_id { let subval = match val { Value::Object(o) => self.export_object(&id, o.into(), heads, meta)?, - Value::Scalar(_) => self.export_value(alloc(&val))?, + Value::Scalar(_) => self.export_value(alloc(&val, self.text_rep))?, }; array.push(&subval); }; @@ -699,7 +726,9 @@ impl Automerge { } else { value }; - if matches!(datatype, Datatype::Map | Datatype::List) { + if matches!(datatype, Datatype::Map | Datatype::List) + || (datatype == Datatype::Text && self.text_rep == TextRepresentation::Array) + { set_hidden_value( &value, &Symbol::for_(RAW_OBJECT_SYMBOL), @@ -733,7 +762,8 @@ impl Automerge { exposed.insert(value.1.clone()); js_set(&result, *index as f64, &JsValue::null())?; } else { - let sub_val = self.maybe_wrap_object(alloc(&value.0), &value.1, meta)?; + let sub_val = + self.maybe_wrap_object(alloc(&value.0, self.text_rep), &value.1, meta)?; js_set(&result, *index as f64, &sub_val)?; } Ok(result.into()) @@ -752,7 +782,11 @@ impl Automerge { if let Some(old) = old_val.as_f64() { let new_value: Value<'_> = am::ScalarValue::counter(old as i64 + *value).into(); - js_set(&result, index, &self.export_value(alloc(&new_value))?)?; + js_set( + &result, + index, + &self.export_value(alloc(&new_value, self.text_rep))?, + )?; Ok(result.into()) } else { Err(error::ApplyPatch::IncrementNonNumeric) @@ -763,8 +797,28 @@ impl Automerge { } Patch::DeleteMap { .. } => Err(error::ApplyPatch::DeleteKeyFromSeq), Patch::PutMap { .. 
} => Err(error::ApplyPatch::PutKeyInSeq), - //Patch::SpliceText { .. } => Err(to_js_err("cannot splice text in seq")), - Patch::SpliceText { .. } => Err(error::ApplyPatch::SpliceTextInSeq), + Patch::SpliceText { index, value, .. } => { + match self.text_rep { + TextRepresentation::String => Err(error::ApplyPatch::SpliceTextInSeq), + TextRepresentation::Array => { + let bytes: Vec = value.iter().cloned().collect(); + let val = String::from_utf16_lossy(bytes.as_slice()); + let elems = val + .chars() + .map(|c| { + ( + Value::Scalar(std::borrow::Cow::Owned(am::ScalarValue::Str( + c.to_string().into(), + ))), + ObjId::Root, // Using ROOT is okay because this ID is never used as + // we're producing ScalarValue::Str + ) + }) + .collect::>(); + Ok(self.sub_splice(result, *index, 0, &elems, meta)?) + } + } + } } } @@ -784,7 +838,8 @@ impl Automerge { exposed.insert(value.1.clone()); js_set(&result, key, &JsValue::null())?; } else { - let sub_val = self.maybe_wrap_object(alloc(&value.0), &value.1, meta)?; + let sub_val = + self.maybe_wrap_object(alloc(&value.0, self.text_rep), &value.1, meta)?; js_set(&result, key, &sub_val)?; } Ok(result) @@ -805,7 +860,11 @@ impl Automerge { if let Some(old) = old_val.as_f64() { let new_value: Value<'_> = am::ScalarValue::counter(old as i64 + *value).into(); - js_set(&result, key, &self.export_value(alloc(&new_value))?)?; + js_set( + &result, + key, + &self.export_value(alloc(&new_value, self.text_rep))?, + )?; Ok(result) } else { Err(error::ApplyPatch::IncrementNonNumeric) @@ -908,7 +967,7 @@ impl Automerge { ) -> Result { let args: Array = values .into_iter() - .map(|v| self.maybe_wrap_object(alloc(&v.0), &v.1, meta)) + .map(|v| self.maybe_wrap_object(alloc(&v.0, self.text_rep), &v.1, meta)) .collect::>()?; args.unshift(&(num_del as u32).into()); args.unshift(&(index as u32).into()); @@ -1054,7 +1113,13 @@ impl Automerge { Some(val) => Ok((val.into(), vec![])), None => { if let Ok(js_obj) = import_obj(value, &datatype) { - Ok((js_obj.objtype().into(), js_obj.subvals().to_vec())) + Ok(( + js_obj.objtype().into(), + js_obj + .subvals() + .map(|(p, v)| (p.into_owned(), v)) + .collect::>(), + )) } else { web_sys::console::log_2(&"Invalid value".into(), value); Err(error::InvalidValue) @@ -1093,13 +1158,16 @@ impl Automerge { } } -pub(crate) fn alloc(value: &Value<'_>) -> (Datatype, JsValue) { +pub(crate) fn alloc(value: &Value<'_>, text_rep: TextRepresentation) -> (Datatype, JsValue) { match value { am::Value::Object(o) => match o { ObjType::Map => (Datatype::Map, Object::new().into()), ObjType::Table => (Datatype::Table, Object::new().into()), ObjType::List => (Datatype::List, Array::new().into()), - ObjType::Text => (Datatype::Text, "".into()), + ObjType::Text => match text_rep { + TextRepresentation::String => (Datatype::Text, "".into()), + TextRepresentation::Array => (Datatype::Text, Array::new().into()), + }, }, am::Value::Scalar(s) => match s.as_ref() { am::ScalarValue::Bytes(v) => (Datatype::Bytes, Uint8Array::from(v.as_slice()).into()), diff --git a/rust/automerge-wasm/src/lib.rs b/rust/automerge-wasm/src/lib.rs index e6f5bed8..d6ccc8c8 100644 --- a/rust/automerge-wasm/src/lib.rs +++ b/rust/automerge-wasm/src/lib.rs @@ -27,10 +27,12 @@ #![allow(clippy::unused_unit)] use am::transaction::CommitOptions; use am::transaction::{Observed, Transactable, UnObserved}; +use am::ScalarValue; use automerge as am; use automerge::{Change, ObjId, Prop, TextEncoding, Value, ROOT}; use js_sys::{Array, Function, Object, Uint8Array}; use serde::ser::Serialize; +use 
std::borrow::Cow; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -48,6 +50,8 @@ use interop::{alloc, get_heads, import_obj, js_set, to_js_err, to_prop, AR, JS}; use sync::SyncState; use value::Datatype; +use crate::interop::SubValIter; + #[allow(unused_macros)] macro_rules! log { ( $( $t:tt )* ) => { @@ -61,17 +65,37 @@ type AutoCommit = am::AutoCommitWithObs>; #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; +/// How text is represented in materialized objects on the JS side +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +#[wasm_bindgen] +pub enum TextRepresentation { + /// As an array of characters and objects + Array, + /// As a single JS string + String, +} + +impl std::default::Default for TextRepresentation { + fn default() -> Self { + TextRepresentation::Array + } +} + #[wasm_bindgen] #[derive(Debug)] pub struct Automerge { doc: AutoCommit, freeze: bool, external_types: HashMap, + text_rep: TextRepresentation, } #[wasm_bindgen] impl Automerge { - pub fn new(actor: Option) -> Result { + pub fn new( + actor: Option, + text_rep: TextRepresentation, + ) -> Result { let mut doc = AutoCommit::default().with_encoding(TextEncoding::Utf16); if let Some(a) = actor { let a = automerge::ActorId::from(hex::decode(a)?.to_vec()); @@ -81,6 +105,7 @@ impl Automerge { doc, freeze: false, external_types: HashMap::default(), + text_rep, }) } @@ -90,6 +115,7 @@ impl Automerge { doc: self.doc.clone(), freeze: self.freeze, external_types: self.external_types.clone(), + text_rep: self.text_rep, }; if let Some(s) = actor { let actor = automerge::ActorId::from(hex::decode(s)?.to_vec()); @@ -113,6 +139,7 @@ impl Automerge { doc, freeze: self.freeze, external_types: self.external_types.clone(), + text_rep: self.text_rep, }; if let Some(s) = actor { let actor = @@ -187,21 +214,27 @@ impl Automerge { let (obj, obj_type) = self.import(obj)?; let start = start as usize; let delete_count = delete_count as usize; - if let Some(t) = text.as_string() { - if obj_type == am::ObjType::Text { + let vals = if let Some(t) = text.as_string() { + if obj_type == am::ObjType::Text && self.text_rep == TextRepresentation::String { self.doc.splice_text(&obj, start, delete_count, &t)?; return Ok(()); + } else { + t.chars() + .map(|c| ScalarValue::Str(c.to_string().into())) + .collect::>() } - } - let mut vals = vec![]; - if let Ok(array) = text.dyn_into::() { - for (index, i) in array.iter().enumerate() { - let value = self - .import_scalar(&i, &None) - .ok_or(error::Splice::ValueNotPrimitive(index))?; - vals.push(value); + } else { + let mut vals = vec![]; + if let Ok(array) = text.dyn_into::() { + for (index, i) in array.iter().enumerate() { + let value = self + .import_scalar(&i, &None) + .ok_or(error::Splice::ValueNotPrimitive(index))?; + vals.push(value); + } } - } + vals + }; if !vals.is_empty() { self.doc.splice(&obj, start, delete_count, vals)?; } else { @@ -211,9 +244,14 @@ impl Automerge { am::ObjType::List => { self.doc.splice(&obj, start, delete_count, vals)?; } - am::ObjType::Text => { - self.doc.splice_text(&obj, start, delete_count, "")?; - } + am::ObjType::Text => match self.text_rep { + TextRepresentation::String => { + self.doc.splice_text(&obj, start, delete_count, "")?; + } + TextRepresentation::Array => { + self.doc.splice(&obj, start, delete_count, vals)?; + } + }, _ => {} } } @@ -248,9 +286,16 @@ impl Automerge { .doc .insert_object(&obj, index, imported_obj.objtype())?; if let Some(s) = imported_obj.text() { - 
self.doc.splice_text(&opid, 0, 0, s)?; + match self.text_rep { + TextRepresentation::String => { + self.doc.splice_text(&opid, 0, 0, s)?; + } + TextRepresentation::Array => { + self.subset::(&opid, imported_obj.subvals())?; + } + } } else { - self.subset::(&opid, imported_obj.subvals())?; + self.subset::(&opid, imported_obj.subvals())?; } Ok(opid.to_string().into()) } @@ -283,9 +328,16 @@ impl Automerge { .doc .insert_object(&obj, index as usize, imported_obj.objtype())?; if let Some(s) = imported_obj.text() { - self.doc.splice_text(&opid, 0, 0, s)?; + match self.text_rep { + TextRepresentation::String => { + self.doc.splice_text(&opid, 0, 0, s)?; + } + TextRepresentation::Array => { + self.subset::(&opid, imported_obj.subvals())?; + } + } } else { - self.subset::(&opid, imported_obj.subvals())?; + self.subset::(&opid, imported_obj.subvals())?; } Ok(opid.to_string().into()) } @@ -318,23 +370,31 @@ impl Automerge { let imported_obj = import_obj(&value, &None)?; let opid = self.doc.put_object(&obj, prop, imported_obj.objtype())?; if let Some(s) = imported_obj.text() { - self.doc.splice_text(&opid, 0, 0, s)?; + match self.text_rep { + TextRepresentation::String => { + self.doc.splice_text(&opid, 0, 0, s)?; + } + TextRepresentation::Array => { + self.subset::(&opid, imported_obj.subvals())?; + } + } } else { - self.subset::(&opid, imported_obj.subvals())?; + self.subset::(&opid, imported_obj.subvals())?; } Ok(opid.to_string().into()) } - fn subset(&mut self, obj: &am::ObjId, vals: &[(am::Prop, JsValue)]) -> Result<(), E> + fn subset<'a, E, I>(&mut self, obj: &am::ObjId, vals: I) -> Result<(), E> where + I: IntoIterator, JsValue)>, E: From + From + From, { for (p, v) in vals { - let (value, subvals) = self.import_value(v, None)?; + let (value, subvals) = self.import_value(v.as_ref(), None)?; //let opid = self.0.set(id, p, value)?; - let opid = match (p, value) { + let opid = match (p.as_ref(), value) { (Prop::Map(s), Value::Object(objtype)) => { Some(self.doc.put_object(obj, s, objtype)?) } @@ -351,7 +411,7 @@ impl Automerge { } }; if let Some(opid) = opid { - self.subset::(&opid, &subvals)?; + self.subset::(&opid, SubValIter::Slice(subvals.as_slice().iter()))?; } } Ok(()) @@ -387,7 +447,7 @@ impl Automerge { self.doc.get(&obj, prop)? }; if let Some((value, id)) = value { - match alloc(&value) { + match alloc(&value, self.text_rep) { (datatype, js_value) if datatype.is_scalar() => Ok(js_value), _ => Ok(id.to_string().into()), } @@ -425,7 +485,7 @@ impl Automerge { } (Value::Scalar(_), _) => { let result = Array::new(); - let (datatype, value) = alloc(&value.0); + let (datatype, value) = alloc(&value.0, self.text_rep); result.push(&datatype.into()); result.push(&value); Ok(result.into()) @@ -457,7 +517,7 @@ impl Automerge { }?; for (value, id) in values { let sub = Array::new(); - let (datatype, js_value) = alloc(&value); + let (datatype, js_value) = alloc(&value, self.text_rep); sub.push(&datatype.into()); if value.is_scalar() { sub.push(&js_value); @@ -485,6 +545,7 @@ impl Automerge { .as_bool() .ok_or_else(|| to_js_err("must pass a bool to enablePatches"))?; let old_enabled = self.doc.observer().enable(enable); + self.doc.observer().set_text_rep(self.text_rep); Ok(old_enabled.into()) } @@ -714,6 +775,7 @@ impl Automerge { let _patches = self.doc.observer().take_patches(); // throw away patches Ok(self.export_object(&obj, obj_type.into(), heads.as_ref(), &meta)?) 
} + #[wasm_bindgen(js_name = emptyChange)] pub fn empty_change(&mut self, message: Option, time: Option) -> JsValue { let time = time.map(|f| f as i64); @@ -724,16 +786,30 @@ impl Automerge { } #[wasm_bindgen(js_name = create)] -pub fn init(actor: Option) -> Result { +pub fn init(text_v2: bool, actor: Option) -> Result { console_error_panic_hook::set_once(); - Automerge::new(actor) + let text_rep = if text_v2 { + TextRepresentation::String + } else { + TextRepresentation::Array + }; + Automerge::new(actor, text_rep) } #[wasm_bindgen(js_name = load)] -pub fn load(data: Uint8Array, actor: Option) -> Result { +pub fn load( + data: Uint8Array, + text_v2: bool, + actor: Option, +) -> Result { let data = data.to_vec(); + let text_rep = if text_v2 { + TextRepresentation::String + } else { + TextRepresentation::Array + }; let mut doc = am::AutoCommitWithObs::::load(&data)? - .with_observer(Observer::default()) + .with_observer(Observer::default().with_text_rep(text_rep)) .with_encoding(TextEncoding::Utf16); if let Some(s) = actor { let actor = @@ -744,6 +820,7 @@ pub fn load(data: Uint8Array, actor: Option) -> Result, + text_rep: TextRepresentation, } impl Observer { @@ -33,6 +39,15 @@ impl Observer { } } } + + pub(crate) fn with_text_rep(mut self, text_rep: TextRepresentation) -> Self { + self.text_rep = text_rep; + self + } + + pub(crate) fn set_text_rep(&mut self, text_rep: TextRepresentation) { + self.text_rep = text_rep; + } } #[derive(Debug, Clone)] @@ -121,6 +136,20 @@ impl OpObserver for Observer { fn splice_text(&mut self, doc: &Automerge, obj: ObjId, index: usize, value: &str) { if self.enabled { + if self.text_rep == TextRepresentation::Array { + for (i, c) in value.chars().enumerate() { + self.insert( + doc, + obj.clone(), + index + i, + ( + Value::Scalar(Cow::Owned(ScalarValue::Str(c.to_string().into()))), + ObjId::Root, // We hope this is okay + ), + ); + } + return; + } if let Some(Patch::SpliceText { obj: tail_obj, index: tail_index, @@ -316,8 +345,13 @@ impl OpObserver for Observer { Observer { patches: vec![], enabled: self.enabled, + text_rep: self.text_rep, } } + + fn text_as_seq(&self) -> bool { + self.text_rep == TextRepresentation::Array + } } fn prop_to_js(p: &Prop) -> JsValue { @@ -377,7 +411,11 @@ impl TryFrom for JsValue { "path", export_path(path.as_slice(), &Prop::Map(key)), )?; - js_set(&result, "value", alloc(&value.0).1)?; + js_set( + &result, + "value", + alloc(&value.0, TextRepresentation::String).1, + )?; Ok(result.into()) } Patch::PutSeq { @@ -389,7 +427,11 @@ impl TryFrom for JsValue { "path", export_path(path.as_slice(), &Prop::Seq(index)), )?; - js_set(&result, "value", alloc(&value.0).1)?; + js_set( + &result, + "value", + alloc(&value.0, TextRepresentation::String).1, + )?; Ok(result.into()) } Patch::Insert { @@ -407,7 +449,10 @@ impl TryFrom for JsValue { js_set( &result, "values", - values.iter().map(|v| alloc(&v.0).1).collect::(), + values + .iter() + .map(|v| alloc(&v.0, TextRepresentation::String).1) + .collect::(), )?; Ok(result.into()) } diff --git a/rust/automerge-wasm/test/apply.ts b/rust/automerge-wasm/test/apply.ts index d4b8c95e..453b4c26 100644 --- a/rust/automerge-wasm/test/apply.ts +++ b/rust/automerge-wasm/test/apply.ts @@ -24,10 +24,10 @@ describe('Automerge', () => { describe('Patch Apply', () => { it('apply nested sets on maps', () => { const start = { hello: { mellow: { yellow: "world", x: 1 }, y : 2 } } - const doc1 = create() + const doc1 = create(true) doc1.putObject("/", "hello", start.hello); let mat = doc1.materialize("/") - const 
doc2 = create() + const doc2 = create(true) doc2.enablePatches(true) doc2.merge(doc1) @@ -47,10 +47,10 @@ describe('Automerge', () => { it('apply patches on lists', () => { const start = { list: [1,2,3,4] } - const doc1 = create() + const doc1 = create(true) doc1.putObject("/", "list", start.list); let mat = doc1.materialize("/") - const doc2 = create() + const doc2 = create(true) doc2.enablePatches(true) doc2.merge(doc1) mat = doc1.materialize("/") @@ -78,7 +78,7 @@ describe('Automerge', () => { ] ] } - const doc1 = create() + const doc1 = create(true) doc1.enablePatches(true) doc1.putObject("/", "list", start.list); let base = doc1.applyPatches({}) @@ -99,7 +99,7 @@ describe('Automerge', () => { }) it('large inserts should make one splice patch', () => { - const doc1 = create() + const doc1 = create(true) doc1.enablePatches(true) doc1.putObject("/", "list", "abc"); const patches = doc1.popPatches() @@ -109,7 +109,7 @@ describe('Automerge', () => { }) it('it should allow registering type wrappers', () => { - const doc1 = create() + const doc1 = create(true) doc1.enablePatches(true) doc1.registerDatatype("counter", (n: number) => new Counter(n)) const doc2 = doc1.fork() @@ -133,7 +133,7 @@ describe('Automerge', () => { }) it('text can be managed as an array or a string', () => { - const doc1 = create("aaaa") + const doc1 = create(true, "aaaa") doc1.enablePatches(true) doc1.putObject("/", "notes", "hello world") @@ -142,7 +142,7 @@ describe('Automerge', () => { assert.deepEqual( mat, { notes: "hello world" } ) - const doc2 = create() + const doc2 = create(true) let apply : any = doc2.materialize("/") doc2.enablePatches(true) apply = doc2.applyPatches(apply) @@ -163,7 +163,7 @@ describe('Automerge', () => { }) it('should set the OBJECT_ID property on lists, maps, and text objects and not on scalars', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') const mat: any = doc1.materialize("/") doc1.enablePatches(true) doc1.registerDatatype("counter", (n: number) => new Counter(n)) @@ -193,7 +193,7 @@ describe('Automerge', () => { }) it('should set the root OBJECT_ID to "_root"', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') const mat: any = doc1.materialize("/") assert.equal(_obj(mat), "_root") doc1.enablePatches(true) diff --git a/rust/automerge-wasm/test/readme.ts b/rust/automerge-wasm/test/readme.ts index 18c55055..e5823556 100644 --- a/rust/automerge-wasm/test/readme.ts +++ b/rust/automerge-wasm/test/readme.ts @@ -6,13 +6,13 @@ import { create, load, initSyncState } from '..' 
describe('Automerge', () => { describe('Readme Examples', () => { it('Using the Library and Creating a Document', () => { - const doc = create() + const doc = create(true) const sync = initSyncState() doc.free() sync.free() }) it('Automerge Scalar Types (1)', () => { - const doc = create() + const doc = create(true) doc.put("/", "prop1", 100) // int doc.put("/", "prop2", 3.14) // f64 doc.put("/", "prop3", "hello world") @@ -32,7 +32,7 @@ describe('Automerge', () => { }) }) it('Automerge Scalar Types (2)', () => { - const doc = create() + const doc = create(true) doc.put("/", "prop1", 100, "int") doc.put("/", "prop2", 100, "uint") doc.put("/", "prop3", 100.5, "f64") @@ -45,7 +45,7 @@ describe('Automerge', () => { doc.put("/", "prop10", null, "null") }) it('Automerge Object Types (1)', () => { - const doc = create() + const doc = create(true) // you can create an object by passing in the inital state - if blank pass in `{}` // the return value is the Object Id @@ -64,7 +64,7 @@ describe('Automerge', () => { const notes = doc.putObject("/", "notes", "Hello world!") }) it('Automerge Object Types (2)', () => { - const doc = create() + const doc = create(true) const config = doc.putObject("/", "config", { align: "left", archived: false, cycles: [10, 19, 21] }) @@ -85,7 +85,7 @@ describe('Automerge', () => { }) }) it('Maps (1)', () => { - const doc = create() + const doc = create(true) const mymap = doc.putObject("_root", "mymap", { foo: "bar"}) // make a new map with the foo key @@ -99,7 +99,7 @@ describe('Automerge', () => { assert.deepEqual(doc.materialize("_root"), { mymap: { bytes: new Uint8Array([1,2,3]), foo: "bar", sub: {} }}) }) it('Lists (1)', () => { - const doc = create() + const doc = create(true) const items = doc.putObject("_root", "items", [10,"box"]) // init a new list with two elements doc.push(items, true) // push `true` to the end of the list @@ -113,14 +113,14 @@ describe('Automerge', () => { assert.deepEqual(doc.length(items),6) }) it('Text (1)', () => { - const doc = create("aaaaaa") + const doc = create(true, "aaaaaa") const notes = doc.putObject("_root", "notes", "Hello world") doc.splice(notes, 6, 5, "everyone") assert.deepEqual(doc.text(notes), "Hello everyone") }) it('Querying Data (1)', () => { - const doc1 = create("aabbcc") + const doc1 = create(true, "aabbcc") doc1.put("_root", "key1", "val1") const key2 = doc1.putObject("_root", "key2", []) @@ -140,7 +140,7 @@ describe('Automerge', () => { assert.deepEqual(doc1.getAll("_root","key3"),[[ "str", "doc1val", "3@aabbcc"], ["str", "doc2val", "3@ffaaff"]]) }) it('Counters (1)', () => { - const doc1 = create("aaaaaa") + const doc1 = create(true, "aaaaaa") doc1.put("_root", "number", 0) doc1.put("_root", "total", 0, "counter") @@ -156,7 +156,7 @@ describe('Automerge', () => { assert.deepEqual(doc1.materialize("_root"), { number: 10, total: 33 }) }) it('Transactions (1)', () => { - const doc = create() + const doc = create(true) doc.put("_root", "key", "val1") @@ -178,7 +178,7 @@ describe('Automerge', () => { assert.deepEqual(doc.pendingOps(),0) }) it('Viewing Old Versions of the Document (1)', () => { - const doc = create() + const doc = create(true) doc.put("_root", "key", "val1") const heads1 = doc.getHeads() @@ -194,7 +194,7 @@ describe('Automerge', () => { assert.deepEqual(doc.get("_root","key",[]), undefined) }) it('Forking And Merging (1)', () => { - const doc1 = create() + const doc1 = create(true) doc1.put("_root", "key1", "val1") const doc2 = doc1.fork() @@ -208,13 +208,13 @@ describe('Automerge', () => { 
assert.deepEqual(doc2.materialize("_root"), { key1: "val1", key3: "val3" }) }) it('Saving And Loading (1)', () => { - const doc1 = create() + const doc1 = create(true) doc1.put("_root", "key1", "value1") const save1 = doc1.save() - const doc2 = load(save1) + const doc2 = load(save1, true) doc2.materialize("_root") // returns { key1: "value1" } @@ -230,9 +230,9 @@ describe('Automerge', () => { doc2.loadIncremental(saveIncremental) - const doc3 = load(save2) + const doc3 = load(save2, true) - const doc4 = load(save3) + const doc4 = load(save3, true) assert.deepEqual(doc1.materialize("_root"), { key1: "value1", key2: "value2" }) assert.deepEqual(doc2.materialize("_root"), { key1: "value1", key2: "value2" }) diff --git a/rust/automerge-wasm/test/test.ts b/rust/automerge-wasm/test/test.ts index 70b56c55..56aaae74 100644 --- a/rust/automerge-wasm/test/test.ts +++ b/rust/automerge-wasm/test/test.ts @@ -4,6 +4,7 @@ import assert from 'assert' import { BloomFilter } from './helpers/sync' import { create, load, SyncState, Automerge, encodeChange, decodeChange, initSyncState, decodeSyncMessage, decodeSyncState, encodeSyncState, encodeSyncMessage } from '..' import { Value, DecodedSyncMessage, Hash } from '..'; +import {kill} from 'process'; function sync(a: Automerge, b: Automerge, aSyncState = initSyncState(), bSyncState = initSyncState()) { const MAX_ITER = 10 @@ -29,25 +30,25 @@ describe('Automerge', () => { describe('basics', () => { it('should create, clone and free', () => { - const doc1 = create() + const doc1 = create(true) const doc2 = doc1.clone() doc2.free() }) it('should be able to start and commit', () => { - const doc = create() + const doc = create(true) doc.commit() }) it('getting a nonexistent prop does not throw an error', () => { - const doc = create() + const doc = create(true) const root = "_root" const result = doc.getWithType(root, "hello") assert.deepEqual(result, undefined) }) it('should be able to set and get a simple value', () => { - const doc: Automerge = create("aabbcc") + const doc: Automerge = create(true, "aabbcc") const root = "_root" let result @@ -105,7 +106,7 @@ describe('Automerge', () => { }) it('should be able to use bytes', () => { - const doc = create() + const doc = create(true) doc.put("_root", "data1", new Uint8Array([10, 11, 12])); doc.put("_root", "data2", new Uint8Array([13, 14, 15]), "bytes"); const value1 = doc.getWithType("_root", "data1") @@ -115,7 +116,7 @@ describe('Automerge', () => { }) it('should be able to make subobjects', () => { - const doc = create() + const doc = create(true) const root = "_root" let result @@ -131,7 +132,7 @@ describe('Automerge', () => { }) it('should be able to make lists', () => { - const doc = create() + const doc = create(true) const root = "_root" const sublist = doc.putObject(root, "numbers", []) @@ -153,7 +154,7 @@ describe('Automerge', () => { }) it('lists have insert, set, splice, and push ops', () => { - const doc = create() + const doc = create(true) const root = "_root" const sublist = doc.putObject(root, "letters", []) @@ -175,7 +176,7 @@ describe('Automerge', () => { }) it('should be able delete non-existent props', () => { - const doc = create() + const doc = create(true) doc.put("_root", "foo", "bar") doc.put("_root", "bip", "bap") @@ -195,7 +196,7 @@ describe('Automerge', () => { }) it('should be able to del', () => { - const doc = create() + const doc = create(true) const root = "_root" doc.put(root, "xxx", "xxx"); @@ -205,7 +206,7 @@ describe('Automerge', () => { }) it('should be able to use 
counters', () => { - const doc = create() + const doc = create(true) const root = "_root" doc.put(root, "counter", 10, "counter"); @@ -217,7 +218,7 @@ describe('Automerge', () => { }) it('should be able to splice text', () => { - const doc = create() + const doc = create(true) const root = "_root"; const text = doc.putObject(root, "text", ""); @@ -232,8 +233,8 @@ describe('Automerge', () => { assert.deepEqual(doc.getWithType(text, 12), ["str", "?"]) }) - it('should NOT be able to insert objects into text', () => { - const doc = create() + it.skip('should NOT be able to insert objects into text', () => { + const doc = create(true) const text = doc.putObject("/", "text", "Hello world"); assert.throws(() => { doc.insertObject(text, 6, { hello: "world" }); @@ -241,7 +242,7 @@ describe('Automerge', () => { }) it('should be able save all or incrementally', () => { - const doc = create() + const doc = create(true) doc.put("_root", "foo", 1) @@ -262,9 +263,9 @@ describe('Automerge', () => { assert.notDeepEqual(saveA, saveB); - const docA = load(saveA); - const docB = load(saveB); - const docC = load(saveMidway) + const docA = load(saveA, true); + const docB = load(saveB, true); + const docC = load(saveMidway, true) docC.loadIncremental(save3) assert.deepEqual(docA.keys("_root"), docB.keys("_root")); @@ -273,7 +274,7 @@ describe('Automerge', () => { }) it('should be able to splice text', () => { - const doc = create() + const doc = create(true) const text = doc.putObject("_root", "text", ""); doc.splice(text, 0, 0, "hello world"); const hash1 = doc.commit(); @@ -291,10 +292,10 @@ describe('Automerge', () => { }) it('local inc increments all visible counters in a map', () => { - const doc1 = create("aaaa") + const doc1 = create(true, "aaaa") doc1.put("_root", "hello", "world") - const doc2 = load(doc1.save(), "bbbb"); - const doc3 = load(doc1.save(), "cccc"); + const doc2 = load(doc1.save(), true, "bbbb"); + const doc3 = load(doc1.save(), true, "cccc"); const heads = doc1.getHeads() doc1.put("_root", "cnt", 20) doc2.put("_root", "cnt", 0, "counter") @@ -315,16 +316,16 @@ describe('Automerge', () => { ]) const save1 = doc1.save() - const doc4 = load(save1) + const doc4 = load(save1, true) assert.deepEqual(doc4.save(), save1); }) it('local inc increments all visible counters in a sequence', () => { - const doc1 = create("aaaa") + const doc1 = create(true, "aaaa") const seq = doc1.putObject("_root", "seq", []) doc1.insert(seq, 0, "hello") - const doc2 = load(doc1.save(), "bbbb"); - const doc3 = load(doc1.save(), "cccc"); + const doc2 = load(doc1.save(), true, "bbbb"); + const doc3 = load(doc1.save(), true, "cccc"); const heads = doc1.getHeads() doc1.put(seq, 0, 20) doc2.put(seq, 0, 0, "counter") @@ -345,12 +346,12 @@ describe('Automerge', () => { ]) const save = doc1.save() - const doc4 = load(save) + const doc4 = load(save, true) assert.deepEqual(doc4.save(), save); }) it('paths can be used instead of objids', () => { - const doc = create("aaaa") + const doc = create(true, "aaaa") doc.putObject("_root", "list", [{ foo: "bar" }, [1, 2, 3]]) assert.deepEqual(doc.materialize("/"), { list: [{ foo: "bar" }, [1, 2, 3]] }) assert.deepEqual(doc.materialize("/list"), [{ foo: "bar" }, [1, 2, 3]]) @@ -358,8 +359,8 @@ describe('Automerge', () => { }) it('should be able to fetch changes by hash', () => { - const doc1 = create("aaaa") - const doc2 = create("bbbb") + const doc1 = create(true, "aaaa") + const doc2 = create(true, "bbbb") doc1.put("/", "a", "b") doc2.put("/", "b", "c") const head1 = doc1.getHeads() @@ 
-372,7 +373,7 @@ describe('Automerge', () => { }) it('recursive sets are possible', () => { - const doc = create("aaaa") + const doc = create(true, "aaaa") const l1 = doc.putObject("_root", "list", [{ foo: "bar" }, [1, 2, 3]]) const l2 = doc.insertObject(l1, 0, { zip: ["a", "b"] }) doc.putObject("_root", "info1", "hello world") // 'text' object @@ -390,7 +391,7 @@ describe('Automerge', () => { }) it('only returns an object id when objects are created', () => { - const doc = create("aaaa") + const doc = create(true, "aaaa") const r1 = doc.put("_root", "foo", "bar") const r2 = doc.putObject("_root", "list", []) const r3 = doc.put("_root", "counter", 10, "counter") @@ -412,13 +413,13 @@ describe('Automerge', () => { }) it('objects without properties are preserved', () => { - const doc1 = create("aaaa") + const doc1 = create(true, "aaaa") const a = doc1.putObject("_root", "a", {}); const b = doc1.putObject("_root", "b", {}); const c = doc1.putObject("_root", "c", {}); doc1.put(c, "d", "dd"); const saved = doc1.save(); - const doc2 = load(saved); + const doc2 = load(saved, true); assert.deepEqual(doc2.getWithType("_root", "a"), ["map", a]) assert.deepEqual(doc2.keys(a), []) assert.deepEqual(doc2.getWithType("_root", "b"), ["map", b]) @@ -429,7 +430,7 @@ describe('Automerge', () => { }) it('should allow you to fork at a heads', () => { - const A = create("aaaaaa") + const A = create(true, "aaaaaa") A.put("/", "key1", "val1"); A.put("/", "key2", "val2"); const heads1 = A.getHeads(); @@ -444,7 +445,7 @@ describe('Automerge', () => { }) it('should handle merging text conflicts then saving & loading', () => { - const A = create("aabbcc") + const A = create(true, "aabbcc") const At = A.putObject('_root', 'text', "") A.splice(At, 0, 0, 'hello') @@ -461,7 +462,7 @@ describe('Automerge', () => { const binary = A.save() - const C = load(binary) + const C = load(binary, true) assert.deepEqual(C.getWithType('_root', 'text'), ['text', '1@aabbcc']) assert.deepEqual(C.text(At), 'hell! 
world') @@ -470,7 +471,7 @@ describe('Automerge', () => { describe('patch generation', () => { it('should include root object key updates', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.put('_root', 'hello', 'world') doc2.enablePatches(true) doc2.loadIncremental(doc1.saveIncremental()) @@ -480,7 +481,7 @@ describe('Automerge', () => { }) it('should include nested object creation', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.putObject('_root', 'birds', { friday: { robins: 3 } }) doc2.enablePatches(true) doc2.loadIncremental(doc1.saveIncremental()) @@ -492,7 +493,7 @@ describe('Automerge', () => { }) it('should delete map keys', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.put('_root', 'favouriteBird', 'Robin') doc2.enablePatches(true) doc2.loadIncremental(doc1.saveIncremental()) @@ -505,7 +506,7 @@ describe('Automerge', () => { }) it('should include list element insertion', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.putObject('_root', 'birds', ['Goldfinch', 'Chaffinch']) doc2.enablePatches(true) doc2.loadIncremental(doc1.saveIncremental()) @@ -516,7 +517,7 @@ describe('Automerge', () => { }) it('should insert nested maps into a list', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.putObject('_root', 'birds', []) doc2.loadIncremental(doc1.saveIncremental()) doc1.insertObject('1@aaaa', 0, { species: 'Goldfinch', count: 3 }) @@ -530,7 +531,7 @@ describe('Automerge', () => { }) it('should calculate list indexes based on visible elements', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.putObject('_root', 'birds', ['Goldfinch', 'Chaffinch']) doc2.loadIncremental(doc1.saveIncremental()) doc1.delete('1@aaaa', 0) @@ -546,7 +547,7 @@ describe('Automerge', () => { }) it('should handle concurrent insertions at the head of a list', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc'), doc4 = create('dddd') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc'), doc4 = create(true, 'dddd') doc1.putObject('_root', 'values', []) const change1 = doc1.saveIncremental() doc2.loadIncremental(change1) @@ -572,7 +573,7 @@ describe('Automerge', () => { }) it('should handle concurrent insertions beyond the head', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc'), doc4 = create('dddd') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc'), doc4 = create(true, 'dddd') doc1.putObject('_root', 'values', ['a', 'b']) const change1 = doc1.saveIncremental() doc2.loadIncremental(change1) @@ -598,7 +599,7 @@ describe('Automerge', () => { }) it('should handle conflicts on root object keys', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc'), doc4 = create('dddd') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc'), doc4 = create(true, 'dddd') doc1.put('_root', 'bird', 'Greenfinch') doc2.put('_root', 'bird', 'Goldfinch') const change1 = doc1.saveIncremental(), change2 = doc2.saveIncremental() @@ -620,7 
+621,7 @@ describe('Automerge', () => { }) it('should handle three-way conflicts', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc') doc1.put('_root', 'bird', 'Greenfinch') doc2.put('_root', 'bird', 'Chaffinch') doc3.put('_root', 'bird', 'Goldfinch') @@ -654,7 +655,7 @@ describe('Automerge', () => { }) it('should allow a conflict to be resolved', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc') doc1.put('_root', 'bird', 'Greenfinch') doc2.put('_root', 'bird', 'Chaffinch') doc3.enablePatches(true) @@ -672,7 +673,7 @@ describe('Automerge', () => { }) it('should handle a concurrent map key overwrite and delete', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.put('_root', 'bird', 'Greenfinch') doc2.loadIncremental(doc1.saveIncremental()) doc1.put('_root', 'bird', 'Goldfinch') @@ -695,7 +696,7 @@ describe('Automerge', () => { }) it('should handle a conflict on a list element', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc'), doc4 = create('dddd') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc'), doc4 = create(true, 'dddd') doc1.putObject('_root', 'birds', ['Thrush', 'Magpie']) const change1 = doc1.saveIncremental() doc2.loadIncremental(change1) @@ -722,7 +723,7 @@ describe('Automerge', () => { }) it('should handle a concurrent list element overwrite and delete', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc'), doc4 = create('dddd') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc'), doc4 = create(true, 'dddd') doc1.putObject('_root', 'birds', ['Parakeet', 'Magpie', 'Thrush']) const change1 = doc1.saveIncremental() doc2.loadIncremental(change1) @@ -755,7 +756,7 @@ describe('Automerge', () => { }) it('should handle deletion of a conflict value', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), doc3 = create('cccc') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), doc3 = create(true, 'cccc') doc1.put('_root', 'bird', 'Robin') doc2.put('_root', 'bird', 'Wren') const change1 = doc1.saveIncremental(), change2 = doc2.saveIncremental() @@ -778,7 +779,7 @@ describe('Automerge', () => { }) it('should handle conflicting nested objects', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc1.putObject('_root', 'birds', ['Parakeet']) doc2.putObject('_root', 'birds', { 'Sparrowhawk': 1 }) const change1 = doc1.saveIncremental(), change2 = doc2.saveIncremental() @@ -796,7 +797,7 @@ describe('Automerge', () => { }) it('should support date objects', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb'), now = new Date() + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb'), now = new Date() doc1.put('_root', 'createdAt', now) doc2.enablePatches(true) doc2.loadIncremental(doc1.saveIncremental()) @@ -807,7 +808,7 @@ describe('Automerge', () => { }) it('should capture local put ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) doc1.put('_root', 'key1', 1) doc1.put('_root', 'key1', 2) @@ -825,7 +826,7 @@ describe('Automerge', () => { }) it('should 
capture local insert ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) const list = doc1.putObject('_root', 'list', []) doc1.insert(list, 0, 1) @@ -841,7 +842,7 @@ describe('Automerge', () => { }) it('should capture local push ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) const list = doc1.putObject('_root', 'list', []) doc1.push(list, 1) @@ -855,7 +856,7 @@ describe('Automerge', () => { }) it('should capture local splice ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) const list = doc1.putObject('_root', 'list', []) doc1.splice(list, 0, 0, [1, 2, 3, 4]) @@ -868,7 +869,7 @@ describe('Automerge', () => { }) it('should capture local increment ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) doc1.put('_root', 'counter', 2, 'counter') doc1.increment('_root', 'counter', 4) @@ -881,7 +882,7 @@ describe('Automerge', () => { it('should capture local delete ops', () => { - const doc1 = create('aaaa') + const doc1 = create(true, 'aaaa') doc1.enablePatches(true) doc1.put('_root', 'key1', 1) doc1.put('_root', 'key2', 2) @@ -896,7 +897,7 @@ describe('Automerge', () => { }) it('should support counters in a map', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc2.enablePatches(true) doc1.put('_root', 'starlings', 2, 'counter') doc2.loadIncremental(doc1.saveIncremental()) @@ -910,7 +911,7 @@ describe('Automerge', () => { }) it('should support counters in a list', () => { - const doc1 = create('aaaa'), doc2 = create('bbbb') + const doc1 = create(true, 'aaaa'), doc2 = create(true, 'bbbb') doc2.enablePatches(true) const list = doc1.putObject('_root', 'list', []) doc2.loadIncremental(doc1.saveIncremental()) @@ -934,7 +935,7 @@ describe('Automerge', () => { describe('sync', () => { it('should send a sync message implying no local data', () => { - const doc = create() + const doc = create(true) const s1 = initSyncState() const m1 = doc.generateSyncMessage(s1) if (m1 === null) { throw new RangeError("message should not be null") } @@ -948,7 +949,7 @@ describe('Automerge', () => { }) it('should not reply if we have no data as well', () => { - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) const s1 = initSyncState(), s2 = initSyncState() const m1 = n1.generateSyncMessage(s1) if (m1 === null) { throw new RangeError("message should not be null") } @@ -958,7 +959,7 @@ describe('Automerge', () => { }) it('repos with equal heads do not need a reply message', () => { - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) const s1 = initSyncState(), s2 = initSyncState() // make two nodes with the same changes @@ -983,7 +984,7 @@ describe('Automerge', () => { }) it('n1 should offer all changes to n2 when starting from nothing', () => { - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) // make changes for n1 that n2 should request const list = n1.putObject("_root", "n", []) @@ -999,7 +1000,7 @@ describe('Automerge', () => { }) it('should sync peers where one has commits the other does not', () => { - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) // make changes for n1 that n2 should request const list = n1.putObject("_root", "n", []) @@ -1016,7 +1017,7 @@ describe('Automerge', () => { it('should 
work with prior sync state', () => { // create & synchronize two nodes - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) const s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) { @@ -1039,7 +1040,7 @@ describe('Automerge', () => { it('should not generate messages once synced', () => { // create & synchronize two nodes - const n1 = create('abc123'), n2 = create('def456') + const n1 = create(true, 'abc123'), n2 = create(true, 'def456') const s1 = initSyncState(), s2 = initSyncState() let message @@ -1087,7 +1088,7 @@ describe('Automerge', () => { it('should allow simultaneous messages during synchronization', () => { // create & synchronize two nodes - const n1 = create('abc123'), n2 = create('def456') + const n1 = create(true, 'abc123'), n2 = create(true, 'def456') const s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) { @@ -1166,7 +1167,7 @@ describe('Automerge', () => { }) it('should assume sent changes were received until we hear otherwise', () => { - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') const s1 = initSyncState(), s2 = initSyncState() let message = null @@ -1197,7 +1198,7 @@ describe('Automerge', () => { it('should work regardless of who initiates the exchange', () => { // create & synchronize two nodes - const n1 = create(), n2 = create() + const n1 = create(true), n2 = create(true) const s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) { @@ -1225,7 +1226,7 @@ describe('Automerge', () => { // lastSync is undefined. // create two peers both with divergent commits - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') //const s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 10; i++) { @@ -1258,7 +1259,7 @@ describe('Automerge', () => { // lastSync is c9. // create two peers both with divergent commits - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 10; i++) { @@ -1287,7 +1288,7 @@ describe('Automerge', () => { }) it('should ensure non-empty state after sync', () => { - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') const s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 3; i++) { @@ -1306,7 +1307,7 @@ describe('Automerge', () => { // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 // n2 has changes {c0, c1, c2}, n1's lastSync is c5, and n2's lastSync is c2. 
// we want to successfully sync (n1) with (r), even though (n1) believes it's talking to (n2) - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') let s1 = initSyncState() const s2 = initSyncState() @@ -1355,7 +1356,7 @@ describe('Automerge', () => { }) it('should re-sync after one node experiences data loss without disconnecting', () => { - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') const s1 = initSyncState(), s2 = initSyncState() // n1 makes three changes, which we sync to n2 @@ -1369,7 +1370,7 @@ describe('Automerge', () => { assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) assert.deepStrictEqual(n1.materialize(), n2.materialize()) - const n2AfterDataLoss = create('89abcdef') + const n2AfterDataLoss = create(true, '89abcdef') // "n2" now has no data, but n1 still thinks it does. Note we don't do // decodeSyncState(encodeSyncState(s1)) in order to simulate data loss without disconnecting @@ -1379,7 +1380,7 @@ describe('Automerge', () => { }) it('should handle changes concurrent to the last sync heads', () => { - const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('fedcba98') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef'), n3 = create(true, 'fedcba98') const s12 = initSyncState(), s21 = initSyncState(), s23 = initSyncState(), s32 = initSyncState() // Change 1 is known to all three nodes @@ -1415,7 +1416,7 @@ describe('Automerge', () => { }) it('should handle histories with lots of branching and merging', () => { - const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('fedcba98') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef'), n3 = create(true, 'fedcba98') n1.put("_root", "x", 0); n1.commit("", 0) const change1 = n1.getLastLocalChange() if (change1 === null) throw new RangeError("no local change") @@ -1463,7 +1464,7 @@ describe('Automerge', () => { // `-- n2 // where n2 is a false positive in the Bloom filter containing {n1}. // lastSync is c9. - let n1 = create('01234567'), n2 = create('89abcdef') + let n1 = create(true, '01234567'), n2 = create(true, '89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 10; i++) { @@ -1498,8 +1499,8 @@ describe('Automerge', () => { // `-- n2c1 <-- n2c2 // where n2c1 is a false positive in the Bloom filter containing {n1c1, n1c2}. // lastSync is c9. - n1 = create('01234567') - n2 = create('89abcdef') + n1 = create(true, '01234567') + n2 = create(true, '89abcdef') s1 = initSyncState() s2 = initSyncState() for (let i = 0; i < 10; i++) { @@ -1568,7 +1569,7 @@ describe('Automerge', () => { assert.strictEqual(decodeSyncMessage(m2).changes.length, 1) // only n2c2; change n2c1 is not sent // n3 is a node that doesn't have the missing change. Nevertheless n1 is going to ask n3 for it - const n3 = create('fedcba98'), s13 = initSyncState(), s31 = initSyncState() + const n3 = create(true, 'fedcba98'), s13 = initSyncState(), s31 = initSyncState() sync(n1, n3, s13, s31) assert.deepStrictEqual(n1.getHeads(), [n1hash2]) assert.deepStrictEqual(n3.getHeads(), [n1hash2]) @@ -1581,7 +1582,7 @@ describe('Automerge', () => { // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c2 is a false positive in the Bloom filter containing {n1c1, n1c2, n1c3}. // lastSync is c4. 
- let n1 = create('01234567'), n2 = create('89abcdef') + let n1 = create(true, '01234567'), n2 = create(true, '89abcdef') let s1 = initSyncState(), s2 = initSyncState() let n1hash3, n2hash3 @@ -1634,8 +1635,8 @@ describe('Automerge', () => { // `-- n2c1 <-- n2c2 <-- n2c3 // where n2c1 and n2c2 are both false positives in the Bloom filter containing {c5}. // lastSync is c4. - const n1 = create('01234567') - let n2 = create('89abcdef') + const n1 = create(true, '01234567') + let n2 = create(true, '89abcdef') let s1 = initSyncState(), s2 = initSyncState() for (let i = 0; i < 5; i++) { @@ -1675,7 +1676,7 @@ describe('Automerge', () => { // c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ // `-- n2 // where n2 causes a false positive in the Bloom filter containing {n1}. - let n1 = create('01234567'), n2 = create('89abcdef') + let n1 = create(true, '01234567'), n2 = create(true, '89abcdef') let s1 = initSyncState(), s2 = initSyncState() let message @@ -1735,7 +1736,7 @@ describe('Automerge', () => { // n1 has {c0, c1, c2, n1c1, n1c2, n1c3, n2c1, n2c2}; // n2 has {c0, c1, c2, n1c1, n1c2, n2c1, n2c2, n2c3}; // n3 has {c0, c1, c2, n3c1, n3c2, n3c3}. - const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('76543210') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef'), n3 = create(true, '76543210') let s13 = initSyncState() const s12 = initSyncState() const s21 = initSyncState() @@ -1807,7 +1808,7 @@ describe('Automerge', () => { }) it('should allow any change to be requested', () => { - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') const s1 = initSyncState(), s2 = initSyncState() let message = null @@ -1835,7 +1836,7 @@ describe('Automerge', () => { }) it('should ignore requests for a nonexistent change', () => { - const n1 = create('01234567'), n2 = create('89abcdef') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef') const s1 = initSyncState(), s2 = initSyncState() let message = null @@ -1858,7 +1859,7 @@ describe('Automerge', () => { // ,-- c1 <-- c2 // c0 <-+ // `-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 - const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('76543210') + const n1 = create(true, '01234567'), n2 = create(true, '89abcdef'), n3 = create(true, '76543210') let s1 = initSyncState(), s2 = initSyncState() let msg @@ -1930,7 +1931,7 @@ describe('Automerge', () => { }) it('can handle overlappying splices', () => { - const doc = create() + const doc = create(true) doc.enablePatches(true) let mat : any = doc.materialize("/") doc.putObject("/", "text", "abcdefghij") @@ -1941,7 +1942,7 @@ describe('Automerge', () => { }) it('can handle utf16 text', () => { - const doc = create() + const doc = create(true) doc.enablePatches(true) let mat : any = doc.materialize("/") @@ -1957,7 +1958,7 @@ describe('Automerge', () => { mat = doc.applyPatches(mat) - const remote = load(doc.save()) + const remote = load(doc.save(), true) remote.enablePatches(true) let r_mat : any = remote.materialize("/") @@ -2028,7 +2029,7 @@ describe('Automerge', () => { message: null, deps: [] } - const doc = load(encodeChange(change)); + const doc = load(encodeChange(change), true); doc.enablePatches(true) const mat : any = doc.materialize("/") @@ -2068,4 +2069,105 @@ describe('Automerge', () => { assert.deepEqual(doc5.getAll("/bad_text", 2, doc.getHeads()), [['str', 'BBBBB', '3@aaaa' ]]) }) }) + + describe("the legacy text implementation", () => { + const root = 
"_root" + class FakeText { + elems: Array + constructor(elems: string | Array) { + if (typeof elems === "string") { + this.elems = Array.from(elems) + } else { + this.elems = elems + } + } + } + it("should materialize old style text", () => { + let doc = create(false); + doc.registerDatatype("text", (e: any) => new FakeText(e)) + doc.enablePatches(true) + let txt = doc.putObject(root, "text", "") + doc.splice(txt, 0, 0, "hello") + let mat: any = doc.materialize() + assert.deepEqual(mat.text, new FakeText("hello")) + }) + + it("should apply patches to old style text", () => { + let doc = create(false); + doc.registerDatatype("text", (e: any) => new FakeText(e)) + doc.enablePatches(true) + let mat : any = doc.materialize("/") + doc.putObject("/", "text", "abcdefghij") + doc.splice("/text", 2, 2, "00") + doc.splice("/text", 3, 5, "11") + mat = doc.applyPatches(mat) + assert.deepEqual(mat.text, new FakeText("ab011ij")) + }) + + it("should apply list patches to old style text", () => { + let doc = create(false); + doc.registerDatatype("text", (e: any) => new FakeText(e)) + doc.enablePatches(true) + let mat : any = doc.materialize("/") + doc.putObject("/", "text", "abc") + doc.insert("/text", 0, "0") + doc.insert("/text", 1, "1") + mat = doc.applyPatches(mat) + assert.deepEqual(mat.text, new FakeText("01abc")) + }) + + it("should allow inserting using list methods", () => { + let doc = create(false); + doc.registerDatatype("text", (e: any) => new FakeText(e)) + doc.enablePatches(true) + let mat : any = doc.materialize("/") + const txt = doc.putObject("/", "text", "abc") + doc.insert(txt, 3, "d") + doc.insert(txt, 0, "0") + mat = doc.applyPatches(mat) + assert.deepEqual(mat.text, new FakeText("0abcd")) + }) + + it("should allow inserting objects in old style text", () => { + let doc = create(false); + doc.registerDatatype("text", (e: any) => new FakeText(e)) + doc.enablePatches(true) + let mat : any = doc.materialize("/") + const txt = doc.putObject("/", "text", "abc") + doc.insertObject(txt, 0, {"key": "value"}) + doc.insertObject(txt, 2, ["elem"]) + doc.insert(txt, 2, "m") + mat = doc.applyPatches(mat) + assert.deepEqual(mat.text, new FakeText([ + {"key": "value"}, "a", "m", ["elem"], "b", "c" + ])) + }) + + class RawString { + val: string; + constructor(s: string) { + this.val = s + } + } + + it("should allow registering a different type for strings", () => { + let doc = create(false); + doc.registerDatatype("str", (e: any) => new RawString(e)) + doc.enablePatches(true) + doc.put("/", "key", "value") + let mat: any = doc.materialize() + assert.deepStrictEqual(mat.key, new RawString("value")) + }) + + it("should generate patches correctly for raw strings", () => { + let doc = create(false); + doc.registerDatatype("str", (e: any) => new RawString(e)) + doc.enablePatches(true) + let mat: any = doc.materialize() + doc.put("/", "key", "value") + mat = doc.applyPatches(mat) + assert.deepStrictEqual(mat.key, new RawString("value")) + }) + + }) }) diff --git a/rust/automerge/src/op_observer.rs b/rust/automerge/src/op_observer.rs index 2150b1de..0d082219 100644 --- a/rust/automerge/src/op_observer.rs +++ b/rust/automerge/src/op_observer.rs @@ -114,6 +114,13 @@ pub trait OpObserver { /// /// - `other`: Another Op Observer of the same type fn merge(&mut self, other: &Self); + + /// Whether to call sequence methods or `splice_text` when encountering changes in text + /// + /// Returns `false` by default + fn text_as_seq(&self) -> bool { + false + } } impl OpObserver for () { diff --git 
a/rust/automerge/src/transaction/inner.rs b/rust/automerge/src/transaction/inner.rs index 2099acef..cba4e723 100644 --- a/rust/automerge/src/transaction/inner.rs +++ b/rust/automerge/src/transaction/inner.rs @@ -198,6 +198,7 @@ impl TransactionInner { match (&prop, obj_type) { (Prop::Map(_), ObjType::Map) => Ok(()), (Prop::Seq(_), ObjType::List) => Ok(()), + (Prop::Seq(_), ObjType::Text) => Ok(()), _ => Err(AutomergeError::InvalidOp(obj_type)), }?; self.local_op(doc, op_observer, obj, prop, value.into())?; @@ -294,7 +295,7 @@ impl TransactionInner { value: V, ) -> Result<(), AutomergeError> { let (obj, obj_type) = doc.exid_to_obj(ex_obj)?; - if obj_type != ObjType::List { + if !matches!(obj_type, ObjType::List | ObjType::Text) { return Err(AutomergeError::InvalidOp(obj_type)); } let value = value.into(); @@ -312,7 +313,7 @@ impl TransactionInner { value: ObjType, ) -> Result { let (obj, obj_type) = doc.exid_to_obj(ex_obj)?; - if obj_type != ObjType::List { + if !matches!(obj_type, ObjType::List | ObjType::Text) { return Err(AutomergeError::InvalidOp(obj_type)); } let id = self.do_insert(doc, op_observer, obj, index, value.into())?; @@ -510,7 +511,7 @@ impl TransactionInner { vals: impl IntoIterator, ) -> Result<(), AutomergeError> { let (obj, obj_type) = doc.exid_to_obj(ex_obj)?; - if obj_type != ObjType::List { + if !matches!(obj_type, ObjType::List | ObjType::Text) { return Err(AutomergeError::InvalidOp(obj_type)); } let values = vals.into_iter().collect(); @@ -631,7 +632,10 @@ impl TransactionInner { // handle the observer if let Some(obs) = op_observer.as_mut() { match splice_type { - SpliceType::List => { + SpliceType::Text(text, _) if !obs.text_as_seq() => { + obs.splice_text(doc, ex_obj, index, text) + } + SpliceType::List | SpliceType::Text(..) => { let start = self.operations.len() - values.len(); for (offset, v) in values.iter().enumerate() { let op = &self.operations[start + offset].1; @@ -639,7 +643,6 @@ impl TransactionInner { obs.insert(doc, ex_obj.clone(), index + offset, value) } } - SpliceType::Text(text, _) => obs.splice_text(doc, ex_obj, index, text), } } } @@ -668,7 +671,12 @@ impl TransactionInner { } (Some(ObjType::Text), Prop::Seq(index)) => { // FIXME - op_observer.splice_text(doc, ex_obj, index, op.to_str()) + if op_observer.text_as_seq() { + let value = (op.value(), doc.ops.id_to_exid(op.id)); + op_observer.insert(doc, ex_obj, index, value) + } else { + op_observer.splice_text(doc, ex_obj, index, op.to_str()) + } } _ => {} } diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index 069a664d..6ab797f0 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -1393,8 +1393,8 @@ fn ops_on_wrong_objets() -> Result<(), AutomergeError> { doc.splice_text(&text, 0, 0, "hello world")?; let e5 = doc.put(&text, "a", "AAA"); assert_eq!(e5, Err(AutomergeError::InvalidOp(ObjType::Text))); - let e6 = doc.insert(&text, 0, "b"); - assert_eq!(e6, Err(AutomergeError::InvalidOp(ObjType::Text))); + //let e6 = doc.insert(&text, 0, "b"); + //assert_eq!(e6, Err(AutomergeError::InvalidOp(ObjType::Text))); Ok(()) } From 6c0d102032c066166cc4dab7770360d51d67504e Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 22 Dec 2022 09:17:10 +0000 Subject: [PATCH 19/72] automerge-js: Add backwards compatibility text layer The new text features are faster and more ergonomic but not backwards compatible. In order to make them backwards compatible re-expose the original functionality and move the new API under a `future` export. 
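For illustration only (this sketch is not part of the patch, and the exact names are assumptions; the diff below actually re-exports the new API from the package root under the name `unstable`), the two layers might be used side by side roughly like so:

    // Sketch under assumptions: `Text`, `insertAt`, and the `unstable.splice`
    // signature are inferred from the legacy API and this diff, and may differ.
    import * as Automerge from "@automerge/automerge"

    // Legacy layer (package root): text fields are Text objects.
    let oldDoc = Automerge.from({ note: new Automerge.Text("hello") })
    oldDoc = Automerge.change(oldDoc, d => { d.note.insertAt(5, "!") })

    // New layer (re-exported namespace): text fields are plain strings,
    // edited in place with a splice helper.
    let newDoc = Automerge.unstable.from({ note: "hello" })
    newDoc = Automerge.unstable.change(newDoc, d => {
      Automerge.unstable.splice(d, "note", 5, 0, "!")
    })
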
This allows users to interoperably use both implementations. --- javascript/.gitignore | 1 + javascript/config/cjs.json | 1 + javascript/config/mjs.json | 1 + javascript/package.json | 6 +- javascript/src/counter.ts | 6 +- javascript/src/index.ts | 1001 +------------------- javascript/src/internal_state.ts | 43 + javascript/src/proxies.ts | 343 +++++-- javascript/src/raw_string.ts | 6 + javascript/src/stable.ts | 955 +++++++++++++++++++ javascript/src/text.ts | 218 +++++ javascript/src/types.ts | 26 + javascript/src/unstable.ts | 292 ++++++ javascript/test/basic_test.ts | 2 +- javascript/test/extra_api_tests.ts | 2 +- javascript/test/legacy_tests.ts | 2 +- javascript/test/stable_unstable_interop.ts | 41 + javascript/test/text_test.ts | 2 +- javascript/test/text_v1.ts | 281 ++++++ 19 files changed, 2159 insertions(+), 1070 deletions(-) create mode 100644 javascript/src/internal_state.ts create mode 100644 javascript/src/raw_string.ts create mode 100644 javascript/src/stable.ts create mode 100644 javascript/src/text.ts create mode 100644 javascript/src/unstable.ts create mode 100644 javascript/test/stable_unstable_interop.ts create mode 100644 javascript/test/text_v1.ts diff --git a/javascript/.gitignore b/javascript/.gitignore index bf2aad08..ab4ec70d 100644 --- a/javascript/.gitignore +++ b/javascript/.gitignore @@ -2,3 +2,4 @@ /yarn.lock dist docs/ +.vim diff --git a/javascript/config/cjs.json b/javascript/config/cjs.json index 9cfceed5..fc500311 100644 --- a/javascript/config/cjs.json +++ b/javascript/config/cjs.json @@ -1,5 +1,6 @@ { "extends": "../tsconfig.json", + "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], "compilerOptions": { "outDir": "../dist/cjs" } diff --git a/javascript/config/mjs.json b/javascript/config/mjs.json index 5b02ee0e..2ee7a8b8 100644 --- a/javascript/config/mjs.json +++ b/javascript/config/mjs.json @@ -1,5 +1,6 @@ { "extends": "../tsconfig.json", + "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], "compilerOptions": { "target": "es6", "module": "es6", diff --git a/javascript/package.json b/javascript/package.json index b7afb5b7..33523370 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -21,17 +21,21 @@ "dist/cjs/uuid.js", "dist/cjs/counter.js", "dist/cjs/low_level.js", + "dist/cjs/next.js", "dist/cjs/text.js", "dist/cjs/proxies.js", + "dist/cjs/raw_string.js", "dist/mjs/constants.js", "dist/mjs/types.js", "dist/mjs/numbers.js", + "dist/mjs/next.js", "dist/mjs/index.js", "dist/mjs/uuid.js", "dist/mjs/counter.js", "dist/mjs/low_level.js", "dist/mjs/text.js", - "dist/mjs/proxies.js" + "dist/mjs/proxies.js", + "dist/mjs/raw_string.js" ], "types": "./dist/index.d.ts", "module": "./dist/mjs/index.js", diff --git a/javascript/src/counter.ts b/javascript/src/counter.ts index d94a3034..6b9ad277 100644 --- a/javascript/src/counter.ts +++ b/javascript/src/counter.ts @@ -49,14 +49,14 @@ export class Counter { */ class WriteableCounter extends Counter { context: Automerge - path: string[] + path: Prop[] objectId: ObjID key: Prop constructor( value: number, context: Automerge, - path: string[], + path: Prop[], objectId: ObjID, key: Prop ) { @@ -97,7 +97,7 @@ class WriteableCounter extends Counter { export function getWriteableCounter( value: number, context: Automerge, - path: string[], + path: Prop[], objectId: ObjID, key: Prop ) { diff --git a/javascript/src/index.ts b/javascript/src/index.ts index a5b3a0bb..7d4a68ba 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -1,998 +1,3 @@ -/** @hidden **/ 
-export { /** @hidden */ uuid } from "./uuid" - -import { rootProxy, listProxy, mapProxy } from "./proxies" -import { STATE, TRACE, IS_PROXY, OBJECT_ID } from "./constants" - -import { AutomergeValue, Counter } from "./types" -export { - AutomergeValue, - Counter, - Int, - Uint, - Float64, - ScalarValue, -} from "./types" - -import { type API, type Patch } from "@automerge/automerge-wasm" -export { - type Patch, - PutPatch, - DelPatch, - SplicePatch, - IncPatch, - SyncMessage, -} from "@automerge/automerge-wasm" -import { ApiHandler, ChangeToEncode, UseApi } from "./low_level" - -import { - Actor as ActorId, - Prop, - ObjID, - Change, - DecodedChange, - Heads, - Automerge, - MaterializeValue, -} from "@automerge/automerge-wasm" -import { - JsSyncState as SyncState, - SyncMessage, - DecodedSyncMessage, -} from "@automerge/automerge-wasm" - -/** Options passed to {@link change}, and {@link emptyChange} - * @typeParam T - The type of value contained in the document - */ -export type ChangeOptions = { - /** A message which describes the changes */ - message?: string - /** The unix timestamp of the change (purely advisory, not used in conflict resolution) */ - time?: number - /** A callback which will be called to notify the caller of any changes to the document */ - patchCallback?: PatchCallback -} - -/** Options passed to {@link loadIncremental}, {@link applyChanges}, and {@link receiveSyncMessage} - * @typeParam T - The type of value contained in the document - */ -export type ApplyOptions = { patchCallback?: PatchCallback } - -/** - * An automerge document. - * @typeParam T - The type of the value contained in this document - * - * Note that this provides read only access to the fields of the value. To - * modify the value use {@link change} - */ -export type Doc = { readonly [P in keyof T]: T[P] } - -/** - * Function which is called by {@link change} when making changes to a `Doc` - * @typeParam T - The type of value contained in the document - * - * This function may mutate `doc` - */ -export type ChangeFn = (doc: T) => void - -/** - * Callback which is called by various methods in this library to notify the - * user of what changes have been made. 
- * @param patch - A description of the changes made - * @param before - The document before the change was made - * @param after - The document after the change was made - */ -export type PatchCallback = ( - patches: Array, - before: Doc, - after: Doc -) => void - -/** @hidden **/ -export interface State { - change: DecodedChange - snapshot: T -} - -/** @hidden **/ -export function use(api: API) { - UseApi(api) -} - -import * as wasm from "@automerge/automerge-wasm" -use(wasm) - -/** - * Options to be passed to {@link init} or {@link load} - * @typeParam T - The type of the value the document contains - */ -export type InitOptions = { - /** The actor ID to use for this document, a random one will be generated if `null` is passed */ - actor?: ActorId - freeze?: boolean - /** A callback which will be called with the initial patch once the document has finished loading */ - patchCallback?: PatchCallback -} - -interface InternalState { - handle: Automerge - heads: Heads | undefined - freeze: boolean - patchCallback?: PatchCallback -} - -/** @hidden */ -export function getBackend(doc: Doc): Automerge { - return _state(doc).handle -} - -function _state(doc: Doc, checkroot = true): InternalState { - if (typeof doc !== "object") { - throw new RangeError("must be the document root") - } - const state = Reflect.get(doc, STATE) as InternalState - if ( - state === undefined || - state == null || - (checkroot && _obj(doc) !== "_root") - ) { - throw new RangeError("must be the document root") - } - return state -} - -function _trace(doc: Doc): string | undefined { - return Reflect.get(doc, TRACE) as string -} - -function _obj(doc: Doc): ObjID | null { - if (!(typeof doc === "object") || doc === null) { - return null - } - return Reflect.get(doc, OBJECT_ID) as ObjID -} - -function _is_proxy(doc: Doc): boolean { - return !!Reflect.get(doc, IS_PROXY) -} - -function importOpts(_actor?: ActorId | InitOptions): InitOptions { - if (typeof _actor === "object") { - return _actor - } else { - return { actor: _actor } - } -} - -/** - * Create a new automerge document - * - * @typeParam T - The type of value contained in the document. This will be the - * type that is passed to the change closure in {@link change} - * @param _opts - Either an actorId or an {@link InitOptions} (which may - * contain an actorId). If this is null the document will be initialised with a - * random actor ID - */ -export function init(_opts?: ActorId | InitOptions): Doc { - const opts = importOpts(_opts) - const freeze = !!opts.freeze - const patchCallback = opts.patchCallback - const handle = ApiHandler.create(true, opts.actor) - handle.enablePatches(true) - handle.enableFreeze(!!opts.freeze) - handle.registerDatatype("counter", n => new Counter(n)) - const doc = handle.materialize("/", undefined, { - handle, - heads: undefined, - freeze, - patchCallback, - }) as Doc - return doc -} - -/** - * Make an immutable view of an automerge document as at `heads` - * - * @remarks - * The document returned from this function cannot be passed to {@link change}. - * This is because it shares the same underlying memory as `doc`, but it is - * consequently a very cheap copy. - * - * Note that this function will throw an error if any of the hashes in `heads` - * are not in the document. 
- * - * @typeParam T - The type of the value contained in the document - * @param doc - The document to create a view of - * @param heads - The hashes of the heads to create a view at - */ -export function view(doc: Doc, heads: Heads): Doc { - const state = _state(doc) - const handle = state.handle - return state.handle.materialize("/", heads, { - ...state, - handle, - heads, - }) as Doc -} - -/** - * Make a full writable copy of an automerge document - * - * @remarks - * Unlike {@link view} this function makes a full copy of the memory backing - * the document and can thus be passed to {@link change}. It also generates a - * new actor ID so that changes made in the new document do not create duplicate - * sequence numbers with respect to the old document. If you need control over - * the actor ID which is generated you can pass the actor ID as the second - * argument - * - * @typeParam T - The type of the value contained in the document - * @param doc - The document to clone - * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} - */ -export function clone( - doc: Doc, - _opts?: ActorId | InitOptions -): Doc { - const state = _state(doc) - const heads = state.heads - const opts = importOpts(_opts) - const handle = state.handle.fork(opts.actor, heads) - - // `change` uses the presence of state.heads to determine if we are in a view - // set it to undefined to indicate that this is a full fat document - const { heads: oldHeads, ...stateSansHeads } = state - return handle.applyPatches(doc, { ...stateSansHeads, handle }) -} - -/** Explicity free the memory backing a document. Note that this is note - * necessary in environments which support - * [`FinalizationRegistry`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry) - */ -export function free(doc: Doc) { - return _state(doc).handle.free() -} - -/** - * Create an automerge document from a POJO - * - * @param initialState - The initial state which will be copied into the document - * @typeParam T - The type of the value passed to `from` _and_ the type the resulting document will contain - * @typeParam actor - The actor ID of the resulting document, if this is null a random actor ID will be used - * - * @example - * ``` - * const doc = automerge.from({ - * tasks: [ - * {description: "feed dogs", done: false} - * ] - * }) - * ``` - */ -export function from>( - initialState: T | Doc, - _opts?: ActorId | InitOptions -): Doc { - return change(init(_opts), d => Object.assign(d, initialState)) -} - -/** - * Update the contents of an automerge document - * @typeParam T - The type of the value contained in the document - * @param doc - The document to update - * @param options - Either a message, an {@link ChangeOptions}, or a {@link ChangeFn} - * @param callback - A `ChangeFn` to be used if `options` was a `string` - * - * Note that if the second argument is a function it will be used as the `ChangeFn` regardless of what the third argument is. 
- * - * @example A simple change - * ``` - * let doc1 = automerge.init() - * doc1 = automerge.change(doc1, d => { - * d.key = "value" - * }) - * assert.equal(doc1.key, "value") - * ``` - * - * @example A change with a message - * - * ``` - * doc1 = automerge.change(doc1, "add another value", d => { - * d.key2 = "value2" - * }) - * ``` - * - * @example A change with a message and a timestamp - * - * ``` - * doc1 = automerge.change(doc1, {message: "add another value", timestamp: 1640995200}, d => { - * d.key2 = "value2" - * }) - * ``` - * - * @example responding to a patch callback - * ``` - * let patchedPath - * let patchCallback = patch => { - * patchedPath = patch.path - * } - * doc1 = automerge.change(doc1, {message, "add another value", timestamp: 1640995200, patchCallback}, d => { - * d.key2 = "value2" - * }) - * assert.equal(patchedPath, ["key2"]) - * ``` - */ -export function change( - doc: Doc, - options: string | ChangeOptions | ChangeFn, - callback?: ChangeFn -): Doc { - if (typeof options === "function") { - return _change(doc, {}, options) - } else if (typeof callback === "function") { - if (typeof options === "string") { - options = { message: options } - } - return _change(doc, options, callback) - } else { - throw RangeError("Invalid args for change") - } -} - -function progressDocument( - doc: Doc, - heads: Heads | null, - callback?: PatchCallback -): Doc { - if (heads == null) { - return doc - } - const state = _state(doc) - const nextState = { ...state, heads: undefined } - const nextDoc = state.handle.applyPatches(doc, nextState, callback) - state.heads = heads - return nextDoc -} - -function _change( - doc: Doc, - options: ChangeOptions, - callback: ChangeFn -): Doc { - if (typeof callback !== "function") { - throw new RangeError("invalid change function") - } - - const state = _state(doc) - - if (doc === undefined || state === undefined) { - throw new RangeError("must be the document root") - } - if (state.heads) { - throw new RangeError( - "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." - ) - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - try { - state.heads = heads - const root: T = rootProxy(state.handle) - callback(root) - if (state.handle.pendingOps() === 0) { - state.heads = undefined - return doc - } else { - state.handle.commit(options.message, options.time) - return progressDocument( - doc, - heads, - options.patchCallback || state.patchCallback - ) - } - } catch (e) { - //console.log("ERROR: ",e) - state.heads = undefined - state.handle.rollback() - throw e - } -} - -/** - * Make a change to a document which does not modify the document - * - * @param doc - The doc to add the empty change to - * @param options - Either a message or a {@link ChangeOptions} for the new change - * - * Why would you want to do this? One reason might be that you have merged - * changes from some other peers and you want to generate a change which - * depends on those merged changes so that you can sign the new change with all - * of the merged changes as part of the new change. - */ -export function emptyChange( - doc: Doc, - options: string | ChangeOptions | void -) { - if (options === undefined) { - options = {} - } - if (typeof options === "string") { - options = { message: options } - } - - const state = _state(doc) - - if (state.heads) { - throw new RangeError( - "Attempting to change an outdated document. 
Use Automerge.clone() if you wish to make a writable copy." - ) - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - - const heads = state.handle.getHeads() - state.handle.emptyChange(options.message, options.time) - return progressDocument(doc, heads) -} - -/** - * Load an automerge document from a compressed document produce by {@link save} - * - * @typeParam T - The type of the value which is contained in the document. - * Note that no validation is done to make sure this type is in - * fact the type of the contained value so be a bit careful - * @param data - The compressed document - * @param _opts - Either an actor ID or some {@link InitOptions}, if the actor - * ID is null a random actor ID will be created - * - * Note that `load` will throw an error if passed incomplete content (for - * example if you are receiving content over the network and don't know if you - * have the complete document yet). If you need to handle incomplete content use - * {@link init} followed by {@link loadIncremental}. - */ -export function load( - data: Uint8Array, - _opts?: ActorId | InitOptions -): Doc { - const opts = importOpts(_opts) - const actor = opts.actor - const patchCallback = opts.patchCallback - const handle = ApiHandler.load(data, true, actor) - handle.enablePatches(true) - handle.enableFreeze(!!opts.freeze) - handle.registerDatatype("counter", n => new Counter(n)) - const doc = handle.materialize("/", undefined, { - handle, - heads: undefined, - patchCallback, - }) as Doc - return doc -} - -/** - * Load changes produced by {@link saveIncremental}, or partial changes - * - * @typeParam T - The type of the value which is contained in the document. - * Note that no validation is done to make sure this type is in - * fact the type of the contained value so be a bit careful - * @param data - The compressedchanges - * @param opts - an {@link ApplyOptions} - * - * This function is useful when staying up to date with a connected peer. - * Perhaps the other end sent you a full compresed document which you loaded - * with {@link load} and they're sending you the result of - * {@link getLastLocalChange} every time they make a change. - * - * Note that this function will succesfully load the results of {@link save} as - * well as {@link getLastLocalChange} or any other incremental change. - */ -export function loadIncremental( - doc: Doc, - data: Uint8Array, - opts?: ApplyOptions -): Doc { - if (!opts) { - opts = {} - } - const state = _state(doc) - if (state.heads) { - throw new RangeError( - "Attempting to change an out of date document - set at: " + _trace(doc) - ) - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - state.handle.loadIncremental(data) - return progressDocument(doc, heads, opts.patchCallback || state.patchCallback) -} - -/** - * Export the contents of a document to a compressed format - * - * @param doc - The doc to save - * - * The returned bytes can be passed to {@link load} or {@link loadIncremental} - */ -export function save(doc: Doc): Uint8Array { - return _state(doc).handle.save() -} - -/** - * Merge `local` into `remote` - * @typeParam T - The type of values contained in each document - * @param local - The document to merge changes into - * @param remote - The document to merge changes from - * - * @returns - The merged document - * - * Often when you are merging documents you will also need to clone them. 
Both - * arguments to `merge` are frozen after the call so you can no longer call - * mutating methods (such as {@link change}) on them. The symtom of this will be - * an error which says "Attempting to change an out of date document". To - * overcome this call {@link clone} on the argument before passing it to {@link - * merge}. - */ -export function merge(local: Doc, remote: Doc): Doc { - const localState = _state(local) - - if (localState.heads) { - throw new RangeError( - "Attempting to change an out of date document - set at: " + _trace(local) - ) - } - const heads = localState.handle.getHeads() - const remoteState = _state(remote) - const changes = localState.handle.getChangesAdded(remoteState.handle) - localState.handle.applyChanges(changes) - return progressDocument(local, heads, localState.patchCallback) -} - -/** - * Get the actor ID associated with the document - */ -export function getActorId(doc: Doc): ActorId { - const state = _state(doc) - return state.handle.getActorId() -} - -/** - * The type of conflicts for particular key or index - * - * Maps and sequences in automerge can contain conflicting values for a - * particular key or index. In this case {@link getConflicts} can be used to - * obtain a `Conflicts` representing the multiple values present for the property - * - * A `Conflicts` is a map from a unique (per property or index) key to one of - * the possible conflicting values for the given property. - */ -type Conflicts = { [key: string]: AutomergeValue } - -function conflictAt( - context: Automerge, - objectId: ObjID, - prop: Prop -): Conflicts | undefined { - const values = context.getAll(objectId, prop) - if (values.length <= 1) { - return - } - const result: Conflicts = {} - for (const fullVal of values) { - switch (fullVal[0]) { - case "map": - result[fullVal[1]] = mapProxy(context, fullVal[1], [prop], true) - break - case "list": - result[fullVal[1]] = listProxy(context, fullVal[1], [prop], true) - break - case "text": - result[fullVal[1]] = context.text(fullVal[1]) - break - //case "table": - //case "cursor": - case "str": - case "uint": - case "int": - case "f64": - case "boolean": - case "bytes": - case "null": - result[fullVal[2]] = fullVal[1] - break - case "counter": - result[fullVal[2]] = new Counter(fullVal[1]) - break - case "timestamp": - result[fullVal[2]] = new Date(fullVal[1]) - break - default: - throw RangeError(`datatype ${fullVal[0]} unimplemented`) - } - } - return result -} - -/** - * Get the conflicts associated with a property - * - * The values of properties in a map in automerge can be conflicted if there - * are concurrent "put" operations to the same key. Automerge chooses one value - * arbitrarily (but deterministically, any two nodes who have the same set of - * changes will choose the same value) from the set of conflicting values to - * present as the value of the key. - * - * Sometimes you may want to examine these conflicts, in this case you can use - * {@link getConflicts} to get the conflicts for the key. 
- * - * @example - * ``` - * import * as automerge from "@automerge/automerge" - * - * type Profile = { - * pets: Array<{name: string, type: string}> - * } - * - * let doc1 = automerge.init("aaaa") - * doc1 = automerge.change(doc1, d => { - * d.pets = [{name: "Lassie", type: "dog"}] - * }) - * let doc2 = automerge.init("bbbb") - * doc2 = automerge.merge(doc2, automerge.clone(doc1)) - * - * doc2 = automerge.change(doc2, d => { - * d.pets[0].name = "Beethoven" - * }) - * - * doc1 = automerge.change(doc1, d => { - * d.pets[0].name = "Babe" - * }) - * - * const doc3 = automerge.merge(doc1, doc2) - * - * // Note that here we pass `doc3.pets`, not `doc3` - * let conflicts = automerge.getConflicts(doc3.pets[0], "name") - * - * // The two conflicting values are the keys of the conflicts object - * assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) - * ``` - */ -export function getConflicts( - doc: Doc, - prop: Prop -): Conflicts | undefined { - const state = _state(doc, false) - const objectId = _obj(doc) - if (objectId != null) { - return conflictAt(state.handle, objectId, prop) - } else { - return undefined - } -} - -/** - * Get the binary representation of the last change which was made to this doc - * - * This is most useful when staying in sync with other peers, every time you - * make a change locally via {@link change} you immediately call {@link - * getLastLocalChange} and send the result over the network to other peers. - */ -export function getLastLocalChange(doc: Doc): Change | undefined { - const state = _state(doc) - return state.handle.getLastLocalChange() || undefined -} - -/** - * Return the object ID of an arbitrary javascript value - * - * This is useful to determine if something is actually an automerge document, - * if `doc` is not an automerge document this will return null. - */ -export function getObjectId(doc: Doc, prop?: Prop): ObjID | null { - if (prop) { - const state = _state(doc, false) - const objectId = _obj(doc) - if (!state || !objectId) { - return null - } - return state.handle.get(objectId, prop) as ObjID - } else { - return _obj(doc) - } -} - -/** - * Get the changes which are in `newState` but not in `oldState`. The returned - * changes can be loaded in `oldState` via {@link applyChanges}. - * - * Note that this will crash if there are changes in `oldState` which are not in `newState`. - */ -export function getChanges(oldState: Doc, newState: Doc): Change[] { - const n = _state(newState) - return n.handle.getChanges(getHeads(oldState)) -} - -/** - * Get all the changes in a document - * - * This is different to {@link save} because the output is an array of changes - * which can be individually applied via {@link applyChanges}` - * - */ -export function getAllChanges(doc: Doc): Change[] { - const state = _state(doc) - return state.handle.getChanges([]) -} - -/** - * Apply changes received from another document - * - * `doc` will be updated to reflect the `changes`. If there are changes which - * we do not have dependencies for yet those will be stored in the document and - * applied when the depended on changes arrive. - * - * You can use the {@link ApplyOptions} to pass a patchcallback which will be - * informed of any changes which occur as a result of applying the changes - * - */ -export function applyChanges( - doc: Doc, - changes: Change[], - opts?: ApplyOptions -): [Doc] { - const state = _state(doc) - if (!opts) { - opts = {} - } - if (state.heads) { - throw new RangeError( - "Attempting to change an outdated document. 
Use Automerge.clone() if you wish to make a writable copy." - ) - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - state.handle.applyChanges(changes) - state.heads = heads - return [ - progressDocument(doc, heads, opts.patchCallback || state.patchCallback), - ] -} - -/** @hidden */ -export function getHistory(doc: Doc): State[] { - const history = getAllChanges(doc) - return history.map((change, index) => ({ - get change() { - return decodeChange(change) - }, - get snapshot() { - const [state] = applyChanges(init(), history.slice(0, index + 1)) - return state - }, - })) -} - -/** @hidden */ -// FIXME : no tests -// FIXME can we just use deep equals now? -export function equals(val1: unknown, val2: unknown): boolean { - if (!isObject(val1) || !isObject(val2)) return val1 === val2 - const keys1 = Object.keys(val1).sort(), - keys2 = Object.keys(val2).sort() - if (keys1.length !== keys2.length) return false - for (let i = 0; i < keys1.length; i++) { - if (keys1[i] !== keys2[i]) return false - if (!equals(val1[keys1[i]], val2[keys2[i]])) return false - } - return true -} - -/** - * encode a {@link SyncState} into binary to send over the network - * - * @group sync - * */ -export function encodeSyncState(state: SyncState): Uint8Array { - const sync = ApiHandler.importSyncState(state) - const result = ApiHandler.encodeSyncState(sync) - sync.free() - return result -} - -/** - * Decode some binary data into a {@link SyncState} - * - * @group sync - */ -export function decodeSyncState(state: Uint8Array): SyncState { - const sync = ApiHandler.decodeSyncState(state) - const result = ApiHandler.exportSyncState(sync) - sync.free() - return result -} - -/** - * Generate a sync message to send to the peer represented by `inState` - * @param doc - The doc to generate messages about - * @param inState - The {@link SyncState} representing the peer we are talking to - * - * @group sync - * - * @returns An array of `[newSyncState, syncMessage | null]` where - * `newSyncState` should replace `inState` and `syncMessage` should be sent to - * the peer if it is not null. If `syncMessage` is null then we are up to date. - */ -export function generateSyncMessage( - doc: Doc, - inState: SyncState -): [SyncState, SyncMessage | null] { - const state = _state(doc) - const syncState = ApiHandler.importSyncState(inState) - const message = state.handle.generateSyncMessage(syncState) - const outState = ApiHandler.exportSyncState(syncState) - return [outState, message] -} - -/** - * Update a document and our sync state on receiving a sync message - * - * @group sync - * - * @param doc - The doc the sync message is about - * @param inState - The {@link SyncState} for the peer we are communicating with - * @param message - The message which was received - * @param opts - Any {@link ApplyOption}s, used for passing a - * {@link PatchCallback} which will be informed of any changes - * in `doc` which occur because of the received sync message. - * - * @returns An array of `[newDoc, newSyncState, syncMessage | null]` where - * `newDoc` is the updated state of `doc`, `newSyncState` should replace - * `inState` and `syncMessage` should be sent to the peer if it is not null. If - * `syncMessage` is null then we are up to date. 
- */ -export function receiveSyncMessage( - doc: Doc, - inState: SyncState, - message: SyncMessage, - opts?: ApplyOptions -): [Doc, SyncState, null] { - const syncState = ApiHandler.importSyncState(inState) - if (!opts) { - opts = {} - } - const state = _state(doc) - if (state.heads) { - throw new RangeError( - "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." - ) - } - if (_is_proxy(doc)) { - throw new RangeError("Calls to Automerge.change cannot be nested") - } - const heads = state.handle.getHeads() - state.handle.receiveSyncMessage(syncState, message) - const outSyncState = ApiHandler.exportSyncState(syncState) - return [ - progressDocument(doc, heads, opts.patchCallback || state.patchCallback), - outSyncState, - null, - ] -} - -/** - * Create a new, blank {@link SyncState} - * - * When communicating with a peer for the first time use this to generate a new - * {@link SyncState} for them - * - * @group sync - */ -export function initSyncState(): SyncState { - return ApiHandler.exportSyncState(ApiHandler.initSyncState()) -} - -/** @hidden */ -export function encodeChange(change: ChangeToEncode): Change { - return ApiHandler.encodeChange(change) -} - -/** @hidden */ -export function decodeChange(data: Change): DecodedChange { - return ApiHandler.decodeChange(data) -} - -/** @hidden */ -export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { - return ApiHandler.encodeSyncMessage(message) -} - -/** @hidden */ -export function decodeSyncMessage(message: SyncMessage): DecodedSyncMessage { - return ApiHandler.decodeSyncMessage(message) -} - -/** - * Get any changes in `doc` which are not dependencies of `heads` - */ -export function getMissingDeps(doc: Doc, heads: Heads): Heads { - const state = _state(doc) - return state.handle.getMissingDeps(heads) -} - -export function splice( - doc: Doc, - prop: Prop, - index: number, - del: number, - newText?: string -) { - if (!_is_proxy(doc)) { - throw new RangeError("object cannot be modified outside of a change block") - } - const state = _state(doc, false) - const objectId = _obj(doc) - if (!objectId) { - throw new RangeError("invalid object for splice") - } - const value = `${objectId}/${prop}` - try { - return state.handle.splice(value, index, del, newText) - } catch (e) { - throw new RangeError(`Cannot splice: ${e}`) - } -} - -/** - * Get the hashes of the heads of this document - */ -export function getHeads(doc: Doc): Heads { - const state = _state(doc) - return state.heads || state.handle.getHeads() -} - -/** @hidden */ -export function dump(doc: Doc) { - const state = _state(doc) - state.handle.dump() -} - -/** @hidden */ -export function toJS(doc: Doc): T { - const state = _state(doc) - const enabled = state.handle.enableFreeze(false) - const result = state.handle.materialize() - state.handle.enableFreeze(enabled) - return result as T -} - -export function isAutomerge(doc: unknown): boolean { - if (typeof doc == "object" && doc !== null) { - return getObjectId(doc) === "_root" && !!Reflect.get(doc, STATE) - } else { - return false - } -} - -function isObject(obj: unknown): obj is Record { - return typeof obj === "object" && obj !== null -} - -export type { - API, - SyncState, - ActorId, - Conflicts, - Prop, - Change, - ObjID, - DecodedChange, - DecodedSyncMessage, - Heads, - MaterializeValue, -} +export * from "./stable" +import * as unstable from "./unstable" +export { unstable } diff --git a/javascript/src/internal_state.ts b/javascript/src/internal_state.ts new 
file mode 100644 index 00000000..92ab648e --- /dev/null +++ b/javascript/src/internal_state.ts @@ -0,0 +1,43 @@ +import { ObjID, Heads, Automerge } from "@automerge/automerge-wasm" + +import { STATE, OBJECT_ID, TRACE, IS_PROXY } from "./constants" + +import { type Doc, PatchCallback } from "./types" + +export interface InternalState { + handle: Automerge + heads: Heads | undefined + freeze: boolean + patchCallback?: PatchCallback + textV2: boolean +} + +export function _state(doc: Doc, checkroot = true): InternalState { + if (typeof doc !== "object") { + throw new RangeError("must be the document root") + } + const state = Reflect.get(doc, STATE) as InternalState + if ( + state === undefined || + state == null || + (checkroot && _obj(doc) !== "_root") + ) { + throw new RangeError("must be the document root") + } + return state +} + +export function _trace(doc: Doc): string | undefined { + return Reflect.get(doc, TRACE) as string +} + +export function _obj(doc: Doc): ObjID | null { + if (!(typeof doc === "object") || doc === null) { + return null + } + return Reflect.get(doc, OBJECT_ID) as ObjID +} + +export function _is_proxy(doc: Doc): boolean { + return !!Reflect.get(doc, IS_PROXY) +} diff --git a/javascript/src/proxies.ts b/javascript/src/proxies.ts index 523c4547..3fb3a825 100644 --- a/javascript/src/proxies.ts +++ b/javascript/src/proxies.ts @@ -1,6 +1,13 @@ +import { Text } from "./text" import { Automerge, Heads, ObjID } from "@automerge/automerge-wasm" import { Prop } from "@automerge/automerge-wasm" -import { AutomergeValue, ScalarValue, MapValue, ListValue } from "./types" +import { + AutomergeValue, + ScalarValue, + MapValue, + ListValue, + TextValue, +} from "./types" import { Counter, getWriteableCounter } from "./counter" import { STATE, @@ -12,6 +19,19 @@ import { UINT, F64, } from "./constants" +import { RawString } from "./raw_string" + +type Target = { + context: Automerge + objectId: ObjID + path: Array + readonly: boolean + heads?: Array + cache: {} + trace?: any + frozen: boolean + textV2: boolean +} function parseListIndex(key) { if (typeof key === "string" && /^[0-9]+$/.test(key)) key = parseInt(key, 10) @@ -24,8 +44,8 @@ function parseListIndex(key) { return key } -function valueAt(target, prop: Prop): AutomergeValue | undefined { - const { context, objectId, path, readonly, heads } = target +function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { + const { context, objectId, path, readonly, heads, textV2 } = target const value = context.getWithType(objectId, prop, heads) if (value === null) { return @@ -36,11 +56,35 @@ function valueAt(target, prop: Prop): AutomergeValue | undefined { case undefined: return case "map": - return mapProxy(context, val, [...path, prop], readonly, heads) + return mapProxy( + context, + val as ObjID, + textV2, + [...path, prop], + readonly, + heads + ) case "list": - return listProxy(context, val, [...path, prop], readonly, heads) + return listProxy( + context, + val as ObjID, + textV2, + [...path, prop], + readonly, + heads + ) case "text": - return context.text(val, heads) + if (textV2) { + return context.text(val as ObjID, heads) + } else { + return textProxy( + context, + val as ObjID, + [...path, prop], + readonly, + heads + ) + } case "str": return val case "uint": @@ -59,9 +103,9 @@ function valueAt(target, prop: Prop): AutomergeValue | undefined { return val case "counter": { if (readonly) { - return new Counter(val) + return new Counter(val as number) } else { - return getWriteableCounter(val, context, path, 
objectId, prop) + return getWriteableCounter(val as number, context, path, objectId, prop) } } default: @@ -69,7 +113,7 @@ function valueAt(target, prop: Prop): AutomergeValue | undefined { } } -function import_value(value) { +function import_value(value: any, textV2: boolean) { switch (typeof value) { case "object": if (value == null) { @@ -84,6 +128,10 @@ function import_value(value) { return [value.value, "counter"] } else if (value instanceof Date) { return [value.getTime(), "timestamp"] + } else if (value instanceof RawString) { + return [value.val, "str"] + } else if (value instanceof Text) { + return [value, "text"] } else if (value instanceof Uint8Array) { return [value, "bytes"] } else if (value instanceof Array) { @@ -97,7 +145,6 @@ function import_value(value) { } else { throw new RangeError(`Cannot assign unknown object: ${value}`) } - break case "boolean": return [value, "boolean"] case "number": @@ -106,17 +153,19 @@ function import_value(value) { } else { return [value, "f64"] } - break case "string": - return [value, "text"] - break + if (textV2) { + return [value, "text"] + } else { + return [value, "str"] + } default: throw new RangeError(`Unsupported type of value: ${typeof value}`) } } const MapHandler = { - get(target, key): AutomergeValue { + get(target: Target, key): AutomergeValue | { handle: Automerge } { const { context, objectId, cache } = target if (key === Symbol.toStringTag) { return target[Symbol.toStringTag] @@ -131,8 +180,8 @@ const MapHandler = { return cache[key] }, - set(target, key, val) { - const { context, objectId, path, readonly, frozen } = target + set(target: Target, key, val) { + const { context, objectId, path, readonly, frozen, textV2 } = target target.cache = {} // reset cache on set if (val && val[OBJECT_ID]) { throw new RangeError( @@ -143,7 +192,7 @@ const MapHandler = { target.trace = val return true } - const [value, datatype] = import_value(val) + const [value, datatype] = import_value(val, textV2) if (frozen) { throw new RangeError("Attempting to use an outdated Automerge document") } @@ -153,19 +202,39 @@ const MapHandler = { switch (datatype) { case "list": { const list = context.putObject(objectId, key, []) - const proxyList = listProxy(context, list, [...path, key], readonly) + const proxyList = listProxy( + context, + list, + textV2, + [...path, key], + readonly + ) for (let i = 0; i < value.length; i++) { proxyList[i] = value[i] } break } case "text": { - context.putObject(objectId, key, value, "text") + if (textV2) { + context.putObject(objectId, key, value) + } else { + const text = context.putObject(objectId, key, "") + const proxyText = textProxy(context, text, [...path, key], readonly) + for (let i = 0; i < value.length; i++) { + proxyText[i] = value.get(i) + } + } break } case "map": { const map = context.putObject(objectId, key, {}) - const proxyMap = mapProxy(context, map, [...path, key], readonly) + const proxyMap = mapProxy( + context, + map, + textV2, + [...path, key], + readonly + ) for (const key in value) { proxyMap[key] = value[key] } @@ -177,7 +246,7 @@ const MapHandler = { return true }, - deleteProperty(target, key) { + deleteProperty(target: Target, key) { const { context, objectId, readonly } = target target.cache = {} // reset cache on delete if (readonly) { @@ -187,12 +256,12 @@ const MapHandler = { return true }, - has(target, key) { + has(target: Target, key) { const value = this.get(target, key) return value !== undefined }, - getOwnPropertyDescriptor(target, key) { + getOwnPropertyDescriptor(target: 
Target, key) { // const { context, objectId } = target const value = this.get(target, key) if (typeof value !== "undefined") { @@ -204,7 +273,7 @@ const MapHandler = { } }, - ownKeys(target) { + ownKeys(target: Target) { const { context, objectId, heads } = target // FIXME - this is a tmp workaround until fix the dupe key bug in keys() const keys = context.keys(objectId, heads) @@ -213,7 +282,7 @@ const MapHandler = { } const ListHandler = { - get(target, index) { + get(target: Target, index) { const { context, objectId, heads } = target index = parseListIndex(index) if (index === Symbol.hasInstance) { @@ -236,8 +305,8 @@ const ListHandler = { } }, - set(target, index, val) { - const { context, objectId, path, readonly, frozen } = target + set(target: Target, index, val) { + const { context, objectId, path, readonly, frozen, textV2 } = target index = parseListIndex(index) if (val && val[OBJECT_ID]) { throw new RangeError( @@ -251,7 +320,7 @@ const ListHandler = { if (typeof index == "string") { throw new RangeError("list index must be a number") } - const [value, datatype] = import_value(val) + const [value, datatype] = import_value(val, textV2) if (frozen) { throw new RangeError("Attempting to use an outdated Automerge document") } @@ -266,15 +335,32 @@ const ListHandler = { } else { list = context.putObject(objectId, index, []) } - const proxyList = listProxy(context, list, [...path, index], readonly) + const proxyList = listProxy( + context, + list, + textV2, + [...path, index], + readonly + ) proxyList.splice(0, 0, ...value) break } case "text": { - if (index >= context.length(objectId)) { - context.insertObject(objectId, index, value, "text") + if (textV2) { + if (index >= context.length(objectId)) { + context.insertObject(objectId, index, value) + } else { + context.putObject(objectId, index, value) + } } else { - context.putObject(objectId, index, value, "text") + let text + if (index >= context.length(objectId)) { + text = context.insertObject(objectId, index, "") + } else { + text = context.putObject(objectId, index, "") + } + const proxyText = textProxy(context, text, [...path, index], readonly) + proxyText.splice(0, 0, ...value) } break } @@ -285,7 +371,13 @@ const ListHandler = { } else { map = context.putObject(objectId, index, {}) } - const proxyMap = mapProxy(context, map, [...path, index], readonly) + const proxyMap = mapProxy( + context, + map, + textV2, + [...path, index], + readonly + ) for (const key in value) { proxyMap[key] = value[key] } @@ -301,10 +393,11 @@ const ListHandler = { return true }, - deleteProperty(target, index) { + deleteProperty(target: Target, index) { const { context, objectId } = target index = parseListIndex(index) - if (context.get(objectId, index)[0] == "counter") { + const elem = context.get(objectId, index) + if (elem != null && elem[0] == "counter") { throw new TypeError( "Unsupported operation: deleting a counter from a list" ) @@ -313,7 +406,7 @@ const ListHandler = { return true }, - has(target, index) { + has(target: Target, index) { const { context, objectId, heads } = target index = parseListIndex(index) if (typeof index === "number") { @@ -322,7 +415,7 @@ const ListHandler = { return index === "length" }, - getOwnPropertyDescriptor(target, index) { + getOwnPropertyDescriptor(target: Target, index) { const { context, objectId, heads } = target if (index === "length") @@ -350,54 +443,114 @@ const ListHandler = { }, } +const TextHandler = Object.assign({}, ListHandler, { + get(target: Target, index: any) { + const { context, objectId, 
heads } = target + index = parseListIndex(index) + if (index === Symbol.hasInstance) { + return (instance: any) => { + return Array.isArray(instance) + } + } + if (index === Symbol.toStringTag) { + return target[Symbol.toStringTag] + } + if (index === OBJECT_ID) return objectId + if (index === IS_PROXY) return true + if (index === TRACE) return target.trace + if (index === STATE) return { handle: context } + if (index === "length") return context.length(objectId, heads) + if (typeof index === "number") { + return valueAt(target, index) + } else { + return textMethods(target)[index] || listMethods(target)[index] + } + }, + getPrototypeOf(/*target*/) { + return Object.getPrototypeOf(new Text()) + }, +}) + export function mapProxy( context: Automerge, objectId: ObjID, + textV2: boolean, path?: Prop[], readonly?: boolean, heads?: Heads ): MapValue { - return new Proxy( - { - context, - objectId, - path, - readonly: !!readonly, - frozen: false, - heads, - cache: {}, - }, - MapHandler - ) + const target: Target = { + context, + objectId, + path: path || [], + readonly: !!readonly, + frozen: false, + heads, + cache: {}, + textV2, + } + const proxied = {} + Object.assign(proxied, target) + let result = new Proxy(proxied, MapHandler) + // conversion through unknown is necessary because the types are so different + return result as unknown as MapValue } export function listProxy( context: Automerge, objectId: ObjID, + textV2: boolean, path?: Prop[], readonly?: boolean, heads?: Heads ): ListValue { - const target = [] - Object.assign(target, { + const target: Target = { context, objectId, - path, + path: path || [], readonly: !!readonly, frozen: false, heads, cache: {}, - }) - return new Proxy(target, ListHandler) + textV2, + } + const proxied = [] + Object.assign(proxied, target) + // @ts-ignore + return new Proxy(proxied, ListHandler) as unknown as ListValue } -export function rootProxy(context: Automerge, readonly?: boolean): T { +export function textProxy( + context: Automerge, + objectId: ObjID, + path?: Prop[], + readonly?: boolean, + heads?: Heads +): TextValue { + const target: Target = { + context, + objectId, + path: path || [], + readonly: !!readonly, + frozen: false, + heads, + cache: {}, + textV2: false, + } + return new Proxy(target, TextHandler) as unknown as TextValue +} + +export function rootProxy( + context: Automerge, + textV2: boolean, + readonly?: boolean +): T { /* eslint-disable-next-line */ - return mapProxy(context, "_root", [], !!readonly) + return mapProxy(context, "_root", textV2, [], !!readonly) } -function listMethods(target) { - const { context, objectId, path, readonly, frozen, heads } = target +function listMethods(target: Target) { + const { context, objectId, path, readonly, frozen, heads, textV2 } = target const methods = { deleteAt(index, numDelete) { if (typeof numDelete === "number") { @@ -409,13 +562,13 @@ function listMethods(target) { }, fill(val: ScalarValue, start: number, end: number) { - const [value, datatype] = import_value(val) + const [value, datatype] = import_value(val, textV2) const length = context.length(objectId) start = parseListIndex(start || 0) end = parseListIndex(end || length) for (let i = start; i < Math.min(end, length); i++) { if (datatype === "text" || datatype === "list" || datatype === "map") { - context.putObject(objectId, i, value, datatype) + context.putObject(objectId, i, value) } else { context.put(objectId, i, value, datatype) } @@ -427,7 +580,7 @@ function listMethods(target) { const length = context.length(objectId) for (let 
i = start; i < length; i++) { const value = context.getWithType(objectId, i, heads) - if ((value && value[1] === o[OBJECT_ID]) || value[1] === o) { + if (value && (value[1] === o[OBJECT_ID] || value[1] === o)) { return i } } @@ -488,7 +641,7 @@ function listMethods(target) { } context.delete(objectId, index) } - const values = vals.map(val => import_value(val)) + const values = vals.map(val => import_value(val, textV2)) for (const [value, datatype] of values) { switch (datatype) { case "list": { @@ -496,6 +649,7 @@ function listMethods(target) { const proxyList = listProxy( context, list, + textV2, [...path, index], readonly ) @@ -503,12 +657,29 @@ function listMethods(target) { break } case "text": { - context.insertObject(objectId, index, value) + if (textV2) { + context.insertObject(objectId, index, value) + } else { + const text = context.insertObject(objectId, index, "") + const proxyText = textProxy( + context, + text, + [...path, index], + readonly + ) + proxyText.splice(0, 0, ...value) + } break } case "map": { const map = context.insertObject(objectId, index, {}) - const proxyMap = mapProxy(context, map, [...path, index], readonly) + const proxyMap = mapProxy( + context, + map, + textV2, + [...path, index], + readonly + ) for (const key in value) { proxyMap[key] = value[key] } @@ -689,3 +860,47 @@ function listMethods(target) { } return methods } + +function textMethods(target: Target) { + const { context, objectId, heads } = target + const methods = { + set(index: number, value) { + return (this[index] = value) + }, + get(index: number): AutomergeValue { + return this[index] + }, + toString(): string { + return context.text(objectId, heads).replace(//g, "") + }, + toSpans(): AutomergeValue[] { + const spans: AutomergeValue[] = [] + let chars = "" + const length = context.length(objectId) + for (let i = 0; i < length; i++) { + const value = this[i] + if (typeof value === "string") { + chars += value + } else { + if (chars.length > 0) { + spans.push(chars) + chars = "" + } + spans.push(value) + } + } + if (chars.length > 0) { + spans.push(chars) + } + return spans + }, + toJSON(): string { + return this.toString() + }, + indexOf(o, start = 0) { + const text = context.text(objectId) + return text.indexOf(o, start) + }, + } + return methods +} diff --git a/javascript/src/raw_string.ts b/javascript/src/raw_string.ts new file mode 100644 index 00000000..7fc02084 --- /dev/null +++ b/javascript/src/raw_string.ts @@ -0,0 +1,6 @@ +export class RawString { + val: string + constructor(val: string) { + this.val = val + } +} diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts new file mode 100644 index 00000000..c52d0a4c --- /dev/null +++ b/javascript/src/stable.ts @@ -0,0 +1,955 @@ +/** @hidden **/ +export { /** @hidden */ uuid } from "./uuid" + +import { rootProxy, listProxy, mapProxy, textProxy } from "./proxies" +import { STATE } from "./constants" + +import { AutomergeValue, Counter, Doc, PatchCallback } from "./types" +export { + AutomergeValue, + Counter, + Doc, + Int, + Uint, + Float64, + Patch, + PatchCallback, + ScalarValue, + Text, +} from "./types" + +import { Text } from "./text" + +import { type API } from "@automerge/automerge-wasm" +export { + PutPatch, + DelPatch, + SplicePatch, + IncPatch, + SyncMessage, +} from "@automerge/automerge-wasm" +import { ApiHandler, ChangeToEncode, UseApi } from "./low_level" + +import { + Actor as ActorId, + Prop, + ObjID, + Change, + DecodedChange, + Heads, + Automerge, + MaterializeValue, +} from "@automerge/automerge-wasm" +import 
{ + JsSyncState as SyncState, + SyncMessage, + DecodedSyncMessage, +} from "@automerge/automerge-wasm" + +import { RawString } from "./raw_string" + +import { _state, _is_proxy, _trace, _obj } from "./internal_state" + +/** Options passed to {@link change}, and {@link emptyChange} + * @typeParam T - The type of value contained in the document + */ +export type ChangeOptions = { + /** A message which describes the changes */ + message?: string + /** The unix timestamp of the change (purely advisory, not used in conflict resolution) */ + time?: number + /** A callback which will be called to notify the caller of any changes to the document */ + patchCallback?: PatchCallback +} + +/** Options passed to {@link loadIncremental}, {@link applyChanges}, and {@link receiveSyncMessage} + * @typeParam T - The type of value contained in the document + */ +export type ApplyOptions = { patchCallback?: PatchCallback } + +/** + * Function which is called by {@link change} when making changes to a `Doc` + * @typeParam T - The type of value contained in the document + * + * This function may mutate `doc` + */ +export type ChangeFn = (doc: T) => void + +/** @hidden **/ +export interface State { + change: DecodedChange + snapshot: T +} + +/** @hidden **/ +export function use(api: API) { + UseApi(api) +} + +import * as wasm from "@automerge/automerge-wasm" +use(wasm) + +/** + * Options to be passed to {@link init} or {@link load} + * @typeParam T - The type of the value the document contains + */ +export type InitOptions = { + /** The actor ID to use for this document, a random one will be generated if `null` is passed */ + actor?: ActorId + freeze?: boolean + /** A callback which will be called with the initial patch once the document has finished loading */ + patchCallback?: PatchCallback + /** @hidden */ + enableTextV2?: boolean +} + +/** @hidden */ +export function getBackend(doc: Doc): Automerge { + return _state(doc).handle +} + +function importOpts(_actor?: ActorId | InitOptions): InitOptions { + if (typeof _actor === "object") { + return _actor + } else { + return { actor: _actor } + } +} + +/** + * Create a new automerge document + * + * @typeParam T - The type of value contained in the document. This will be the + * type that is passed to the change closure in {@link change} + * @param _opts - Either an actorId or an {@link InitOptions} (which may + * contain an actorId). If this is null the document will be initialised with a + * random actor ID + */ +export function init(_opts?: ActorId | InitOptions): Doc { + const opts = importOpts(_opts) + const freeze = !!opts.freeze + const patchCallback = opts.patchCallback + const handle = ApiHandler.create(opts.enableTextV2 || false, opts.actor) + handle.enablePatches(true) + handle.enableFreeze(!!opts.freeze) + handle.registerDatatype("counter", (n: any) => new Counter(n)) + let textV2 = opts.enableTextV2 || false + if (textV2) { + handle.registerDatatype("str", (n: string) => new RawString(n)) + } else { + handle.registerDatatype("text", (n: any) => new Text(n)) + } + const doc = handle.materialize("/", undefined, { + handle, + heads: undefined, + freeze, + patchCallback, + textV2, + }) as Doc + return doc +} + +/** + * Make an immutable view of an automerge document as at `heads` + * + * @remarks + * The document returned from this function cannot be passed to {@link change}. + * This is because it shares the same underlying memory as `doc`, but it is + * consequently a very cheap copy. 
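+ *
+ * A minimal sketch of the idea (`automerge` is this module and the field
+ * names are illustrative):
+ * ```
+ * const heads = automerge.getHeads(doc)
+ * doc = automerge.change(doc, d => { d.title = "new" })
+ * const before = automerge.view(doc, heads) // read-only view without the new change
+ * ```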
+ * + * Note that this function will throw an error if any of the hashes in `heads` + * are not in the document. + * + * @typeParam T - The type of the value contained in the document + * @param doc - The document to create a view of + * @param heads - The hashes of the heads to create a view at + */ +export function view(doc: Doc, heads: Heads): Doc { + const state = _state(doc) + const handle = state.handle + return state.handle.materialize("/", heads, { + ...state, + handle, + heads, + }) as Doc +} + +/** + * Make a full writable copy of an automerge document + * + * @remarks + * Unlike {@link view} this function makes a full copy of the memory backing + * the document and can thus be passed to {@link change}. It also generates a + * new actor ID so that changes made in the new document do not create duplicate + * sequence numbers with respect to the old document. If you need control over + * the actor ID which is generated you can pass the actor ID as the second + * argument + * + * @typeParam T - The type of the value contained in the document + * @param doc - The document to clone + * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} + */ +export function clone( + doc: Doc, + _opts?: ActorId | InitOptions +): Doc { + const state = _state(doc) + const heads = state.heads + const opts = importOpts(_opts) + const handle = state.handle.fork(opts.actor, heads) + + // `change` uses the presence of state.heads to determine if we are in a view + // set it to undefined to indicate that this is a full fat document + const { heads: oldHeads, ...stateSansHeads } = state + return handle.applyPatches(doc, { ...stateSansHeads, handle }) +} + +/** Explicity free the memory backing a document. Note that this is note + * necessary in environments which support + * [`FinalizationRegistry`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry) + */ +export function free(doc: Doc) { + return _state(doc).handle.free() +} + +/** + * Create an automerge document from a POJO + * + * @param initialState - The initial state which will be copied into the document + * @typeParam T - The type of the value passed to `from` _and_ the type the resulting document will contain + * @typeParam actor - The actor ID of the resulting document, if this is null a random actor ID will be used + * + * @example + * ``` + * const doc = automerge.from({ + * tasks: [ + * {description: "feed dogs", done: false} + * ] + * }) + * ``` + */ +export function from>( + initialState: T | Doc, + _opts?: ActorId | InitOptions +): Doc { + return change(init(_opts), d => Object.assign(d, initialState)) +} + +/** + * Update the contents of an automerge document + * @typeParam T - The type of the value contained in the document + * @param doc - The document to update + * @param options - Either a message, an {@link ChangeOptions}, or a {@link ChangeFn} + * @param callback - A `ChangeFn` to be used if `options` was a `string` + * + * Note that if the second argument is a function it will be used as the `ChangeFn` regardless of what the third argument is. 
+ * + * @example A simple change + * ``` + * let doc1 = automerge.init() + * doc1 = automerge.change(doc1, d => { + * d.key = "value" + * }) + * assert.equal(doc1.key, "value") + * ``` + * + * @example A change with a message + * + * ``` + * doc1 = automerge.change(doc1, "add another value", d => { + * d.key2 = "value2" + * }) + * ``` + * + * @example A change with a message and a timestamp + * + * ``` + * doc1 = automerge.change(doc1, {message: "add another value", timestamp: 1640995200}, d => { + * d.key2 = "value2" + * }) + * ``` + * + * @example responding to a patch callback + * ``` + * let patchedPath + * let patchCallback = patch => { + * patchedPath = patch.path + * } + * doc1 = automerge.change(doc1, {message, "add another value", timestamp: 1640995200, patchCallback}, d => { + * d.key2 = "value2" + * }) + * assert.equal(patchedPath, ["key2"]) + * ``` + */ +export function change( + doc: Doc, + options: string | ChangeOptions | ChangeFn, + callback?: ChangeFn +): Doc { + if (typeof options === "function") { + return _change(doc, {}, options) + } else if (typeof callback === "function") { + if (typeof options === "string") { + options = { message: options } + } + return _change(doc, options, callback) + } else { + throw RangeError("Invalid args for change") + } +} + +function progressDocument( + doc: Doc, + heads: Heads | null, + callback?: PatchCallback +): Doc { + if (heads == null) { + return doc + } + const state = _state(doc) + const nextState = { ...state, heads: undefined } + const nextDoc = state.handle.applyPatches(doc, nextState, callback) + state.heads = heads + return nextDoc +} + +function _change( + doc: Doc, + options: ChangeOptions, + callback: ChangeFn +): Doc { + if (typeof callback !== "function") { + throw new RangeError("invalid change function") + } + + const state = _state(doc) + + if (doc === undefined || state === undefined) { + throw new RangeError("must be the document root") + } + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + try { + state.heads = heads + const root: T = rootProxy(state.handle, state.textV2) + callback(root) + if (state.handle.pendingOps() === 0) { + state.heads = undefined + return doc + } else { + state.handle.commit(options.message, options.time) + return progressDocument( + doc, + heads, + options.patchCallback || state.patchCallback + ) + } + } catch (e) { + state.heads = undefined + state.handle.rollback() + throw e + } +} + +/** + * Make a change to a document which does not modify the document + * + * @param doc - The doc to add the empty change to + * @param options - Either a message or a {@link ChangeOptions} for the new change + * + * Why would you want to do this? One reason might be that you have merged + * changes from some other peers and you want to generate a change which + * depends on those merged changes so that you can sign the new change with all + * of the merged changes as part of the new change. + */ +export function emptyChange( + doc: Doc, + options: string | ChangeOptions | void +) { + if (options === undefined) { + options = {} + } + if (typeof options === "string") { + options = { message: options } + } + + const state = _state(doc) + + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. 
Use Automerge.clone() if you wish to make a writable copy."
+    )
+  }
+  if (_is_proxy(doc)) {
+    throw new RangeError("Calls to Automerge.change cannot be nested")
+  }
+
+  const heads = state.handle.getHeads()
+  state.handle.emptyChange(options.message, options.time)
+  return progressDocument(doc, heads)
+}
+
+/**
+ * Load an automerge document from a compressed document produced by {@link save}
+ *
+ * @typeParam T - The type of the value which is contained in the document.
+ * Note that no validation is done to make sure this type is in
+ * fact the type of the contained value so be a bit careful
+ * @param data - The compressed document
+ * @param _opts - Either an actor ID or some {@link InitOptions}, if the actor
+ * ID is null a random actor ID will be created
+ *
+ * Note that `load` will throw an error if passed incomplete content (for
+ * example if you are receiving content over the network and don't know if you
+ * have the complete document yet). If you need to handle incomplete content use
+ * {@link init} followed by {@link loadIncremental}.
+ */
+export function load(
+  data: Uint8Array,
+  _opts?: ActorId | InitOptions
+): Doc {
+  const opts = importOpts(_opts)
+  const actor = opts.actor
+  const patchCallback = opts.patchCallback
+  const handle = ApiHandler.load(data, opts.enableTextV2 || false, actor)
+  handle.enablePatches(true)
+  handle.enableFreeze(!!opts.freeze)
+  handle.registerDatatype("counter", (n: number) => new Counter(n))
+  const textV2 = opts.enableTextV2 || false
+  if (textV2) {
+    handle.registerDatatype("str", (n: string) => new RawString(n))
+  } else {
+    handle.registerDatatype("text", (n: string) => new Text(n))
+  }
+  const doc = handle.materialize("/", undefined, {
+    handle,
+    heads: undefined,
+    patchCallback,
+    textV2,
+  }) as Doc
+  return doc
+}
+
+/**
+ * Load changes produced by {@link saveIncremental}, or partial changes
+ *
+ * @typeParam T - The type of the value which is contained in the document.
+ * Note that no validation is done to make sure this type is in
+ * fact the type of the contained value so be a bit careful
+ * @param data - The compressed changes
+ * @param opts - an {@link ApplyOptions}
+ *
+ * This function is useful when staying up to date with a connected peer.
+ * Perhaps the other end sent you a full compressed document which you loaded
+ * with {@link load} and they're sending you the result of
+ * {@link getLastLocalChange} every time they make a change.
+ *
+ * Note that this function will successfully load the results of {@link save} as
+ * well as {@link getLastLocalChange} or any other incremental change.
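+ *
+ * @example A minimal sketch of that pattern (`fullDocBytes` and
+ * `incomingBytes` are assumed to arrive over some application-defined
+ * transport):
+ * ```
+ * let doc = automerge.load(fullDocBytes)
+ * // later, for each incremental update received from the peer
+ * doc = automerge.loadIncremental(doc, incomingBytes)
+ * ```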
+ */ +export function loadIncremental( + doc: Doc, + data: Uint8Array, + opts?: ApplyOptions +): Doc { + if (!opts) { + opts = {} + } + const state = _state(doc) + if (state.heads) { + throw new RangeError( + "Attempting to change an out of date document - set at: " + _trace(doc) + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.loadIncremental(data) + return progressDocument(doc, heads, opts.patchCallback || state.patchCallback) +} + +/** + * Export the contents of a document to a compressed format + * + * @param doc - The doc to save + * + * The returned bytes can be passed to {@link load} or {@link loadIncremental} + */ +export function save(doc: Doc): Uint8Array { + return _state(doc).handle.save() +} + +/** + * Merge `local` into `remote` + * @typeParam T - The type of values contained in each document + * @param local - The document to merge changes into + * @param remote - The document to merge changes from + * + * @returns - The merged document + * + * Often when you are merging documents you will also need to clone them. Both + * arguments to `merge` are frozen after the call so you can no longer call + * mutating methods (such as {@link change}) on them. The symtom of this will be + * an error which says "Attempting to change an out of date document". To + * overcome this call {@link clone} on the argument before passing it to {@link + * merge}. + */ +export function merge(local: Doc, remote: Doc): Doc { + const localState = _state(local) + + if (localState.heads) { + throw new RangeError( + "Attempting to change an out of date document - set at: " + _trace(local) + ) + } + const heads = localState.handle.getHeads() + const remoteState = _state(remote) + const changes = localState.handle.getChangesAdded(remoteState.handle) + localState.handle.applyChanges(changes) + return progressDocument(local, heads, localState.patchCallback) +} + +/** + * Get the actor ID associated with the document + */ +export function getActorId(doc: Doc): ActorId { + const state = _state(doc) + return state.handle.getActorId() +} + +/** + * The type of conflicts for particular key or index + * + * Maps and sequences in automerge can contain conflicting values for a + * particular key or index. In this case {@link getConflicts} can be used to + * obtain a `Conflicts` representing the multiple values present for the property + * + * A `Conflicts` is a map from a unique (per property or index) key to one of + * the possible conflicting values for the given property. 
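+ *
+ * For example, two concurrent writes to the same key might surface as a value
+ * shaped like `{ "1@aaaa": "one value", "1@bbbb": "another value" }` (the keys
+ * are opaque identifiers, shown here purely for illustration).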
+ */ +type Conflicts = { [key: string]: AutomergeValue } + +function conflictAt( + context: Automerge, + objectId: ObjID, + prop: Prop, + textV2: boolean +): Conflicts | undefined { + const values = context.getAll(objectId, prop) + if (values.length <= 1) { + return + } + const result: Conflicts = {} + for (const fullVal of values) { + switch (fullVal[0]) { + case "map": + result[fullVal[1]] = mapProxy(context, fullVal[1], textV2, [prop], true) + break + case "list": + result[fullVal[1]] = listProxy( + context, + fullVal[1], + textV2, + [prop], + true + ) + break + case "text": + if (textV2) { + result[fullVal[1]] = context.text(fullVal[1]) + } else { + result[fullVal[1]] = textProxy(context, objectId, [prop], true) + } + break + //case "table": + //case "cursor": + case "str": + case "uint": + case "int": + case "f64": + case "boolean": + case "bytes": + case "null": + result[fullVal[2]] = fullVal[1] + break + case "counter": + result[fullVal[2]] = new Counter(fullVal[1]) + break + case "timestamp": + result[fullVal[2]] = new Date(fullVal[1]) + break + default: + throw RangeError(`datatype ${fullVal[0]} unimplemented`) + } + } + return result +} + +/** + * Get the conflicts associated with a property + * + * The values of properties in a map in automerge can be conflicted if there + * are concurrent "put" operations to the same key. Automerge chooses one value + * arbitrarily (but deterministically, any two nodes who have the same set of + * changes will choose the same value) from the set of conflicting values to + * present as the value of the key. + * + * Sometimes you may want to examine these conflicts, in this case you can use + * {@link getConflicts} to get the conflicts for the key. + * + * @example + * ``` + * import * as automerge from "@automerge/automerge" + * + * type Profile = { + * pets: Array<{name: string, type: string}> + * } + * + * let doc1 = automerge.init("aaaa") + * doc1 = automerge.change(doc1, d => { + * d.pets = [{name: "Lassie", type: "dog"}] + * }) + * let doc2 = automerge.init("bbbb") + * doc2 = automerge.merge(doc2, automerge.clone(doc1)) + * + * doc2 = automerge.change(doc2, d => { + * d.pets[0].name = "Beethoven" + * }) + * + * doc1 = automerge.change(doc1, d => { + * d.pets[0].name = "Babe" + * }) + * + * const doc3 = automerge.merge(doc1, doc2) + * + * // Note that here we pass `doc3.pets`, not `doc3` + * let conflicts = automerge.getConflicts(doc3.pets[0], "name") + * + * // The two conflicting values are the keys of the conflicts object + * assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) + * ``` + */ +export function getConflicts( + doc: Doc, + prop: Prop +): Conflicts | undefined { + const state = _state(doc, false) + const objectId = _obj(doc) + if (objectId != null) { + return conflictAt(state.handle, objectId, prop, state.textV2) + } else { + return undefined + } +} + +/** + * Get the binary representation of the last change which was made to this doc + * + * This is most useful when staying in sync with other peers, every time you + * make a change locally via {@link change} you immediately call {@link + * getLastLocalChange} and send the result over the network to other peers. 
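+ *
+ * @example A rough sketch of that pattern (`broadcast` stands in for an
+ * application-level send function and is not part of this library):
+ * ```
+ * doc = automerge.change(doc, d => { d.done = true })
+ * const change = automerge.getLastLocalChange(doc)
+ * if (change) broadcast(change)
+ * // a peer can then apply it with automerge.applyChanges(theirDoc, [change])
+ * ```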
+ */ +export function getLastLocalChange(doc: Doc): Change | undefined { + const state = _state(doc) + return state.handle.getLastLocalChange() || undefined +} + +/** + * Return the object ID of an arbitrary javascript value + * + * This is useful to determine if something is actually an automerge document, + * if `doc` is not an automerge document this will return null. + */ +export function getObjectId(doc: any, prop?: Prop): ObjID | null { + if (prop) { + const state = _state(doc, false) + const objectId = _obj(doc) + if (!state || !objectId) { + return null + } + return state.handle.get(objectId, prop) as ObjID + } else { + return _obj(doc) + } +} + +/** + * Get the changes which are in `newState` but not in `oldState`. The returned + * changes can be loaded in `oldState` via {@link applyChanges}. + * + * Note that this will crash if there are changes in `oldState` which are not in `newState`. + */ +export function getChanges(oldState: Doc, newState: Doc): Change[] { + const n = _state(newState) + return n.handle.getChanges(getHeads(oldState)) +} + +/** + * Get all the changes in a document + * + * This is different to {@link save} because the output is an array of changes + * which can be individually applied via {@link applyChanges}` + * + */ +export function getAllChanges(doc: Doc): Change[] { + const state = _state(doc) + return state.handle.getChanges([]) +} + +/** + * Apply changes received from another document + * + * `doc` will be updated to reflect the `changes`. If there are changes which + * we do not have dependencies for yet those will be stored in the document and + * applied when the depended on changes arrive. + * + * You can use the {@link ApplyOptions} to pass a patchcallback which will be + * informed of any changes which occur as a result of applying the changes + * + */ +export function applyChanges( + doc: Doc, + changes: Change[], + opts?: ApplyOptions +): [Doc] { + const state = _state(doc) + if (!opts) { + opts = {} + } + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." + ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.applyChanges(changes) + state.heads = heads + return [ + progressDocument(doc, heads, opts.patchCallback || state.patchCallback), + ] +} + +/** @hidden */ +export function getHistory(doc: Doc): State[] { + const textV2 = _state(doc).textV2 + const history = getAllChanges(doc) + return history.map((change, index) => ({ + get change() { + return decodeChange(change) + }, + get snapshot() { + const [state] = applyChanges( + init({ enableTextV2: textV2 }), + history.slice(0, index + 1) + ) + return state + }, + })) +} + +/** @hidden */ +// FIXME : no tests +// FIXME can we just use deep equals now? 
+export function equals(val1: unknown, val2: unknown): boolean { + if (!isObject(val1) || !isObject(val2)) return val1 === val2 + const keys1 = Object.keys(val1).sort(), + keys2 = Object.keys(val2).sort() + if (keys1.length !== keys2.length) return false + for (let i = 0; i < keys1.length; i++) { + if (keys1[i] !== keys2[i]) return false + if (!equals(val1[keys1[i]], val2[keys2[i]])) return false + } + return true +} + +/** + * encode a {@link SyncState} into binary to send over the network + * + * @group sync + * */ +export function encodeSyncState(state: SyncState): Uint8Array { + const sync = ApiHandler.importSyncState(state) + const result = ApiHandler.encodeSyncState(sync) + sync.free() + return result +} + +/** + * Decode some binary data into a {@link SyncState} + * + * @group sync + */ +export function decodeSyncState(state: Uint8Array): SyncState { + const sync = ApiHandler.decodeSyncState(state) + const result = ApiHandler.exportSyncState(sync) + sync.free() + return result +} + +/** + * Generate a sync message to send to the peer represented by `inState` + * @param doc - The doc to generate messages about + * @param inState - The {@link SyncState} representing the peer we are talking to + * + * @group sync + * + * @returns An array of `[newSyncState, syncMessage | null]` where + * `newSyncState` should replace `inState` and `syncMessage` should be sent to + * the peer if it is not null. If `syncMessage` is null then we are up to date. + */ +export function generateSyncMessage( + doc: Doc, + inState: SyncState +): [SyncState, SyncMessage | null] { + const state = _state(doc) + const syncState = ApiHandler.importSyncState(inState) + const message = state.handle.generateSyncMessage(syncState) + const outState = ApiHandler.exportSyncState(syncState) + return [outState, message] +} + +/** + * Update a document and our sync state on receiving a sync message + * + * @group sync + * + * @param doc - The doc the sync message is about + * @param inState - The {@link SyncState} for the peer we are communicating with + * @param message - The message which was received + * @param opts - Any {@link ApplyOption}s, used for passing a + * {@link PatchCallback} which will be informed of any changes + * in `doc` which occur because of the received sync message. + * + * @returns An array of `[newDoc, newSyncState, syncMessage | null]` where + * `newDoc` is the updated state of `doc`, `newSyncState` should replace + * `inState` and `syncMessage` should be sent to the peer if it is not null. If + * `syncMessage` is null then we are up to date. + */ +export function receiveSyncMessage( + doc: Doc, + inState: SyncState, + message: SyncMessage, + opts?: ApplyOptions +): [Doc, SyncState, null] { + const syncState = ApiHandler.importSyncState(inState) + if (!opts) { + opts = {} + } + const state = _state(doc) + if (state.heads) { + throw new RangeError( + "Attempting to change an outdated document. Use Automerge.clone() if you wish to make a writable copy." 
+ ) + } + if (_is_proxy(doc)) { + throw new RangeError("Calls to Automerge.change cannot be nested") + } + const heads = state.handle.getHeads() + state.handle.receiveSyncMessage(syncState, message) + const outSyncState = ApiHandler.exportSyncState(syncState) + return [ + progressDocument(doc, heads, opts.patchCallback || state.patchCallback), + outSyncState, + null, + ] +} + +/** + * Create a new, blank {@link SyncState} + * + * When communicating with a peer for the first time use this to generate a new + * {@link SyncState} for them + * + * @group sync + */ +export function initSyncState(): SyncState { + return ApiHandler.exportSyncState(ApiHandler.initSyncState()) +} + +/** @hidden */ +export function encodeChange(change: ChangeToEncode): Change { + return ApiHandler.encodeChange(change) +} + +/** @hidden */ +export function decodeChange(data: Change): DecodedChange { + return ApiHandler.decodeChange(data) +} + +/** @hidden */ +export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { + return ApiHandler.encodeSyncMessage(message) +} + +/** @hidden */ +export function decodeSyncMessage(message: SyncMessage): DecodedSyncMessage { + return ApiHandler.decodeSyncMessage(message) +} + +/** + * Get any changes in `doc` which are not dependencies of `heads` + */ +export function getMissingDeps(doc: Doc, heads: Heads): Heads { + const state = _state(doc) + return state.handle.getMissingDeps(heads) +} + +/** + * Get the hashes of the heads of this document + */ +export function getHeads(doc: Doc): Heads { + const state = _state(doc) + return state.heads || state.handle.getHeads() +} + +/** @hidden */ +export function dump(doc: Doc) { + const state = _state(doc) + state.handle.dump() +} + +/** @hidden */ +export function toJS(doc: Doc): T { + const state = _state(doc) + const enabled = state.handle.enableFreeze(false) + const result = state.handle.materialize() + state.handle.enableFreeze(enabled) + return result as T +} + +export function isAutomerge(doc: unknown): boolean { + if (typeof doc == "object" && doc !== null) { + return getObjectId(doc) === "_root" && !!Reflect.get(doc, STATE) + } else { + return false + } +} + +function isObject(obj: unknown): obj is Record { + return typeof obj === "object" && obj !== null +} + +export type { + API, + SyncState, + ActorId, + Conflicts, + Prop, + Change, + ObjID, + DecodedChange, + DecodedSyncMessage, + Heads, + MaterializeValue, +} diff --git a/javascript/src/text.ts b/javascript/src/text.ts new file mode 100644 index 00000000..bb0a868d --- /dev/null +++ b/javascript/src/text.ts @@ -0,0 +1,218 @@ +import { Value } from "@automerge/automerge-wasm" +import { TEXT, STATE } from "./constants" + +export class Text { + elems: Array + str: string | undefined + spans: Array | undefined + + constructor(text?: string | string[] | Value[]) { + if (typeof text === "string") { + this.elems = [...text] + } else if (Array.isArray(text)) { + this.elems = text + } else if (text === undefined) { + this.elems = [] + } else { + throw new TypeError(`Unsupported initial value for Text: ${text}`) + } + Reflect.defineProperty(this, TEXT, { value: true }) + } + + get length(): number { + return this.elems.length + } + + get(index: number): any { + return this.elems[index] + } + + /** + * Iterates over the text elements character by character, including any + * inline objects. 
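+ *
+ * A small illustration:
+ * ```
+ * const text = new Text("hi")
+ * for (const elem of text) console.log(elem) // logs "h", then "i"
+ * ```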
+ */ + [Symbol.iterator]() { + const elems = this.elems + let index = -1 + return { + next() { + index += 1 + if (index < elems.length) { + return { done: false, value: elems[index] } + } else { + return { done: true } + } + }, + } + } + + /** + * Returns the content of the Text object as a simple string, ignoring any + * non-character elements. + */ + toString(): string { + if (!this.str) { + // Concatting to a string is faster than creating an array and then + // .join()ing for small (<100KB) arrays. + // https://jsperf.com/join-vs-loop-w-type-test + this.str = "" + for (const elem of this.elems) { + if (typeof elem === "string") this.str += elem + else this.str += "\uFFFC" + } + } + return this.str + } + + /** + * Returns the content of the Text object as a sequence of strings, + * interleaved with non-character elements. + * + * For example, the value `['a', 'b', {x: 3}, 'c', 'd']` has spans: + * `=> ['ab', {x: 3}, 'cd']` + */ + toSpans(): Array { + if (!this.spans) { + this.spans = [] + let chars = "" + for (const elem of this.elems) { + if (typeof elem === "string") { + chars += elem + } else { + if (chars.length > 0) { + this.spans.push(chars) + chars = "" + } + this.spans.push(elem) + } + } + if (chars.length > 0) { + this.spans.push(chars) + } + } + return this.spans + } + + /** + * Returns the content of the Text object as a simple string, so that the + * JSON serialization of an Automerge document represents text nicely. + */ + toJSON(): string { + return this.toString() + } + + /** + * Updates the list item at position `index` to a new value `value`. + */ + set(index: number, value: Value) { + if (this[STATE]) { + throw new RangeError( + "object cannot be modified outside of a change block" + ) + } + this.elems[index] = value + } + + /** + * Inserts new list items `values` starting at position `index`. + */ + insertAt(index: number, ...values: Array) { + if (this[STATE]) { + throw new RangeError( + "object cannot be modified outside of a change block" + ) + } + this.elems.splice(index, 0, ...values) + } + + /** + * Deletes `numDelete` list items starting at position `index`. + * if `numDelete` is not given, one item is deleted. 
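+ *
+ * For example, `text.deleteAt(0, 2)` removes the first two elements.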
+ */ + deleteAt(index: number, numDelete = 1) { + if (this[STATE]) { + throw new RangeError( + "object cannot be modified outside of a change block" + ) + } + this.elems.splice(index, numDelete) + } + + map(callback: (e: Value | Object) => T) { + this.elems.map(callback) + } + + lastIndexOf(searchElement: Value, fromIndex?: number) { + this.elems.lastIndexOf(searchElement, fromIndex) + } + + concat(other: Text): Text { + return new Text(this.elems.concat(other.elems)) + } + + every(test: (v: Value) => boolean): boolean { + return this.elems.every(test) + } + + filter(test: (v: Value) => boolean): Text { + return new Text(this.elems.filter(test)) + } + + find(test: (v: Value) => boolean): Value | undefined { + return this.elems.find(test) + } + + findIndex(test: (v: Value) => boolean): number | undefined { + return this.elems.findIndex(test) + } + + forEach(f: (v: Value) => undefined) { + this.elems.forEach(f) + } + + includes(elem: Value): boolean { + return this.elems.includes(elem) + } + + indexOf(elem: Value) { + return this.elems.indexOf(elem) + } + + join(sep?: string): string { + return this.elems.join(sep) + } + + reduce( + f: ( + previousValue: Value, + currentValue: Value, + currentIndex: number, + array: Value[] + ) => Value + ) { + this.elems.reduce(f) + } + + reduceRight( + f: ( + previousValue: Value, + currentValue: Value, + currentIndex: number, + array: Value[] + ) => Value + ) { + this.elems.reduceRight(f) + } + + slice(start?: number, end?: number) { + new Text(this.elems.slice(start, end)) + } + + some(test: (Value) => boolean): boolean { + return this.elems.some(test) + } + + toLocaleString() { + this.toString() + } +} diff --git a/javascript/src/types.ts b/javascript/src/types.ts index 62fdbba8..e3cb81f8 100644 --- a/javascript/src/types.ts +++ b/javascript/src/types.ts @@ -1,7 +1,10 @@ +export { Text } from "./text" export { Counter } from "./counter" export { Int, Uint, Float64 } from "./numbers" import { Counter } from "./counter" +import type { Patch } from "@automerge/automerge-wasm" +export type { Patch } from "@automerge/automerge-wasm" export type AutomergeValue = | ScalarValue @@ -9,6 +12,7 @@ export type AutomergeValue = | Array export type MapValue = { [key: string]: AutomergeValue } export type ListValue = Array +export type TextValue = Array export type ScalarValue = | string | number @@ -17,3 +21,25 @@ export type ScalarValue = | Date | Counter | Uint8Array + +/** + * An automerge document. + * @typeParam T - The type of the value contained in this document + * + * Note that this provides read only access to the fields of the value. To + * modify the value use {@link change} + */ +export type Doc = { readonly [P in keyof T]: T[P] } + +/** + * Callback which is called by various methods in this library to notify the + * user of what changes have been made. + * @param patch - A description of the changes made + * @param before - The document before the change was made + * @param after - The document after the change was made + */ +export type PatchCallback = ( + patches: Array, + before: Doc, + after: Doc +) => void diff --git a/javascript/src/unstable.ts b/javascript/src/unstable.ts new file mode 100644 index 00000000..8f25586c --- /dev/null +++ b/javascript/src/unstable.ts @@ -0,0 +1,292 @@ +/** + * # The unstable API + * + * This module contains new features we are working on which are either not yet + * ready for a stable release and/or which will result in backwards incompatible + * API changes. 
The API of this module may change in arbitrary ways between + * point releases - we will always document what these changes are in the + * CHANGELOG below, but only depend on this module if you are prepared to deal + * with frequent changes. + * + * ## Differences from stable + * + * In the stable API text objects are represented using the {@link Text} class. + * This means you must decide up front whether your string data might need + * concurrent merges in the future and if you change your mind you have to + * figure out how to migrate your data. In the unstable API the `Text` class is + * gone and all `string`s are represented using the text CRDT, allowing for + * concurrent changes. Modifying a string is done using the {@link splice} + * function. You can still access the old behaviour of strings which do not + * support merging behaviour via the {@link RawString} class. + * + * This leads to the following differences from `stable`: + * + * * There is no `unstable.Text` class, all strings are text objects + * * Reading strings in a `future` document is the same as reading any other + * javascript string + * * To modify strings in a `future` document use {@link splice} + * * The {@link AutomergeValue} type does not include the {@link Text} + * class but the {@link RawString} class is included in the {@link ScalarValue} + * type + * + * ## CHANGELOG + * * Introduce this module to expose the new API which has no `Text` class + * + * + * @module + */ +import { Counter } from "./types" + +export { Counter, Doc, Int, Uint, Float64, Patch, PatchCallback } from "./types" + +import type { PatchCallback } from "./stable" + +export type AutomergeValue = + | ScalarValue + | { [key: string]: AutomergeValue } + | Array +export type MapValue = { [key: string]: AutomergeValue } +export type ListValue = Array +export type ScalarValue = + | string + | number + | null + | boolean + | Date + | Counter + | Uint8Array + | RawString + +export type Conflicts = { [key: string]: AutomergeValue } + +export { + PutPatch, + DelPatch, + SplicePatch, + IncPatch, + SyncMessage, +} from "@automerge/automerge-wasm" + +export type { ChangeOptions, ApplyOptions, ChangeFn } from "./stable" +export { + view, + free, + getHeads, + change, + emptyChange, + loadIncremental, + save, + merge, + getActorId, + getLastLocalChange, + getChanges, + getAllChanges, + applyChanges, + getHistory, + equals, + encodeSyncState, + decodeSyncState, + generateSyncMessage, + receiveSyncMessage, + initSyncState, + encodeChange, + decodeChange, + encodeSyncMessage, + decodeSyncMessage, + getMissingDeps, + dump, + toJS, + isAutomerge, + getObjectId, +} from "./stable" + +export type InitOptions = { + /** The actor ID to use for this document, a random one will be generated if `null` is passed */ + actor?: ActorId + freeze?: boolean + /** A callback which will be called with the initial patch once the document has finished loading */ + patchCallback?: PatchCallback +} + +import { ActorId, Doc } from "./stable" +import * as stable from "./stable" +export { RawString } from "./raw_string" + +/** @hidden */ +export const getBackend = stable.getBackend + +import { _is_proxy, _state, _obj } from "./internal_state" +import { RawString } from "./raw_string" + +/** + * Create a new automerge document + * + * @typeParam T - The type of value contained in the document. This will be the + * type that is passed to the change closure in {@link change} + * @param _opts - Either an actorId or an {@link InitOptions} (which may + * contain an actorId). 
If this is null the document will be initialised with a + * random actor ID + */ +export function init(_opts?: ActorId | InitOptions): Doc { + let opts = importOpts(_opts) + opts.enableTextV2 = true + return stable.init(opts) +} + +/** + * Make a full writable copy of an automerge document + * + * @remarks + * Unlike {@link view} this function makes a full copy of the memory backing + * the document and can thus be passed to {@link change}. It also generates a + * new actor ID so that changes made in the new document do not create duplicate + * sequence numbers with respect to the old document. If you need control over + * the actor ID which is generated you can pass the actor ID as the second + * argument + * + * @typeParam T - The type of the value contained in the document + * @param doc - The document to clone + * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} + */ +export function clone( + doc: Doc, + _opts?: ActorId | InitOptions +): Doc { + let opts = importOpts(_opts) + opts.enableTextV2 = true + return stable.clone(doc, opts) +} + +/** + * Create an automerge document from a POJO + * + * @param initialState - The initial state which will be copied into the document + * @typeParam T - The type of the value passed to `from` _and_ the type the resulting document will contain + * @typeParam actor - The actor ID of the resulting document, if this is null a random actor ID will be used + * + * @example + * ``` + * const doc = automerge.from({ + * tasks: [ + * {description: "feed dogs", done: false} + * ] + * }) + * ``` + */ +export function from>( + initialState: T | Doc, + _opts?: ActorId | InitOptions +): Doc { + const opts = importOpts(_opts) + opts.enableTextV2 = true + return stable.from(initialState, opts) +} + +/** + * Load an automerge document from a compressed document produce by {@link save} + * + * @typeParam T - The type of the value which is contained in the document. + * Note that no validation is done to make sure this type is in + * fact the type of the contained value so be a bit careful + * @param data - The compressed document + * @param _opts - Either an actor ID or some {@link InitOptions}, if the actor + * ID is null a random actor ID will be created + * + * Note that `load` will throw an error if passed incomplete content (for + * example if you are receiving content over the network and don't know if you + * have the complete document yet). If you need to handle incomplete content use + * {@link init} followed by {@link loadIncremental}. 
+ */ +export function load( + data: Uint8Array, + _opts?: ActorId | InitOptions +): Doc { + const opts = importOpts(_opts) + opts.enableTextV2 = true + return stable.load(data, opts) +} + +function importOpts( + _actor?: ActorId | InitOptions +): stable.InitOptions { + if (typeof _actor === "object") { + return _actor + } else { + return { actor: _actor } + } +} + +export function splice( + doc: Doc, + prop: stable.Prop, + index: number, + del: number, + newText?: string +) { + if (!_is_proxy(doc)) { + throw new RangeError("object cannot be modified outside of a change block") + } + const state = _state(doc, false) + const objectId = _obj(doc) + if (!objectId) { + throw new RangeError("invalid object for splice") + } + const value = `${objectId}/${prop}` + try { + return state.handle.splice(value, index, del, newText) + } catch (e) { + throw new RangeError(`Cannot splice: ${e}`) + } +} + +/** + * Get the conflicts associated with a property + * + * The values of properties in a map in automerge can be conflicted if there + * are concurrent "put" operations to the same key. Automerge chooses one value + * arbitrarily (but deterministically, any two nodes who have the same set of + * changes will choose the same value) from the set of conflicting values to + * present as the value of the key. + * + * Sometimes you may want to examine these conflicts, in this case you can use + * {@link getConflicts} to get the conflicts for the key. + * + * @example + * ``` + * import * as automerge from "@automerge/automerge" + * + * type Profile = { + * pets: Array<{name: string, type: string}> + * } + * + * let doc1 = automerge.init("aaaa") + * doc1 = automerge.change(doc1, d => { + * d.pets = [{name: "Lassie", type: "dog"}] + * }) + * let doc2 = automerge.init("bbbb") + * doc2 = automerge.merge(doc2, automerge.clone(doc1)) + * + * doc2 = automerge.change(doc2, d => { + * d.pets[0].name = "Beethoven" + * }) + * + * doc1 = automerge.change(doc1, d => { + * d.pets[0].name = "Babe" + * }) + * + * const doc3 = automerge.merge(doc1, doc2) + * + * // Note that here we pass `doc3.pets`, not `doc3` + * let conflicts = automerge.getConflicts(doc3.pets[0], "name") + * + * // The two conflicting values are the keys of the conflicts object + * assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) + * ``` + */ +export function getConflicts( + doc: Doc, + prop: stable.Prop +): Conflicts | undefined { + // this function only exists to get the types to line up with future.AutomergeValue + return stable.getConflicts(doc, prop) +} diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index c14c0e20..90e7a99d 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -1,5 +1,5 @@ import * as assert from "assert" -import * as Automerge from "../src" +import { unstable as Automerge } from "../src" import * as WASM from "@automerge/automerge-wasm" describe("Automerge", () => { diff --git a/javascript/test/extra_api_tests.ts b/javascript/test/extra_api_tests.ts index 69932d1f..84fa4c39 100644 --- a/javascript/test/extra_api_tests.ts +++ b/javascript/test/extra_api_tests.ts @@ -1,5 +1,5 @@ import * as assert from "assert" -import * as Automerge from "../src" +import { unstable as Automerge } from "../src" describe("Automerge", () => { describe("basics", () => { diff --git a/javascript/test/legacy_tests.ts b/javascript/test/legacy_tests.ts index 477a5545..a423b51f 100644 --- a/javascript/test/legacy_tests.ts +++ b/javascript/test/legacy_tests.ts @@ -1,5 +1,5 @@ import * as 
assert from "assert" -import * as Automerge from "../src" +import { unstable as Automerge } from "../src" import { assertEqualsOneOf } from "./helpers" import { decodeChange } from "./legacy/columnar" diff --git a/javascript/test/stable_unstable_interop.ts b/javascript/test/stable_unstable_interop.ts new file mode 100644 index 00000000..2f58c256 --- /dev/null +++ b/javascript/test/stable_unstable_interop.ts @@ -0,0 +1,41 @@ +import * as assert from "assert" +import * as stable from "../src" +import { unstable } from "../src" + +describe("stable/unstable interop", () => { + it("should allow reading Text from stable as strings in unstable", () => { + let stableDoc = stable.from({ + text: new stable.Text("abc"), + }) + let unstableDoc = unstable.init() + unstableDoc = unstable.merge(unstableDoc, stableDoc) + assert.deepStrictEqual(unstableDoc.text, "abc") + }) + + it("should allow string from stable as Text in unstable", () => { + let unstableDoc = unstable.from({ + text: "abc", + }) + let stableDoc = stable.init() + stableDoc = unstable.merge(stableDoc, unstableDoc) + assert.deepStrictEqual(stableDoc.text, new stable.Text("abc")) + }) + + it("should allow reading strings from stable as RawString in unstable", () => { + let stableDoc = stable.from({ + text: "abc", + }) + let unstableDoc = unstable.init() + unstableDoc = unstable.merge(unstableDoc, stableDoc) + assert.deepStrictEqual(unstableDoc.text, new unstable.RawString("abc")) + }) + + it("should allow reading RawString from unstable as string in stable", () => { + let unstableDoc = unstable.from({ + text: new unstable.RawString("abc"), + }) + let stableDoc = stable.init() + stableDoc = unstable.merge(stableDoc, unstableDoc) + assert.deepStrictEqual(stableDoc.text, "abc") + }) +}) diff --git a/javascript/test/text_test.ts b/javascript/test/text_test.ts index 076e20b2..518c7d2b 100644 --- a/javascript/test/text_test.ts +++ b/javascript/test/text_test.ts @@ -1,5 +1,5 @@ import * as assert from "assert" -import * as Automerge from "../src" +import { unstable as Automerge } from "../src" import { assertEqualsOneOf } from "./helpers" type DocType = { diff --git a/javascript/test/text_v1.ts b/javascript/test/text_v1.ts new file mode 100644 index 00000000..b111530f --- /dev/null +++ b/javascript/test/text_v1.ts @@ -0,0 +1,281 @@ +import * as assert from "assert" +import * as Automerge from "../src" +import { assertEqualsOneOf } from "./helpers" + +type DocType = { text: Automerge.Text; [key: string]: any } + +describe("Automerge.Text", () => { + let s1: Automerge.Doc, s2: Automerge.Doc + beforeEach(() => { + s1 = Automerge.change( + Automerge.init(), + doc => (doc.text = new Automerge.Text()) + ) + s2 = Automerge.merge(Automerge.init(), s1) + }) + + it("should support insertion", () => { + s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a")) + assert.strictEqual(s1.text.length, 1) + assert.strictEqual(s1.text.get(0), "a") + assert.strictEqual(s1.text.toString(), "a") + //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`) + }) + + it("should support deletion", () => { + s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) + s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 1)) + assert.strictEqual(s1.text.length, 2) + assert.strictEqual(s1.text.get(0), "a") + assert.strictEqual(s1.text.get(1), "c") + assert.strictEqual(s1.text.toString(), "ac") + }) + + it("should support implicit and explicit deletion", () => { + s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) + s1 = 
Automerge.change(s1, doc => doc.text.deleteAt(1)) + s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 0)) + assert.strictEqual(s1.text.length, 2) + assert.strictEqual(s1.text.get(0), "a") + assert.strictEqual(s1.text.get(1), "c") + assert.strictEqual(s1.text.toString(), "ac") + }) + + it("should handle concurrent insertion", () => { + s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) + s2 = Automerge.change(s2, doc => doc.text.insertAt(0, "x", "y", "z")) + s1 = Automerge.merge(s1, s2) + assert.strictEqual(s1.text.length, 6) + assertEqualsOneOf(s1.text.toString(), "abcxyz", "xyzabc") + assertEqualsOneOf(s1.text.join(""), "abcxyz", "xyzabc") + }) + + it("should handle text and other ops in the same change", () => { + s1 = Automerge.change(s1, doc => { + doc.foo = "bar" + doc.text.insertAt(0, "a") + }) + assert.strictEqual(s1.foo, "bar") + assert.strictEqual(s1.text.toString(), "a") + assert.strictEqual(s1.text.join(""), "a") + }) + + it("should serialize to JSON as a simple string", () => { + s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", '"', "b")) + assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}') + }) + + it("should allow modification before an object is assigned to a document", () => { + s1 = Automerge.change(Automerge.init(), doc => { + const text = new Automerge.Text() + text.insertAt(0, "a", "b", "c", "d") + text.deleteAt(2) + doc.text = text + assert.strictEqual(doc.text.toString(), "abd") + assert.strictEqual(doc.text.join(""), "abd") + }) + assert.strictEqual(s1.text.toString(), "abd") + assert.strictEqual(s1.text.join(""), "abd") + }) + + it("should allow modification after an object is assigned to a document", () => { + s1 = Automerge.change(Automerge.init(), doc => { + const text = new Automerge.Text() + doc.text = text + doc.text.insertAt(0, "a", "b", "c", "d") + doc.text.deleteAt(2) + assert.strictEqual(doc.text.toString(), "abd") + assert.strictEqual(doc.text.join(""), "abd") + }) + assert.strictEqual(s1.text.join(""), "abd") + }) + + it("should not allow modification outside of a change callback", () => { + assert.throws( + () => s1.text.insertAt(0, "a"), + /object cannot be modified outside of a change block/ + ) + }) + + describe("with initial value", () => { + it("should accept a string as initial value", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.text = new Automerge.Text("init")) + ) + assert.strictEqual(s1.text.length, 4) + assert.strictEqual(s1.text.get(0), "i") + assert.strictEqual(s1.text.get(1), "n") + assert.strictEqual(s1.text.get(2), "i") + assert.strictEqual(s1.text.get(3), "t") + assert.strictEqual(s1.text.toString(), "init") + }) + + it("should accept an array as initial value", () => { + let s1 = Automerge.change( + Automerge.init(), + doc => (doc.text = new Automerge.Text(["i", "n", "i", "t"])) + ) + assert.strictEqual(s1.text.length, 4) + assert.strictEqual(s1.text.get(0), "i") + assert.strictEqual(s1.text.get(1), "n") + assert.strictEqual(s1.text.get(2), "i") + assert.strictEqual(s1.text.get(3), "t") + assert.strictEqual(s1.text.toString(), "init") + }) + + it("should initialize text in Automerge.from()", () => { + let s1 = Automerge.from({ text: new Automerge.Text("init") }) + assert.strictEqual(s1.text.length, 4) + assert.strictEqual(s1.text.get(0), "i") + assert.strictEqual(s1.text.get(1), "n") + assert.strictEqual(s1.text.get(2), "i") + assert.strictEqual(s1.text.get(3), "t") + assert.strictEqual(s1.text.toString(), "init") + }) + + it("should encode the initial value as a 
change", () => { + const s1 = Automerge.from({ text: new Automerge.Text("init") }) + const changes = Automerge.getAllChanges(s1) + assert.strictEqual(changes.length, 1) + const [s2] = Automerge.applyChanges(Automerge.init(), changes) + assert.strictEqual(s2.text instanceof Automerge.Text, true) + assert.strictEqual(s2.text.toString(), "init") + assert.strictEqual(s2.text.join(""), "init") + }) + + it("should allow immediate access to the value", () => { + Automerge.change(Automerge.init(), doc => { + const text = new Automerge.Text("init") + assert.strictEqual(text.length, 4) + assert.strictEqual(text.get(0), "i") + assert.strictEqual(text.toString(), "init") + doc.text = text + assert.strictEqual(doc.text.length, 4) + assert.strictEqual(doc.text.get(0), "i") + assert.strictEqual(doc.text.toString(), "init") + }) + }) + + it("should allow pre-assignment modification of the initial value", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + const text = new Automerge.Text("init") + text.deleteAt(3) + assert.strictEqual(text.join(""), "ini") + doc.text = text + assert.strictEqual(doc.text.join(""), "ini") + assert.strictEqual(doc.text.toString(), "ini") + }) + assert.strictEqual(s1.text.toString(), "ini") + assert.strictEqual(s1.text.join(""), "ini") + }) + + it("should allow post-assignment modification of the initial value", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + const text = new Automerge.Text("init") + doc.text = text + doc.text.deleteAt(0) + doc.text.insertAt(0, "I") + assert.strictEqual(doc.text.join(""), "Init") + assert.strictEqual(doc.text.toString(), "Init") + }) + assert.strictEqual(s1.text.join(""), "Init") + assert.strictEqual(s1.text.toString(), "Init") + }) + }) + + describe("non-textual control characters", () => { + let s1: Automerge.Doc + beforeEach(() => { + s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text() + doc.text.insertAt(0, "a") + doc.text.insertAt(1, { attribute: "bold" }) + }) + }) + + it("should allow fetching non-textual characters", () => { + assert.deepEqual(s1.text.get(1), { attribute: "bold" }) + //assert.strictEqual(s1.text.getElemId(1), `3@${Automerge.getActorId(s1)}`) + }) + + it("should include control characters in string length", () => { + assert.strictEqual(s1.text.length, 2) + assert.strictEqual(s1.text.get(0), "a") + }) + + it("should replace control characters from toString()", () => { + assert.strictEqual(s1.text.toString(), "a\uFFFC") + }) + + it("should allow control characters to be updated", () => { + const s2 = Automerge.change( + s1, + doc => (doc.text.get(1)!.attribute = "italic") + ) + const s3 = Automerge.load(Automerge.save(s2)) + assert.strictEqual(s1.text.get(1).attribute, "bold") + assert.strictEqual(s2.text.get(1).attribute, "italic") + assert.strictEqual(s3.text.get(1).attribute, "italic") + }) + + describe("spans interface to Text", () => { + it("should return a simple string as a single span", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text("hello world") + }) + assert.deepEqual(s1.text.toSpans(), ["hello world"]) + }) + it("should return an empty string as an empty array", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text() + }) + assert.deepEqual(s1.text.toSpans(), []) + }) + it("should split a span at a control character", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text("hello world") + doc.text.insertAt(5, { attributes: 
{ bold: true } }) + }) + assert.deepEqual(s1.text.toSpans(), [ + "hello", + { attributes: { bold: true } }, + " world", + ]) + }) + it("should allow consecutive control characters", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text("hello world") + doc.text.insertAt(5, { attributes: { bold: true } }) + doc.text.insertAt(6, { attributes: { italic: true } }) + }) + assert.deepEqual(s1.text.toSpans(), [ + "hello", + { attributes: { bold: true } }, + { attributes: { italic: true } }, + " world", + ]) + }) + it("should allow non-consecutive control characters", () => { + let s1 = Automerge.change(Automerge.init(), doc => { + doc.text = new Automerge.Text("hello world") + doc.text.insertAt(5, { attributes: { bold: true } }) + doc.text.insertAt(12, { attributes: { italic: true } }) + }) + assert.deepEqual(s1.text.toSpans(), [ + "hello", + { attributes: { bold: true } }, + " world", + { attributes: { italic: true } }, + ]) + }) + }) + }) + + it("should support unicode when creating text", () => { + s1 = Automerge.from({ + text: new Automerge.Text("🐦"), + }) + assert.strictEqual(s1.text.get(0), "🐦") + }) +}) From d1220b9dd08e0a9e4206634ffb4956634453c26b Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 10 Jan 2023 11:25:06 +0000 Subject: [PATCH 20/72] javascript: Use glob to list files in package.json We have been listing all the files to be included in the distributed package in package.json:files. This is tedious and error prone. We change to using globs instead, to do this without also including the test and src files when outputting declarations we add a new typescript config file for the declaration generation which excludes tests. --- javascript/config/declonly.json | 8 ++++++++ javascript/package.json | 30 +++++------------------------- 2 files changed, 13 insertions(+), 25 deletions(-) create mode 100644 javascript/config/declonly.json diff --git a/javascript/config/declonly.json b/javascript/config/declonly.json new file mode 100644 index 00000000..df615930 --- /dev/null +++ b/javascript/config/declonly.json @@ -0,0 +1,8 @@ +{ + "extends": "../tsconfig.json", + "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], + "emitDeclarationOnly": true, + "compilerOptions": { + "outDir": "../dist" + } +} diff --git a/javascript/package.json b/javascript/package.json index 33523370..a7412c70 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -12,30 +12,10 @@ "README.md", "LICENSE", "package.json", - "index.d.ts", - "dist/*.d.ts", - "dist/cjs/constants.js", - "dist/cjs/types.js", - "dist/cjs/numbers.js", - "dist/cjs/index.js", - "dist/cjs/uuid.js", - "dist/cjs/counter.js", - "dist/cjs/low_level.js", - "dist/cjs/next.js", - "dist/cjs/text.js", - "dist/cjs/proxies.js", - "dist/cjs/raw_string.js", - "dist/mjs/constants.js", - "dist/mjs/types.js", - "dist/mjs/numbers.js", - "dist/mjs/next.js", - "dist/mjs/index.js", - "dist/mjs/uuid.js", - "dist/mjs/counter.js", - "dist/mjs/low_level.js", - "dist/mjs/text.js", - "dist/mjs/proxies.js", - "dist/mjs/raw_string.js" + "dist/index.d.ts", + "dist/cjs/**/*.js", + "dist/mjs/**/*.js", + "dist/*.d.ts" ], "types": "./dist/index.d.ts", "module": "./dist/mjs/index.js", @@ -43,7 +23,7 @@ "license": "MIT", "scripts": { "lint": "eslint src", - "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc --emitDeclarationOnly", + "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc -p config/declonly.json --emitDeclarationOnly", "test": "ts-mocha test/*.ts", "watch-docs": 
"typedoc src/index.ts --watch --readme typedoc-readme.md" }, From 0e7fb6cc10c0fac0aaa4dc799f05b9aed6c17f31 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 10 Jan 2023 11:49:16 +0000 Subject: [PATCH 21/72] javascript: Add @packageDocumentation TSDoc Instead of using the `--readme` argument to `typedoc` use the `@packageDocumentation` TSDoc tag to include the readme text in the typedoc output. --- javascript/.prettierignore | 1 + javascript/package.json | 2 +- javascript/src/index.ts | 239 +++++++++++++++++++++++++++++++++++ javascript/src/unstable.ts | 2 +- javascript/typedoc-readme.md | 226 --------------------------------- 5 files changed, 242 insertions(+), 228 deletions(-) delete mode 100644 javascript/typedoc-readme.md diff --git a/javascript/.prettierignore b/javascript/.prettierignore index 8116ea24..c2dcd4bb 100644 --- a/javascript/.prettierignore +++ b/javascript/.prettierignore @@ -1,2 +1,3 @@ e2e/verdacciodb dist +docs diff --git a/javascript/package.json b/javascript/package.json index a7412c70..a424de48 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -25,7 +25,7 @@ "lint": "eslint src", "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc -p config/declonly.json --emitDeclarationOnly", "test": "ts-mocha test/*.ts", - "watch-docs": "typedoc src/index.ts --watch --readme typedoc-readme.md" + "watch-docs": "typedoc src/index.ts --watch --readme none" }, "devDependencies": { "@types/expect": "^24.3.0", diff --git a/javascript/src/index.ts b/javascript/src/index.ts index 7d4a68ba..bf84c68d 100644 --- a/javascript/src/index.ts +++ b/javascript/src/index.ts @@ -1,3 +1,242 @@ +/** + * # Automerge + * + * This library provides the core automerge data structure and sync algorithms. + * Other libraries can be built on top of this one which provide IO and + * persistence. + * + * An automerge document can be though of an immutable POJO (plain old javascript + * object) which `automerge` tracks the history of, allowing it to be merged with + * any other automerge document. + * + * ## Creating and modifying a document + * + * You can create a document with {@link init} or {@link from} and then make + * changes to it with {@link change}, you can merge two documents with {@link + * merge}. + * + * ```ts + * import * as automerge from "@automerge/automerge" + * + * type DocType = {ideas: Array} + * + * let doc1 = automerge.init() + * doc1 = automerge.change(doc1, d => { + * d.ideas = [new automerge.Text("an immutable document")] + * }) + * + * let doc2 = automerge.init() + * doc2 = automerge.merge(doc2, automerge.clone(doc1)) + * doc2 = automerge.change(doc2, d => { + * d.ideas.push(new automerge.Text("which records it's history")) + * }) + * + * // Note the `automerge.clone` call, see the "cloning" section of this readme for + * // more detail + * doc1 = automerge.merge(doc1, automerge.clone(doc2)) + * doc1 = automerge.change(doc1, d => { + * d.ideas[0].deleteAt(13, 8) + * d.ideas[0].insertAt(13, "object") + * }) + * + * let doc3 = automerge.merge(doc1, doc2) + * // doc3 is now {ideas: ["an immutable object", "which records it's history"]} + * ``` + * + * ## Applying changes from another document + * + * You can get a representation of the result of the last {@link change} you made + * to a document with {@link getLastLocalChange} and you can apply that change to + * another document using {@link applyChanges}. 
+ * + * If you need to get just the changes which are in one document but not in another + * you can use {@link getHeads} to get the heads of the document without the + * changes and then {@link getMissingDeps}, passing the result of {@link getHeads} + * on the document with the changes. + * + * ## Saving and loading documents + * + * You can {@link save} a document to generate a compresed binary representation of + * the document which can be loaded with {@link load}. If you have a document which + * you have recently made changes to you can generate recent changes with {@link + * saveIncremental}, this will generate all the changes since you last called + * `saveIncremental`, the changes generated can be applied to another document with + * {@link loadIncremental}. + * + * ## Viewing different versions of a document + * + * Occasionally you may wish to explicitly step to a different point in a document + * history. One common reason to do this is if you need to obtain a set of changes + * which take the document from one state to another in order to send those changes + * to another peer (or to save them somewhere). You can use {@link view} to do this. + * + * ```ts + * import * as automerge from "@automerge/automerge" + * import * as assert from "assert" + * + * let doc = automerge.from({ + * key1: "value1", + * }) + * + * // Make a clone of the document at this point, maybe this is actually on another + * // peer. + * let doc2 = automerge.clone < any > doc + * + * let heads = automerge.getHeads(doc) + * + * doc = + * automerge.change < + * any > + * (doc, + * d => { + * d.key2 = "value2" + * }) + * + * doc = + * automerge.change < + * any > + * (doc, + * d => { + * d.key3 = "value3" + * }) + * + * // At this point we've generated two separate changes, now we want to send + * // just those changes to someone else + * + * // view is a cheap reference based copy of a document at a given set of heads + * let before = automerge.view(doc, heads) + * + * // This view doesn't show the last two changes in the document state + * assert.deepEqual(before, { + * key1: "value1", + * }) + * + * // Get the changes to send to doc2 + * let changes = automerge.getChanges(before, doc) + * + * // Apply the changes at doc2 + * doc2 = automerge.applyChanges < any > (doc2, changes)[0] + * assert.deepEqual(doc2, { + * key1: "value1", + * key2: "value2", + * key3: "value3", + * }) + * ``` + * + * If you have a {@link view} of a document which you want to make changes to you + * can {@link clone} the viewed document. + * + * ## Syncing + * + * The sync protocol is stateful. This means that we start by creating a {@link + * SyncState} for each peer we are communicating with using {@link initSyncState}. + * Then we generate a message to send to the peer by calling {@link + * generateSyncMessage}. When we receive a message from the peer we call {@link + * receiveSyncMessage}. Here's a simple example of a loop which just keeps two + * peers in sync. + * + * ```ts + * let sync1 = automerge.initSyncState() + * let msg: Uint8Array | null + * ;[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) + * + * while (true) { + * if (msg != null) { + * network.send(msg) + * } + * let resp: Uint8Array = + * (network.receive()[(doc1, sync1, _ignore)] = + * automerge.receiveSyncMessage(doc1, sync1, resp)[(sync1, msg)] = + * automerge.generateSyncMessage(doc1, sync1)) + * } + * ``` + * + * ## Conflicts + * + * The only time conflicts occur in automerge documents is in concurrent + * assignments to the same key in an object. 
In this case automerge + * deterministically chooses an arbitrary value to present to the application but + * you can examine the conflicts using {@link getConflicts}. + * + * ``` + * import * as automerge from "@automerge/automerge" + * + * type Profile = { + * pets: Array<{name: string, type: string}> + * } + * + * let doc1 = automerge.init("aaaa") + * doc1 = automerge.change(doc1, d => { + * d.pets = [{name: "Lassie", type: "dog"}] + * }) + * let doc2 = automerge.init("bbbb") + * doc2 = automerge.merge(doc2, automerge.clone(doc1)) + * + * doc2 = automerge.change(doc2, d => { + * d.pets[0].name = "Beethoven" + * }) + * + * doc1 = automerge.change(doc1, d => { + * d.pets[0].name = "Babe" + * }) + * + * const doc3 = automerge.merge(doc1, doc2) + * + * // Note that here we pass `doc3.pets`, not `doc3` + * let conflicts = automerge.getConflicts(doc3.pets[0], "name") + * + * // The two conflicting values are the keys of the conflicts object + * assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) + * ``` + * + * ## Actor IDs + * + * By default automerge will generate a random actor ID for you, but most methods + * for creating a document allow you to set the actor ID. You can get the actor ID + * associated with the document by calling {@link getActorId}. Actor IDs must not + * be used in concurrent threads of executiong - all changes by a given actor ID + * are expected to be sequential. + * + * ## Listening to patches + * + * Sometimes you want to respond to changes made to an automerge document. In this + * case you can use the {@link PatchCallback} type to receive notifications when + * changes have been made. + * + * ## Cloning + * + * Currently you cannot make mutating changes (i.e. call {@link change}) to a + * document which you have two pointers to. For example, in this code: + * + * ```javascript + * let doc1 = automerge.init() + * let doc2 = automerge.change(doc1, d => (d.key = "value")) + * ``` + * + * `doc1` and `doc2` are both pointers to the same state. Any attempt to call + * mutating methods on `doc1` will now result in an error like + * + * Attempting to change an out of date document + * + * If you encounter this you need to clone the original document, the above sample + * would work as: + * + * ```javascript + * let doc1 = automerge.init() + * let doc2 = automerge.change(automerge.clone(doc1), d => (d.key = "value")) + * ``` + * @packageDocumentation + * + * ## The {@link unstable} module + * + * We are working on some changes to automerge which are not yet complete and + * will result in backwards incompatible API changes. Once these changes are + * ready for production use we will release a new major version of automerge. + * However, until that point you can use the {@link unstable} module to try out + * the new features, documents from the {@link unstable} module are + * interoperable with documents from the main module. Please see the docs for + * the {@link unstable} module for more details. + */ export * from "./stable" import * as unstable from "./unstable" export { unstable } diff --git a/javascript/src/unstable.ts b/javascript/src/unstable.ts index 8f25586c..3ee18dbc 100644 --- a/javascript/src/unstable.ts +++ b/javascript/src/unstable.ts @@ -5,7 +5,7 @@ * ready for a stable release and/or which will result in backwards incompatible * API changes. 
The API of this module may change in arbitrary ways between * point releases - we will always document what these changes are in the - * CHANGELOG below, but only depend on this module if you are prepared to deal + * [CHANGELOG](#changelog) below, but only depend on this module if you are prepared to deal * with frequent changes. * * ## Differences from stable diff --git a/javascript/typedoc-readme.md b/javascript/typedoc-readme.md deleted file mode 100644 index 258b9e20..00000000 --- a/javascript/typedoc-readme.md +++ /dev/null @@ -1,226 +0,0 @@ -# Automerge - -This library provides the core automerge data structure and sync algorithms. -Other libraries can be built on top of this one which provide IO and -persistence. - -An automerge document can be though of an immutable POJO (plain old javascript -object) which `automerge` tracks the history of, allowing it to be merged with -any other automerge document. - -## Creating and modifying a document - -You can create a document with {@link init} or {@link from} and then make -changes to it with {@link change}, you can merge two documents with {@link -merge}. - -```javascript -import * as automerge from "@automerge/automerge" - -type DocType = {ideas: Array} - -let doc1 = automerge.init() -doc1 = automerge.change(doc1, d => { - d.ideas = [new automerge.Text("an immutable document")] -}) - -let doc2 = automerge.init() -doc2 = automerge.merge(doc2, automerge.clone(doc1)) -doc2 = automerge.change(doc2, d => { - d.ideas.push(new automerge.Text("which records it's history")) -}) - -// Note the `automerge.clone` call, see the "cloning" section of this readme for -// more detail -doc1 = automerge.merge(doc1, automerge.clone(doc2)) -doc1 = automerge.change(doc1, d => { - d.ideas[0].deleteAt(13, 8) - d.ideas[0].insertAt(13, "object") -}) - -let doc3 = automerge.merge(doc1, doc2) -// doc3 is now {ideas: ["an immutable object", "which records it's history"]} -``` - -## Applying changes from another document - -You can get a representation of the result of the last {@link change} you made -to a document with {@link getLastLocalChange} and you can apply that change to -another document using {@link applyChanges}. - -If you need to get just the changes which are in one document but not in another -you can use {@link getHeads} to get the heads of the document without the -changes and then {@link getMissingDeps}, passing the result of {@link getHeads} -on the document with the changes. - -## Saving and loading documents - -You can {@link save} a document to generate a compresed binary representation of -the document which can be loaded with {@link load}. If you have a document which -you have recently made changes to you can generate recent changes with {@link -saveIncremental}, this will generate all the changes since you last called -`saveIncremental`, the changes generated can be applied to another document with -{@link loadIncremental}. - -## Viewing different versions of a document - -Occasionally you may wish to explicitly step to a different point in a document -history. One common reason to do this is if you need to obtain a set of changes -which take the document from one state to another in order to send those changes -to another peer (or to save them somewhere). You can use {@link view} to do this. - -```javascript -import * as automerge from "@automerge/automerge" -import * as assert from "assert" - -let doc = automerge.from({ - key1: "value1", -}) - -// Make a clone of the document at this point, maybe this is actually on another -// peer. 
-let doc2 = automerge.clone < any > doc - -let heads = automerge.getHeads(doc) - -doc = - automerge.change < - any > - (doc, - d => { - d.key2 = "value2" - }) - -doc = - automerge.change < - any > - (doc, - d => { - d.key3 = "value3" - }) - -// At this point we've generated two separate changes, now we want to send -// just those changes to someone else - -// view is a cheap reference based copy of a document at a given set of heads -let before = automerge.view(doc, heads) - -// This view doesn't show the last two changes in the document state -assert.deepEqual(before, { - key1: "value1", -}) - -// Get the changes to send to doc2 -let changes = automerge.getChanges(before, doc) - -// Apply the changes at doc2 -doc2 = automerge.applyChanges < any > (doc2, changes)[0] -assert.deepEqual(doc2, { - key1: "value1", - key2: "value2", - key3: "value3", -}) -``` - -If you have a {@link view} of a document which you want to make changes to you -can {@link clone} the viewed document. - -## Syncing - -The sync protocol is stateful. This means that we start by creating a {@link -SyncState} for each peer we are communicating with using {@link initSyncState}. -Then we generate a message to send to the peer by calling {@link -generateSyncMessage}. When we receive a message from the peer we call {@link -receiveSyncMessage}. Here's a simple example of a loop which just keeps two -peers in sync. - -```javascript -let sync1 = automerge.initSyncState() -let msg: Uint8Array | null -;[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) - -while (true) { - if (msg != null) { - network.send(msg) - } - let resp: Uint8Array = - (network.receive()[(doc1, sync1, _ignore)] = - automerge.receiveSyncMessage(doc1, sync1, resp)[(sync1, msg)] = - automerge.generateSyncMessage(doc1, sync1)) -} -``` - -## Conflicts - -The only time conflicts occur in automerge documents is in concurrent -assignments to the same key in an object. In this case automerge -deterministically chooses an arbitrary value to present to the application but -you can examine the conflicts using {@link getConflicts}. - -``` -import * as automerge from "@automerge/automerge" - -type Profile = { - pets: Array<{name: string, type: string}> -} - -let doc1 = automerge.init("aaaa") -doc1 = automerge.change(doc1, d => { - d.pets = [{name: "Lassie", type: "dog"}] -}) -let doc2 = automerge.init("bbbb") -doc2 = automerge.merge(doc2, automerge.clone(doc1)) - -doc2 = automerge.change(doc2, d => { - d.pets[0].name = "Beethoven" -}) - -doc1 = automerge.change(doc1, d => { - d.pets[0].name = "Babe" -}) - -const doc3 = automerge.merge(doc1, doc2) - -// Note that here we pass `doc3.pets`, not `doc3` -let conflicts = automerge.getConflicts(doc3.pets[0], "name") - -// The two conflicting values are the keys of the conflicts object -assert.deepEqual(Object.values(conflicts), ["Babe", Beethoven"]) -``` - -## Actor IDs - -By default automerge will generate a random actor ID for you, but most methods -for creating a document allow you to set the actor ID. You can get the actor ID -associated with the document by calling {@link getActorId}. Actor IDs must not -be used in concurrent threads of executiong - all changes by a given actor ID -are expected to be sequential. - -## Listening to patches - -Sometimes you want to respond to changes made to an automerge document. In this -case you can use the {@link PatchCallback} type to receive notifications when -changes have been made. - -## Cloning - -Currently you cannot make mutating changes (i.e. 
call {@link change}) to a -document which you have two pointers to. For example, in this code: - -```javascript -let doc1 = automerge.init() -let doc2 = automerge.change(doc1, d => (d.key = "value")) -``` - -`doc1` and `doc2` are both pointers to the same state. Any attempt to call -mutating methods on `doc1` will now result in an error like - - Attempting to change an out of date document - -If you encounter this you need to clone the original document, the above sample -would work as: - -```javascript -let doc1 = automerge.init() -let doc2 = automerge.change(automerge.clone(doc1), d => (d.key = "value")) -``` From 9c3d0976c8b9d740184b291b96fedb27fddcb783 Mon Sep 17 00:00:00 2001 From: Alex Currie-Clark Date: Wed, 11 Jan 2023 16:00:03 +0000 Subject: [PATCH 22/72] Add workflow to generate a deno.land and npm release when pushing a new `automerge-wasm` version to #main --- .github/workflows/release.yaml | 96 ++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 .github/workflows/release.yaml diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..9bc2a72b --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,96 @@ +name: Release +on: + push: + branches: + - main + +jobs: + check_if_wasm_version_upgraded: + name: Check if WASM version has been upgraded + runs-on: ubuntu-latest + outputs: + wasm_version: ${{ steps.version-updated.outputs.current-package-version }} + wasm_has_updated: ${{ steps.version-updated.outputs.has-updated }} + steps: + - uses: JiPaix/package-json-updated-action@v1.0.3 + id: version-updated + with: + path: rust/automerge-wasm/package.json + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + INPUT_PATH: ${{ github.workspace }}/rust/automerge-wasm/package.json + publish-wasm: + runs-on: ubuntu-latest + needs: + - check_if_wasm_version_upgraded + # We create release only if the version in the package.json has been upgraded + if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated + steps: + - uses: denoland/setup-deno@v1 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.ref }} + - name: Get rid of local github workflows + run: rm -r .github/workflows + - name: Remove tmp_branch if it exists + run: git push origin :tmp_branch || true + - run: git checkout -b tmp_branch + - name: Install wasm-bindgen-cli + run: cargo install wasm-bindgen-cli wasm-opt + - name: Install wasm32 target + run: rustup target add wasm32-unknown-unknown + - name: run wasm js tests + id: wasm_js_tests + run: ./scripts/ci/wasm_tests + - name: run wasm deno tests + id: wasm_deno_tests + run: ./scripts/ci/deno_tests + - name: Collate deno release files + if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' + run: | + mkdir $GITHUB_WORKSPACE/deno_wasm_dist + cp $GITHUB_WORKSPACE/rust/automerge-wasm/deno/* $GITHUB_WORKSPACE/deno_wasm_dist + cp $GITHUB_WORKSPACE/rust/automerge-wasm/index.d.ts $GITHUB_WORKSPACE/deno_wasm_dist + sed -i '1i /// ' $GITHUB_WORKSPACE/deno_wasm_dist/automerge_wasm.js + - name: Create npm release + if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' + run: | + if [ "$(npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm show . 
version)" = "$VERSION" ]; then + echo "This version is already published" + exit 0 + fi + EXTRA_ARGS="--access public" + if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then + echo "Is pre-release version" + EXTRA_ARGS="$EXTRA_ARGS --tag next" + fi + if [ "$NODE_AUTH_TOKEN" = "" ]; then + echo "Can't publish on NPM, You need a NPM_TOKEN secret." + false + fi + npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm publish $EXTRA_ARGS + env: + NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} + VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} + - name: Commit wasm deno release files + run: | + git config --global user.name "actions" + git config --global user.email actions@github.com + git add $GITHUB_WORKSPACE/deno_wasm_dist + git commit -am "Add deno release files" + git push origin tmp_branch + - name: Tag wasm release + if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' + uses: softprops/action-gh-release@v1 + with: + name: Automerge Wasm v${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} + tag_name: js/automerge-wasm-${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} + target_commitish: tmp_branch + generate_release_notes: false + draft: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Remove tmp_branch + run: git push origin :tmp_branch + From 93a257896eecfe683541a483a5b4d1122ce63a76 Mon Sep 17 00:00:00 2001 From: Alex Currie-Clark Date: Wed, 11 Jan 2023 20:08:45 +0000 Subject: [PATCH 23/72] Release action: Fix for check that WASM version has been updated before publishing --- .github/workflows/release.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9bc2a72b..cd405b03 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,19 +12,19 @@ jobs: wasm_version: ${{ steps.version-updated.outputs.current-package-version }} wasm_has_updated: ${{ steps.version-updated.outputs.has-updated }} steps: - - uses: JiPaix/package-json-updated-action@v1.0.3 + - uses: JiPaix/package-json-updated-action@v1.0.5 id: version-updated with: path: rust/automerge-wasm/package.json env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - INPUT_PATH: ${{ github.workspace }}/rust/automerge-wasm/package.json publish-wasm: + name: Publish WASM package runs-on: ubuntu-latest needs: - check_if_wasm_version_upgraded # We create release only if the version in the package.json has been upgraded - if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated + if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' steps: - uses: denoland/setup-deno@v1 - uses: actions/checkout@v3 From a0d698dc8e00a4f3b7925c90b7dd35f65277d398 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 09:55:12 +0000 Subject: [PATCH 24/72] Version bump js and wasm js: 2.0.1-alpha.3 wasm: 0.1.20 --- javascript/package.json | 4 ++-- rust/automerge-wasm/package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index a424de48..5e2efbda 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1-alpha.2", + "version": "2.0.1-alpha.3", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": 
"https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -44,7 +44,7 @@ "typescript": "^4.9.4" }, "dependencies": { - "@automerge/automerge-wasm": "0.1.19", + "@automerge/automerge-wasm": "0.1.20", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 7c02d820..47dd7f32 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.19", + "version": "0.1.20", "license": "MIT", "files": [ "README.md", From d12bd3bb06b683a39dbe110ac2c3d1cb9df7662f Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 10:27:03 +0000 Subject: [PATCH 25/72] correctly call npm publish in release action --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index cd405b03..282bd8a6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -69,7 +69,7 @@ jobs: echo "Can't publish on NPM, You need a NPM_TOKEN secret." false fi - npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm publish $EXTRA_ARGS + npm publish $GITHUB_WORKSPACE/rust/automerge-wasm $EXTRA_ARGS env: NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} From 3ef60747f458f870801cd1a15108588011db3726 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 10:37:11 +0000 Subject: [PATCH 26/72] Roll back automerge-wasm to test release action The release action we are working conditionally executes based on the version of `automerge-wasm` in the previous commit. We need to trigger it even though the version has not changed so we roll back the version in this commit and the commit immediately following this will bump it again. --- rust/automerge-wasm/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 47dd7f32..7c02d820 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.20", + "version": "0.1.19", "license": "MIT", "files": [ "README.md", From 5c02445bee66e1ce3cc981920902b851fe1bb668 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 10:39:11 +0000 Subject: [PATCH 27/72] Bump automerge-wasm, again In order to re-trigger the release action we are testing we bump the version which was de-bumped in the last commit. 
--- rust/automerge-wasm/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 7c02d820..47dd7f32 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.19", + "version": "0.1.20", "license": "MIT", "files": [ "README.md", From f073dbf70142cb17ed1369e2046350fbdcdb1302 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 11:04:22 +0000 Subject: [PATCH 28/72] use setup-node prior to attempting to publish in release action --- .github/workflows/release.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 282bd8a6..530f07c7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -24,8 +24,12 @@ jobs: needs: - check_if_wasm_version_upgraded # We create release only if the version in the package.json has been upgraded - if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' + #if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' steps: + - uses: actions/setup-node@v3 + with: + node-version: '16.x' + registry-url: 'https://registry.npmjs.org' - uses: denoland/setup-deno@v1 - uses: actions/checkout@v3 with: From 2d8df125224a251da729efb149dda7f8bb255d26 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 11:35:48 +0000 Subject: [PATCH 29/72] re-enable version check for WASM release --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 530f07c7..15495233 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -24,7 +24,7 @@ jobs: needs: - check_if_wasm_version_upgraded # We create release only if the version in the package.json has been upgraded - #if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' + if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' steps: - uses: actions/setup-node@v3 with: From 22e9915fac632adb213e4675c6169953167d3349 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 12 Jan 2023 12:32:53 +0000 Subject: [PATCH 30/72] automerge-wasm: publish release build in Github Action --- .github/workflows/release.yaml | 6 ++++++ javascript/package.json | 2 +- rust/automerge-wasm/package.json | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 15495233..b3c0aed1 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -50,12 +50,18 @@ jobs: - name: run wasm deno tests id: wasm_deno_tests run: ./scripts/ci/deno_tests + - name: build release + id: build_release + run: | + npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm run release - name: Collate deno release files if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' run: | mkdir $GITHUB_WORKSPACE/deno_wasm_dist cp $GITHUB_WORKSPACE/rust/automerge-wasm/deno/* $GITHUB_WORKSPACE/deno_wasm_dist cp $GITHUB_WORKSPACE/rust/automerge-wasm/index.d.ts $GITHUB_WORKSPACE/deno_wasm_dist + cp $GITHUB_WORKSPACE/rust/automerge-wasm/README.md 
$GITHUB_WORKSPACE/deno_wasm_dist + cp $GITHUB_WORKSPACE/rust/automerge-wasm/LICENSE $GITHUB_WORKSPACE/deno_wasm_dist sed -i '1i /// ' $GITHUB_WORKSPACE/deno_wasm_dist/automerge_wasm.js - name: Create npm release if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' diff --git a/javascript/package.json b/javascript/package.json index 5e2efbda..53cc6fdc 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -44,7 +44,7 @@ "typescript": "^4.9.4" }, "dependencies": { - "@automerge/automerge-wasm": "0.1.20", + "@automerge/automerge-wasm": "0.1.21", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 47dd7f32..76167a3e 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.20", + "version": "0.1.21", "license": "MIT", "files": [ "README.md", From 681a3f1f3fd6161cb7733e07cdfe46d68b6967fe Mon Sep 17 00:00:00 2001 From: Alex Currie-Clark Date: Thu, 12 Jan 2023 07:04:40 +0000 Subject: [PATCH 31/72] Add github action to deploy deno package --- .github/workflows/release.yaml | 110 +++++++++++++++++++++++- javascript/.denoifyrc.json | 3 + javascript/.gitignore | 1 + javascript/config/cjs.json | 7 +- javascript/config/declonly.json | 7 +- javascript/config/mjs.json | 7 +- javascript/deno-tests/deno.ts | 10 +++ javascript/package.json | 5 +- javascript/scripts/deno-prefixer.mjs | 9 ++ javascript/scripts/denoify-replacer.mjs | 42 +++++++++ javascript/src/constants.ts | 2 +- javascript/src/counter.ts | 2 +- javascript/src/internal_state.ts | 4 +- javascript/src/low_level.ts | 20 ++--- javascript/src/numbers.ts | 2 +- javascript/src/proxies.ts | 9 +- javascript/src/stable.ts | 45 +++++----- javascript/src/text.ts | 8 +- javascript/src/unstable.ts | 12 ++- javascript/src/uuid.deno.ts | 26 ++++++ javascript/tsconfig.json | 2 +- scripts/ci/deno_tests | 13 ++- 22 files changed, 296 insertions(+), 50 deletions(-) create mode 100644 javascript/.denoifyrc.json create mode 100644 javascript/deno-tests/deno.ts create mode 100644 javascript/scripts/deno-prefixer.mjs create mode 100644 javascript/scripts/denoify-replacer.mjs create mode 100644 javascript/src/uuid.deno.ts diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b3c0aed1..762671ff 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -103,4 +103,112 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Remove tmp_branch run: git push origin :tmp_branch - + check_if_js_version_upgraded: + name: Check if JS version has been upgraded + runs-on: ubuntu-latest + outputs: + js_version: ${{ steps.version-updated.outputs.current-package-version }} + js_has_updated: ${{ steps.version-updated.outputs.has-updated }} + steps: + - uses: JiPaix/package-json-updated-action@v1.0.5 + id: version-updated + with: + path: javascript/package.json + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + publish-js: + name: Publish JS package + runs-on: ubuntu-latest + needs: + - check_if_js_version_upgraded + - check_if_wasm_version_upgraded + - publish-wasm + # We create release only if the version in the package.json has been upgraded and after the WASM release + if: | + (always() && ! 
cancelled()) && + (needs.publish-wasm.result == 'success' || needs.publish-wasm.result == 'skipped') && + needs.check_if_js_version_upgraded.outputs.js_has_updated == 'true' + steps: + - uses: actions/setup-node@v3 + with: + node-version: '16.x' + registry-url: 'https://registry.npmjs.org' + - uses: denoland/setup-deno@v1 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.ref }} + - name: Get rid of local github workflows + run: rm -r .github/workflows + - name: Remove js_tmp_branch if it exists + run: git push origin :js_tmp_branch || true + - run: git checkout -b js_tmp_branch + - name: check js formatting + run: | + yarn global add prettier + prettier -c javascript/.prettierrc javascript + - name: run js tests + id: js_tests + run: | + cargo install wasm-bindgen-cli wasm-opt + rustup target add wasm32-unknown-unknown + ./scripts/ci/js_tests + - name: build js release + id: build_release + run: | + npm --prefix $GITHUB_WORKSPACE/javascript run build + - name: build js deno release + id: build_deno_release + run: | + VERSION=$WASM_VERSION npm --prefix $GITHUB_WORKSPACE/javascript run deno:build + env: + WASM_VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} + - name: run deno tests + id: deno_tests + run: | + npm --prefix $GITHUB_WORKSPACE/javascript run deno:test + - name: Collate deno release files + if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' + run: | + mkdir $GITHUB_WORKSPACE/deno_js_dist + cp $GITHUB_WORKSPACE/javascript/deno_dist/* $GITHUB_WORKSPACE/deno_js_dist + - name: Create npm release + if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' + run: | + if [ "$(npm --prefix $GITHUB_WORKSPACE/javascript show . version)" = "$VERSION" ]; then + echo "This version is already published" + exit 0 + fi + EXTRA_ARGS="--access public" + if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then + echo "Is pre-release version" + EXTRA_ARGS="$EXTRA_ARGS --tag next" + fi + if [ "$NODE_AUTH_TOKEN" = "" ]; then + echo "Can't publish on NPM, You need a NPM_TOKEN secret." 
+ false + fi + npm publish $GITHUB_WORKSPACE/javascript $EXTRA_ARGS + env: + NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} + VERSION: ${{ needs.check_if_js_version_upgraded.outputs.js_version }} + - name: Commit js deno release files + run: | + git config --global user.name "actions" + git config --global user.email actions@github.com + git add $GITHUB_WORKSPACE/deno_js_dist + git commit -am "Add deno js release files" + git push origin js_tmp_branch + - name: Tag JS release + if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' + uses: softprops/action-gh-release@v1 + with: + name: Automerge v${{ needs.check_if_js_version_upgraded.outputs.js_version }} + tag_name: js/automerge-${{ needs.check_if_js_version_upgraded.outputs.js_version }} + target_commitish: js_tmp_branch + generate_release_notes: false + draft: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Remove js_tmp_branch + run: git push origin :js_tmp_branch diff --git a/javascript/.denoifyrc.json b/javascript/.denoifyrc.json new file mode 100644 index 00000000..9453a31f --- /dev/null +++ b/javascript/.denoifyrc.json @@ -0,0 +1,3 @@ +{ + "replacer": "scripts/denoify-replacer.mjs" +} diff --git a/javascript/.gitignore b/javascript/.gitignore index ab4ec70d..f98d9db2 100644 --- a/javascript/.gitignore +++ b/javascript/.gitignore @@ -3,3 +3,4 @@ dist docs/ .vim +deno_dist/ diff --git a/javascript/config/cjs.json b/javascript/config/cjs.json index fc500311..0b135067 100644 --- a/javascript/config/cjs.json +++ b/javascript/config/cjs.json @@ -1,6 +1,11 @@ { "extends": "../tsconfig.json", - "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], + "exclude": [ + "../dist/**/*", + "../node_modules", + "../test/**/*", + "../src/**/*.deno.ts" + ], "compilerOptions": { "outDir": "../dist/cjs" } diff --git a/javascript/config/declonly.json b/javascript/config/declonly.json index df615930..7c1df687 100644 --- a/javascript/config/declonly.json +++ b/javascript/config/declonly.json @@ -1,6 +1,11 @@ { "extends": "../tsconfig.json", - "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], + "exclude": [ + "../dist/**/*", + "../node_modules", + "../test/**/*", + "../src/**/*.deno.ts" + ], "emitDeclarationOnly": true, "compilerOptions": { "outDir": "../dist" diff --git a/javascript/config/mjs.json b/javascript/config/mjs.json index 2ee7a8b8..ecf3ce36 100644 --- a/javascript/config/mjs.json +++ b/javascript/config/mjs.json @@ -1,6 +1,11 @@ { "extends": "../tsconfig.json", - "exclude": ["../dist/**/*", "../node_modules", "../test/**/*"], + "exclude": [ + "../dist/**/*", + "../node_modules", + "../test/**/*", + "../src/**/*.deno.ts" + ], "compilerOptions": { "target": "es6", "module": "es6", diff --git a/javascript/deno-tests/deno.ts b/javascript/deno-tests/deno.ts new file mode 100644 index 00000000..fc0a4dad --- /dev/null +++ b/javascript/deno-tests/deno.ts @@ -0,0 +1,10 @@ +import * as Automerge from "../deno_dist/index.ts" + +Deno.test("It should create, clone and free", () => { + let doc1 = Automerge.init() + let doc2 = Automerge.clone(doc1) + + // this is only needed if weakrefs are not supported + Automerge.free(doc1) + Automerge.free(doc2) +}) diff --git a/javascript/package.json b/javascript/package.json index 53cc6fdc..39464fac 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1-alpha.3", + "version": "2.0.1-alpha.4", "description": "Javascript implementation of automerge, backed by 
@automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -25,6 +25,8 @@ "lint": "eslint src", "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc -p config/declonly.json --emitDeclarationOnly", "test": "ts-mocha test/*.ts", + "deno:build": "denoify && node ./scripts/deno-prefixer.mjs", + "deno:test": "deno test ./deno-tests/deno.ts --allow-read --allow-net", "watch-docs": "typedoc src/index.ts --watch --readme none" }, "devDependencies": { @@ -33,6 +35,7 @@ "@types/uuid": "^9.0.0", "@typescript-eslint/eslint-plugin": "^5.46.0", "@typescript-eslint/parser": "^5.46.0", + "denoify": "^1.4.5", "eslint": "^8.29.0", "fast-sha256": "^1.3.0", "mocha": "^10.2.0", diff --git a/javascript/scripts/deno-prefixer.mjs b/javascript/scripts/deno-prefixer.mjs new file mode 100644 index 00000000..28544102 --- /dev/null +++ b/javascript/scripts/deno-prefixer.mjs @@ -0,0 +1,9 @@ +import * as fs from "fs" + +const files = ["./deno_dist/proxies.ts"] +for (const filepath of files) { + const data = fs.readFileSync(filepath) + fs.writeFileSync(filepath, "// @ts-nocheck \n" + data) + + console.log('Prepended "// @ts-nocheck" to ' + filepath) +} diff --git a/javascript/scripts/denoify-replacer.mjs b/javascript/scripts/denoify-replacer.mjs new file mode 100644 index 00000000..fcf4bc45 --- /dev/null +++ b/javascript/scripts/denoify-replacer.mjs @@ -0,0 +1,42 @@ +// @denoify-ignore + +import { makeThisModuleAnExecutableReplacer } from "denoify" +// import { assert } from "tsafe"; +// import * as path from "path"; + +makeThisModuleAnExecutableReplacer( + async ({ parsedImportExportStatement, destDirPath, version }) => { + version = process.env.VERSION || version + + switch (parsedImportExportStatement.parsedArgument.nodeModuleName) { + case "@automerge/automerge-wasm": + { + const moduleRoot = + process.env.MODULE_ROOT || + `https://deno.land/x/automerge_wasm@${version}` + /* + *We expect not to run against statements like + *import(..).then(...) + *or + *export * from "..." + *in our code. 
+ */ + if ( + !parsedImportExportStatement.isAsyncImport && + (parsedImportExportStatement.statementType === "import" || + parsedImportExportStatement.statementType === "export") + ) { + if (parsedImportExportStatement.isTypeOnly) { + return `${parsedImportExportStatement.statementType} type ${parsedImportExportStatement.target} from "${moduleRoot}/index.d.ts";` + } else { + return `${parsedImportExportStatement.statementType} ${parsedImportExportStatement.target} from "${moduleRoot}/automerge_wasm.js";` + } + } + } + break + } + + //The replacer should return undefined when we want to let denoify replace the statement + return undefined + } +) diff --git a/javascript/src/constants.ts b/javascript/src/constants.ts index d3bd8138..7b714772 100644 --- a/javascript/src/constants.ts +++ b/javascript/src/constants.ts @@ -2,7 +2,7 @@ export const STATE = Symbol.for("_am_meta") // symbol used to hide application metadata on automerge objects export const TRACE = Symbol.for("_am_trace") // used for debugging -export const OBJECT_ID = Symbol.for("_am_objectId") // synbol used to hide the object id on automerge objects +export const OBJECT_ID = Symbol.for("_am_objectId") // symbol used to hide the object id on automerge objects export const IS_PROXY = Symbol.for("_am_isProxy") // symbol used to test if the document is a proxy object export const UINT = Symbol.for("_am_uint") diff --git a/javascript/src/counter.ts b/javascript/src/counter.ts index 6b9ad277..873fa157 100644 --- a/javascript/src/counter.ts +++ b/javascript/src/counter.ts @@ -1,4 +1,4 @@ -import { Automerge, ObjID, Prop } from "@automerge/automerge-wasm" +import { Automerge, type ObjID, type Prop } from "@automerge/automerge-wasm" import { COUNTER } from "./constants" /** * The most basic CRDT: an integer value that can be changed only by diff --git a/javascript/src/internal_state.ts b/javascript/src/internal_state.ts index 92ab648e..f3da49b1 100644 --- a/javascript/src/internal_state.ts +++ b/javascript/src/internal_state.ts @@ -1,8 +1,8 @@ -import { ObjID, Heads, Automerge } from "@automerge/automerge-wasm" +import { type ObjID, type Heads, Automerge } from "@automerge/automerge-wasm" import { STATE, OBJECT_ID, TRACE, IS_PROXY } from "./constants" -import { type Doc, PatchCallback } from "./types" +import type { Doc, PatchCallback } from "./types" export interface InternalState { handle: Automerge diff --git a/javascript/src/low_level.ts b/javascript/src/low_level.ts index 94ac63db..63ef5546 100644 --- a/javascript/src/low_level.ts +++ b/javascript/src/low_level.ts @@ -1,20 +1,20 @@ import { + type API, Automerge, - Change, - DecodedChange, - Actor, + type Change, + type DecodedChange, + type Actor, SyncState, - SyncMessage, - JsSyncState, - DecodedSyncMessage, - ChangeToEncode, + type SyncMessage, + type JsSyncState, + type DecodedSyncMessage, + type ChangeToEncode, } from "@automerge/automerge-wasm" -export { ChangeToEncode } from "@automerge/automerge-wasm" -import { API } from "@automerge/automerge-wasm" +export type { ChangeToEncode } from "@automerge/automerge-wasm" export function UseApi(api: API) { for (const k in api) { - ApiHandler[k] = api[k] + ;(ApiHandler as any)[k] = (api as any)[k] } } diff --git a/javascript/src/numbers.ts b/javascript/src/numbers.ts index d52a36c5..7ad95998 100644 --- a/javascript/src/numbers.ts +++ b/javascript/src/numbers.ts @@ -1,4 +1,4 @@ -// Convience classes to allow users to stricly specify the number type they want +// Convenience classes to allow users to strictly specify the number type they 
want import { INT, UINT, F64 } from "./constants" diff --git a/javascript/src/proxies.ts b/javascript/src/proxies.ts index 3fb3a825..7a99cf80 100644 --- a/javascript/src/proxies.ts +++ b/javascript/src/proxies.ts @@ -1,7 +1,12 @@ import { Text } from "./text" -import { Automerge, Heads, ObjID } from "@automerge/automerge-wasm" -import { Prop } from "@automerge/automerge-wasm" import { + Automerge, + type Heads, + type ObjID, + type Prop, +} from "@automerge/automerge-wasm" + +import type { AutomergeValue, ScalarValue, MapValue, diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts index c52d0a4c..1f38cb27 100644 --- a/javascript/src/stable.ts +++ b/javascript/src/stable.ts @@ -4,47 +4,50 @@ export { /** @hidden */ uuid } from "./uuid" import { rootProxy, listProxy, mapProxy, textProxy } from "./proxies" import { STATE } from "./constants" -import { AutomergeValue, Counter, Doc, PatchCallback } from "./types" -export { - AutomergeValue, +import { + type AutomergeValue, Counter, - Doc, + type Doc, + type PatchCallback, +} from "./types" +export { + type AutomergeValue, + Counter, + type Doc, Int, Uint, Float64, - Patch, - PatchCallback, - ScalarValue, + type Patch, + type PatchCallback, + type ScalarValue, Text, } from "./types" import { Text } from "./text" -import { type API } from "@automerge/automerge-wasm" -export { - PutPatch, - DelPatch, - SplicePatch, - IncPatch, - SyncMessage, -} from "@automerge/automerge-wasm" -import { ApiHandler, ChangeToEncode, UseApi } from "./low_level" - -import { +import type { + API, Actor as ActorId, Prop, ObjID, Change, DecodedChange, Heads, - Automerge, MaterializeValue, -} from "@automerge/automerge-wasm" -import { JsSyncState as SyncState, SyncMessage, DecodedSyncMessage, } from "@automerge/automerge-wasm" +export type { + PutPatch, + DelPatch, + SplicePatch, + IncPatch, + SyncMessage, +} from "@automerge/automerge-wasm" +import { ApiHandler, type ChangeToEncode, UseApi } from "./low_level" + +import { Automerge } from "@automerge/automerge-wasm" import { RawString } from "./raw_string" diff --git a/javascript/src/text.ts b/javascript/src/text.ts index bb0a868d..f87af891 100644 --- a/javascript/src/text.ts +++ b/javascript/src/text.ts @@ -1,10 +1,12 @@ -import { Value } from "@automerge/automerge-wasm" +import type { Value } from "@automerge/automerge-wasm" import { TEXT, STATE } from "./constants" +import type { InternalState } from "./internal_state" export class Text { elems: Array str: string | undefined - spans: Array | undefined + spans: Array | undefined; + [STATE]?: InternalState constructor(text?: string | string[] | Value[]) { if (typeof text === "string") { @@ -208,7 +210,7 @@ export class Text { new Text(this.elems.slice(start, end)) } - some(test: (Value) => boolean): boolean { + some(test: (arg: Value) => boolean): boolean { return this.elems.some(test) } diff --git a/javascript/src/unstable.ts b/javascript/src/unstable.ts index 3ee18dbc..b448d955 100644 --- a/javascript/src/unstable.ts +++ b/javascript/src/unstable.ts @@ -37,7 +37,15 @@ */ import { Counter } from "./types" -export { Counter, Doc, Int, Uint, Float64, Patch, PatchCallback } from "./types" +export { + Counter, + type Doc, + Int, + Uint, + Float64, + type Patch, + type PatchCallback, +} from "./types" import type { PatchCallback } from "./stable" @@ -59,7 +67,7 @@ export type ScalarValue = export type Conflicts = { [key: string]: AutomergeValue } -export { +export type { PutPatch, DelPatch, SplicePatch, diff --git a/javascript/src/uuid.deno.ts 
b/javascript/src/uuid.deno.ts new file mode 100644 index 00000000..04c9b93d --- /dev/null +++ b/javascript/src/uuid.deno.ts @@ -0,0 +1,26 @@ +import * as v4 from "https://deno.land/x/uuid@v0.1.2/mod.ts" + +// this file is a deno only port of the uuid module + +function defaultFactory() { + return v4.uuid().replace(/-/g, "") +} + +let factory = defaultFactory + +interface UUIDFactory extends Function { + setFactory(f: typeof factory): void + reset(): void +} + +export const uuid: UUIDFactory = () => { + return factory() +} + +uuid.setFactory = newFactory => { + factory = newFactory +} + +uuid.reset = () => { + factory = defaultFactory +} diff --git a/javascript/tsconfig.json b/javascript/tsconfig.json index c6684ca0..628aea8e 100644 --- a/javascript/tsconfig.json +++ b/javascript/tsconfig.json @@ -15,5 +15,5 @@ "outDir": "./dist" }, "include": ["src/**/*", "test/**/*"], - "exclude": ["./dist/**/*", "./node_modules"] + "exclude": ["./dist/**/*", "./node_modules", "./src/**/*.deno.ts"] } diff --git a/scripts/ci/deno_tests b/scripts/ci/deno_tests index bc655468..bdec9b95 100755 --- a/scripts/ci/deno_tests +++ b/scripts/ci/deno_tests @@ -1,6 +1,17 @@ THIS_SCRIPT=$(dirname "$0"); WASM_PROJECT=$THIS_SCRIPT/../../rust/automerge-wasm; +JS_PROJECT=$THIS_SCRIPT/../../javascript; +echo "Running Wasm Deno tests"; yarn --cwd $WASM_PROJECT install; yarn --cwd $WASM_PROJECT build; -deno test $WASM_PROJECT/deno-tests/deno.ts --allow-read +deno test $WASM_PROJECT/deno-tests/deno.ts --allow-read; + +cp $WASM_PROJECT/index.d.ts $WASM_PROJECT/deno; +sed -i '1i /// ' $WASM_PROJECT/deno/automerge_wasm.js; + +echo "Running JS Deno tests"; +yarn --cwd $JS_PROJECT install; +ROOT_MODULE=$WASM_PROJECT/deno yarn --cwd $JS_PROJECT deno:build; +yarn --cwd $JS_PROJECT deno:test; + From d8df1707d903497417a74d6febf7675b8f8695c4 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Sat, 14 Jan 2023 11:06:58 +0000 Subject: [PATCH 32/72] Update rust toolchain for "linux" step --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a5d42010..c2d469d5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -136,7 +136,7 @@ jobs: strategy: matrix: toolchain: - - 1.60.0 + - 1.66.0 - nightly continue-on-error: ${{ matrix.toolchain == 'nightly' }} steps: From 964ae2bd818bd3176092aa35083bfeaee4eeca84 Mon Sep 17 00:00:00 2001 From: alexjg Date: Sat, 14 Jan 2023 11:27:48 +0000 Subject: [PATCH 33/72] Fix SeekOpWithPatch on optrees with only internal optrees (#496) In #480 we fixed an issue where `SeekOp` calculated an incorrect insertion index on optrees where the only visible ops were on internal nodes. We forgot to port this fix to `SeekOpWithPatch`, which has almost the same logic just with additional work done in order to notify an `OpObserver` of changes. 
Add a test and fix to `SeekOpWithPatch` --- rust/automerge/src/query/seek_op.rs | 75 +++++++++++-------- .../automerge/src/query/seek_op_with_patch.rs | 34 ++++++++- 2 files changed, 76 insertions(+), 33 deletions(-) diff --git a/rust/automerge/src/query/seek_op.rs b/rust/automerge/src/query/seek_op.rs index 4d955f96..22d1f58d 100644 --- a/rust/automerge/src/query/seek_op.rs +++ b/rust/automerge/src/query/seek_op.rs @@ -161,7 +161,7 @@ impl<'a> TreeQuery<'a> for SeekOp<'a> { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::{ op_set::OpSet, op_tree::B, @@ -170,36 +170,43 @@ mod tests { ActorId, ScalarValue, }; - #[test] - fn seek_on_page_boundary() { - // Create an optree in which the only visible ops are on the boundaries of the nodes, - // i.e. the visible elements are in the internal nodes. Like so - // - // .----------------------. - // | id | key | succ | - // | B | "a" | | - // | 2B | "b" | | - // '----------------------' - // / | \ - // ;------------------------. | `------------------------------------. - // | id | op | succ | | | id | op | succ | - // | 0 |set "a" | 1 | | | 2B + 1 |set "c" | 2B + 2 | - // | 1 |set "a" | 2 | | | 2B + 2 |set "c" | 2B + 3 | - // | 2 |set "a" | 3 | | ... - // ... | | 3B |set "c" | | - // | B - 1 |set "a" | B | | '------------------------------------' - // '--------'--------'------' | - // | - // .-----------------------------. - // | id | key | succ | - // | B + 1 | "b" | B + 2 | - // | B + 2 | "b" | B + 3 | - // .... - // | B + (B - 1 | "b" | 2B | - // '-----------------------------' - // - // The important point here is that the leaf nodes contain no visible ops for keys "a" and - // "b". + /// Create an optree in which the only visible ops are on the boundaries of the nodes, + /// i.e. the visible elements are in the internal nodes. Like so + /// + /// ```notrust + /// + /// .----------------------. + /// | id | key | succ | + /// | B | "a" | | + /// | 2B | "b" | | + /// '----------------------' + /// / | \ + /// ;------------------------. | `------------------------------------. + /// | id | op | succ | | | id | op | succ | + /// | 0 |set "a" | 1 | | | 2B + 1 |set "c" | 2B + 2 | + /// | 1 |set "a" | 2 | | | 2B + 2 |set "c" | 2B + 3 | + /// | 2 |set "a" | 3 | | ... + /// ... | | 3B |set "c" | | + /// | B - 1 |set "a" | B | | '------------------------------------' + /// '--------'--------'------' | + /// | + /// .-----------------------------. + /// | id | key | succ | + /// | B + 1 | "b" | B + 2 | + /// | B + 2 | "b" | B + 3 | + /// .... + /// | B + (B - 1 | "b" | 2B | + /// '-----------------------------' + /// ``` + /// + /// The important point here is that the leaf nodes contain no visible ops for keys "a" and + /// "b". + /// + /// # Returns + /// + /// The opset in question and an op which should be inserted at the next position after the + /// internally visible ops. 
+ pub(crate) fn optree_with_only_internally_visible_ops() -> (OpSet, Op) { let mut set = OpSet::new(); let actor = set.m.actors.cache(ActorId::random()); let a = set.m.props.cache("a".to_string()); @@ -255,6 +262,12 @@ mod tests { .sorted_opids(std::iter::once(OpId::new(B as u64 - 1, actor))), insert: false, }; + (set, new_op) + } + + #[test] + fn seek_on_page_boundary() { + let (set, new_op) = optree_with_only_internally_visible_ops(); let q = SeekOp::new(&new_op); let q = set.search(&ObjId::root(), q); diff --git a/rust/automerge/src/query/seek_op_with_patch.rs b/rust/automerge/src/query/seek_op_with_patch.rs index 0cc48b37..7cacb032 100644 --- a/rust/automerge/src/query/seek_op_with_patch.rs +++ b/rust/automerge/src/query/seek_op_with_patch.rs @@ -136,8 +136,18 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { if self.pos + child.len() >= start { // skip empty nodes if child.index.visible_len(self.encoding) == 0 { - self.pos += child.len(); - QueryResult::Next + let child_contains_key = + child.elements.iter().any(|e| ops[*e].key == self.op.key); + if !child_contains_key { + // If we are in a node which has no visible ops, but none of the + // elements of the node match the key of the op, then we must have + // finished processing and so we can just return. + // See https://github.com/automerge/automerge-rs/pull/480 + QueryResult::Finish + } else { + self.pos += child.len(); + QueryResult::Next + } } else { QueryResult::Descend } @@ -291,3 +301,23 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { } } } + +#[cfg(test)] +mod tests { + use super::{super::seek_op::tests::optree_with_only_internally_visible_ops, SeekOpWithPatch}; + use crate::{ + op_tree::B, + types::{ListEncoding, ObjId}, + }; + + #[test] + fn test_insert_on_internal_only_nodes() { + let (set, new_op) = optree_with_only_internally_visible_ops(); + + let q = SeekOpWithPatch::new(&new_op, ListEncoding::List); + let q = set.search(&ObjId::root(), q); + + // we've inserted `B - 1` elements for "a", so the index should be `B` + assert_eq!(q.pos, B); + } +} From 5629a7bec4ccf5be72bd38776c26167ba54bea4c Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 19 Jan 2023 15:38:27 +0000 Subject: [PATCH 34/72] Various CI script fixes (#501) Some of the scripts in scripts/ci were not reliable detecting the path they were operating in. Additionally the deno_tests script was not correctly picking up the ROOT_MODULE environment variable. Add more robust path handling and fix the deno_tests script. 
--- javascript/.prettierignore | 1 + javascript/scripts/denoify-replacer.mjs | 2 +- scripts/ci/cmake-build | 3 ++- scripts/ci/deno_tests | 20 ++++++++++++-------- scripts/ci/fmt_js | 4 +++- scripts/ci/js_tests | 6 ++++-- scripts/ci/lint | 5 ++++- scripts/ci/rust-docs | 4 +++- scripts/ci/wasm_tests | 3 ++- 9 files changed, 32 insertions(+), 16 deletions(-) diff --git a/javascript/.prettierignore b/javascript/.prettierignore index c2dcd4bb..6ab2f796 100644 --- a/javascript/.prettierignore +++ b/javascript/.prettierignore @@ -1,3 +1,4 @@ e2e/verdacciodb dist docs +deno_dist diff --git a/javascript/scripts/denoify-replacer.mjs b/javascript/scripts/denoify-replacer.mjs index fcf4bc45..e183ba0d 100644 --- a/javascript/scripts/denoify-replacer.mjs +++ b/javascript/scripts/denoify-replacer.mjs @@ -12,7 +12,7 @@ makeThisModuleAnExecutableReplacer( case "@automerge/automerge-wasm": { const moduleRoot = - process.env.MODULE_ROOT || + process.env.ROOT_MODULE || `https://deno.land/x/automerge_wasm@${version}` /* *We expect not to run against statements like diff --git a/scripts/ci/cmake-build b/scripts/ci/cmake-build index 3924dc4a..f6f9f9b1 100755 --- a/scripts/ci/cmake-build +++ b/scripts/ci/cmake-build @@ -1,7 +1,8 @@ #!/usr/bin/env bash set -eoux pipefail -THIS_SCRIPT=$(dirname "$0"); +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" # \note CMake's default build types are "Debug", "MinSizeRel", "Release" and # "RelWithDebInfo" but custom ones can also be defined so we pass it verbatim. BUILD_TYPE=$1; diff --git a/scripts/ci/deno_tests b/scripts/ci/deno_tests index bdec9b95..9f297557 100755 --- a/scripts/ci/deno_tests +++ b/scripts/ci/deno_tests @@ -1,17 +1,21 @@ -THIS_SCRIPT=$(dirname "$0"); +#!/usr/bin/env bash +set -eou pipefail +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" WASM_PROJECT=$THIS_SCRIPT/../../rust/automerge-wasm; JS_PROJECT=$THIS_SCRIPT/../../javascript; +E2E_PROJECT=$THIS_SCRIPT/../../javascript/e2e; -echo "Running Wasm Deno tests"; -yarn --cwd $WASM_PROJECT install; -yarn --cwd $WASM_PROJECT build; -deno test $WASM_PROJECT/deno-tests/deno.ts --allow-read; - -cp $WASM_PROJECT/index.d.ts $WASM_PROJECT/deno; +echo "building wasm and js" +yarn --cwd $E2E_PROJECT install; +yarn --cwd $E2E_PROJECT e2e buildjs; +cp $WASM_PROJECT/index.d.ts $WASM_PROJECT/deno/; sed -i '1i /// ' $WASM_PROJECT/deno/automerge_wasm.js; +echo "Running Wasm Deno tests"; +deno test $WASM_PROJECT/deno-tests/deno.ts --allow-read; + echo "Running JS Deno tests"; -yarn --cwd $JS_PROJECT install; ROOT_MODULE=$WASM_PROJECT/deno yarn --cwd $JS_PROJECT deno:build; yarn --cwd $JS_PROJECT deno:test; diff --git a/scripts/ci/fmt_js b/scripts/ci/fmt_js index acaf1e08..8f387b6a 100755 --- a/scripts/ci/fmt_js +++ b/scripts/ci/fmt_js @@ -1,5 +1,7 @@ #!/usr/bin/env bash set -eoux pipefail -yarn --cwd javascript prettier -c . +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +yarn --cwd $THIS_SCRIPT/../../javascript prettier -c . 
diff --git a/scripts/ci/js_tests b/scripts/ci/js_tests index b05edd1c..68205a33 100755 --- a/scripts/ci/js_tests +++ b/scripts/ci/js_tests @@ -1,6 +1,8 @@ -set -e +#!/usr/bin/env bash +set -eoux pipefail -THIS_SCRIPT=$(dirname "$0"); +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" WASM_PROJECT=$THIS_SCRIPT/../../rust/automerge-wasm; JS_PROJECT=$THIS_SCRIPT/../../javascript; E2E_PROJECT=$THIS_SCRIPT/../../javascript/e2e; diff --git a/scripts/ci/lint b/scripts/ci/lint index 15a0228d..87a16765 100755 --- a/scripts/ci/lint +++ b/scripts/ci/lint @@ -1,7 +1,10 @@ #!/usr/bin/env bash set -eoux pipefail -cd rust +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" + +cd $THIS_SCRIPT/../../rust # Force clippy to consider all local sources # https://github.com/rust-lang/rust-clippy/issues/4612 find . -name "*.rs" -not -path "./target/*" -exec touch "{}" + diff --git a/scripts/ci/rust-docs b/scripts/ci/rust-docs index bbbc4fe1..4be0ed9a 100755 --- a/scripts/ci/rust-docs +++ b/scripts/ci/rust-docs @@ -1,6 +1,8 @@ #!/usr/bin/env bash set -eoux pipefail -cd rust +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +cd $THIS_SCRIPT/../../rust RUSTDOCFLAGS="-D rustdoc::broken-intra-doc-links -D warnings" \ cargo doc --no-deps --workspace --document-private-items diff --git a/scripts/ci/wasm_tests b/scripts/ci/wasm_tests index 2f273d99..fac344d8 100755 --- a/scripts/ci/wasm_tests +++ b/scripts/ci/wasm_tests @@ -1,4 +1,5 @@ -THIS_SCRIPT=$(dirname "$0"); +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" WASM_PROJECT=$THIS_SCRIPT/../../rust/automerge-wasm; yarn --cwd $WASM_PROJECT install; From d8baa116e7bc6f1f25e56bbbd75fc2ffc7140170 Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 19 Jan 2023 17:02:47 +0000 Subject: [PATCH 35/72] automerge-rs: Add `ExId::to_bytes` (#491) The `ExId` structure has some internal details which make lookups for object IDs which were produced by the document doing the looking up faster. These internal details are quite specific to the implementation so we don't want to expose them as a public API. On the other hand, we need to be able to serialize `ExId`s so that FFI clients can hold on to them without referencing memory which is owned by the document (ahem, looking at you Java). Introduce `ExId::to_bytes` and `TryFrom<&[u8]> ExId` implementing a canonical serialization which includes a version tag, giveing us compatibility options if we decide to change the implementation. 
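
As an aside for consumers of this API (a minimal usage sketch, not part of the patch; it assumes only the `AutoCommit`/`Transactable` calls already used elsewhere in this series plus the `to_bytes`/`TryFrom` impls added below): an FFI wrapper can turn an object ID into owned bytes, store them wherever it likes, and later rebuild an `ObjId` that compares equal to the original.

```rust
use automerge::{transaction::Transactable, AutoCommit, ObjId, ObjType, ROOT};

fn main() {
    let mut doc = AutoCommit::new();
    let list = doc.put_object(ROOT, "list", ObjType::List).unwrap();

    // The encoding is an owned, version-tagged byte array which does not borrow
    // from `doc`, so it can safely cross an FFI boundary.
    let bytes: Vec<u8> = list.to_bytes();

    // Later: rebuild the id and check that it round-trips.
    let restored = ObjId::try_from(&bytes[..]).unwrap();
    assert_eq!(list, restored);
}
```

The version tag in the leading byte is what leaves room to change this encoding later without invalidating ids that clients have already stored.
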
--- rust/automerge/src/exid.rs | 135 +++++++++++++++++++++++++++++++++++++ rust/automerge/src/lib.rs | 2 +- 2 files changed, 136 insertions(+), 1 deletion(-) diff --git a/rust/automerge/src/exid.rs b/rust/automerge/src/exid.rs index 2c174e28..3ff8fbb5 100644 --- a/rust/automerge/src/exid.rs +++ b/rust/automerge/src/exid.rs @@ -1,3 +1,4 @@ +use crate::storage::parse; use crate::ActorId; use serde::Serialize; use serde::Serializer; @@ -11,6 +12,102 @@ pub enum ExId { Id(u64, ActorId, usize), } +const SERIALIZATION_VERSION_TAG: u8 = 0; +const TYPE_ROOT: u8 = 0; +const TYPE_ID: u8 = 1; + +impl ExId { + /// Serialize the ExId to a byte array. + pub fn to_bytes(&self) -> Vec { + // The serialized format is + // + // .--------------------------------. + // | version | type | data | + // +--------------------------------+ + // | 4 bytes |4 bytes | variable | + // '--------------------------------' + // + // Version is currently always `0` + // + // `data` depends on the type + // + // * If the type is `TYPE_ROOT` (0) then there is no data + // * If the type is `TYPE_ID` (1) then the data is + // + // .-------------------------------------------------------. + // | actor ID len | actor ID bytes | counter | actor index | + // '-------------------------------------------------------' + // + // Where the actor ID len, counter, and actor index are all uLEB encoded + // integers. The actor ID bytes is just an array of bytes. + // + match self { + ExId::Root => { + let val: u8 = SERIALIZATION_VERSION_TAG | (TYPE_ROOT << 4); + vec![val] + } + ExId::Id(id, actor, counter) => { + let actor_bytes = actor.to_bytes(); + let mut bytes = Vec::with_capacity(actor_bytes.len() + 4 + 4); + let tag = SERIALIZATION_VERSION_TAG | (TYPE_ID << 4); + bytes.push(tag); + leb128::write::unsigned(&mut bytes, actor_bytes.len() as u64).unwrap(); + bytes.extend_from_slice(actor_bytes); + leb128::write::unsigned(&mut bytes, *counter as u64).unwrap(); + leb128::write::unsigned(&mut bytes, *id).unwrap(); + bytes + } + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum ObjIdFromBytesError { + #[error("no version tag")] + NoVersion, + #[error("invalid version tag")] + InvalidVersion(u8), + #[error("invalid type tag")] + InvalidType(u8), + #[error("invalid Actor ID length: {0}")] + ParseActorLen(String), + #[error("Not enough bytes in actor ID")] + ParseActor, + #[error("invalid counter: {0}")] + ParseCounter(String), + #[error("invalid actor index hint: {0}")] + ParseActorIdxHint(String), +} + +impl<'a> TryFrom<&'a [u8]> for ExId { + type Error = ObjIdFromBytesError; + + fn try_from(value: &'a [u8]) -> Result { + let i = parse::Input::new(value); + let (i, tag) = parse::take1::<()>(i).map_err(|_| ObjIdFromBytesError::NoVersion)?; + let version = tag & 0b1111; + if version != SERIALIZATION_VERSION_TAG { + return Err(ObjIdFromBytesError::InvalidVersion(version)); + } + let type_tag = tag >> 4; + match type_tag { + TYPE_ROOT => Ok(ExId::Root), + TYPE_ID => { + let (i, len) = parse::leb128_u64::(i) + .map_err(|e| ObjIdFromBytesError::ParseActorLen(e.to_string()))?; + let (i, actor) = parse::take_n::<()>(len as usize, i) + .map_err(|_| ObjIdFromBytesError::ParseActor)?; + let (i, counter) = parse::leb128_u64::(i) + .map_err(|e| ObjIdFromBytesError::ParseCounter(e.to_string()))?; + let (_i, actor_idx_hint) = parse::leb128_u64::(i) + .map_err(|e| ObjIdFromBytesError::ParseActorIdxHint(e.to_string()))?; + Ok(Self::Id(actor_idx_hint, actor.into(), counter as usize)) + } + other => Err(ObjIdFromBytesError::InvalidType(other)), + } + } +} 
+ impl PartialEq for ExId { fn eq(&self, other: &Self) -> bool { match (self, other) { @@ -80,3 +177,41 @@ impl AsRef for ExId { self } } + +#[cfg(test)] +mod tests { + use super::ExId; + use proptest::prelude::*; + + use crate::ActorId; + + fn gen_actorid() -> impl Strategy { + proptest::collection::vec(any::(), 0..100).prop_map(ActorId::from) + } + + prop_compose! { + fn gen_non_root_objid()(actor in gen_actorid(), counter in any::(), idx in any::()) -> ExId { + ExId::Id(idx as u64, actor, counter) + } + } + + fn gen_obji() -> impl Strategy { + prop_oneof![Just(ExId::Root), gen_non_root_objid()] + } + + proptest! { + #[test] + fn objid_roundtrip(objid in gen_obji()) { + let bytes = objid.to_bytes(); + let objid2 = ExId::try_from(&bytes[..]).unwrap(); + assert_eq!(objid, objid2); + } + } + + #[test] + fn test_root_roundtrip() { + let bytes = ExId::Root.to_bytes(); + let objid2 = ExId::try_from(&bytes[..]).unwrap(); + assert_eq!(ExId::Root, objid2); + } +} diff --git a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index 97ff0650..58f5b263 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -93,7 +93,7 @@ pub use change::{Change, LoadError as LoadChangeError}; pub use error::AutomergeError; pub use error::InvalidActorId; pub use error::InvalidChangeHashSlice; -pub use exid::ExId as ObjId; +pub use exid::{ExId as ObjId, ObjIdFromBytesError}; pub use keys::Keys; pub use keys_at::KeysAt; pub use legacy::Change as ExpandedChange; From 9b44a75f69e0b6bcca7a8054395ff887bda92b7e Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 19 Jan 2023 21:11:36 +0000 Subject: [PATCH 36/72] fix: don't panic when generating parents for hidden objects (#500) Problem: the `OpSet::export_key` method uses `query::ElemIdPos` to determine the index of sequence elements when exporting a key. This query returned `None` for invisible elements. The `Parents` iterator which is used to generate paths to objects in patches in `automerge-wasm` used `export_key`. The end result is that applying a remote change which deletes an object in a sequence would panic as it tries to generate a path for an invisible object. Solution: modify `query::ElemIdPos` to include invisible objects. This does mean that the path generated will refer to the previous visible object in the sequence as it's index, but this is probably fine as for an invisible object the path shouldn't be used anyway. While we're here also change the return value of `OpSet::export_key` to an `Option` and make `query::Index::ops` private as obeisance to the Lady of the Golden Blade. 
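
For a feel of what this fixes at the API surface, here is a small sketch (using the same `AutoCommit` calls as the test added below; nothing here is new API): after deleting a list element you can still ask for the ancestry of the deleted object, and the `visible` flag on each `Parent` tells callers which ancestors are no longer visible.

```rust
use automerge::{transaction::Transactable, AutoCommit, ObjType, ROOT};

fn main() {
    let mut doc = AutoCommit::new();
    let list = doc.put_object(ROOT, "list", ObjType::List).unwrap();
    let item = doc.insert_object(&list, 0, ObjType::Map).unwrap();
    doc.delete(&list, 0).unwrap(); // `item` is now hidden but still addressable

    // Previously this panicked while exporting a key for the hidden element;
    // now the list position is reported with `visible: false`.
    for parent in doc.parents(&item).unwrap() {
        println!("{:?} at {:?} (visible: {})", parent.obj, parent.prop, parent.visible);
    }
}
```
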
--- rust/automerge/src/op_set.rs | 16 +++++---- rust/automerge/src/parents.rs | 44 ++++++++++++++++++++++++- rust/automerge/src/query.rs | 7 +++- rust/automerge/src/query/elem_id_pos.rs | 35 ++++++++++++++------ 4 files changed, 83 insertions(+), 19 deletions(-) diff --git a/rust/automerge/src/op_set.rs b/rust/automerge/src/op_set.rs index 1f5a4486..5b50d2b0 100644 --- a/rust/automerge/src/op_set.rs +++ b/rust/automerge/src/op_set.rs @@ -89,15 +89,17 @@ impl OpSetInternal { }) } - pub(crate) fn export_key(&self, obj: ObjId, key: Key, encoding: ListEncoding) -> Prop { + pub(crate) fn export_key(&self, obj: ObjId, key: Key, encoding: ListEncoding) -> Option { match key { - Key::Map(m) => Prop::Map(self.m.props.get(m).into()), + Key::Map(m) => self.m.props.safe_get(m).map(|s| Prop::Map(s.to_string())), Key::Seq(opid) => { - let i = self - .search(&obj, query::ElemIdPos::new(opid, encoding)) - .index() - .unwrap(); - Prop::Seq(i) + if opid.is_head() { + Some(Prop::Seq(0)) + } else { + self.search(&obj, query::ElemIdPos::new(opid, encoding)) + .index() + .map(Prop::Seq) + } } } } diff --git a/rust/automerge/src/parents.rs b/rust/automerge/src/parents.rs index 1d01ffbf..76c4bba1 100644 --- a/rust/automerge/src/parents.rs +++ b/rust/automerge/src/parents.rs @@ -47,7 +47,10 @@ impl<'a> Iterator for Parents<'a> { self.obj = obj; Some(Parent { obj: self.ops.id_to_exid(self.obj.0), - prop: self.ops.export_key(self.obj, key, ListEncoding::List), + prop: self + .ops + .export_key(self.obj, key, ListEncoding::List) + .unwrap(), visible, }) } else { @@ -62,3 +65,42 @@ pub struct Parent { pub prop: Prop, pub visible: bool, } + +#[cfg(test)] +mod tests { + use super::Parent; + use crate::{transaction::Transactable, Prop}; + + #[test] + fn test_invisible_parents() { + // Create a document with a list of objects, then delete one of the objects, then generate + // a path to the deleted object. + + let mut doc = crate::AutoCommit::new(); + let list = doc + .put_object(crate::ROOT, "list", crate::ObjType::List) + .unwrap(); + let obj1 = doc.insert_object(&list, 0, crate::ObjType::Map).unwrap(); + let _obj2 = doc.insert_object(&list, 1, crate::ObjType::Map).unwrap(); + doc.put(&obj1, "key", "value").unwrap(); + doc.delete(&list, 0).unwrap(); + + let mut parents = doc.parents(&obj1).unwrap().collect::>(); + parents.reverse(); + assert_eq!( + parents, + vec![ + Parent { + obj: crate::ROOT, + prop: Prop::Map("list".to_string()), + visible: true, + }, + Parent { + obj: list, + prop: Prop::Seq(0), + visible: false, + }, + ] + ); + } +} diff --git a/rust/automerge/src/query.rs b/rust/automerge/src/query.rs index 9707da33..721756c1 100644 --- a/rust/automerge/src/query.rs +++ b/rust/automerge/src/query.rs @@ -114,7 +114,7 @@ pub(crate) struct Index { pub(crate) visible16: usize, pub(crate) visible8: usize, /// Set of opids found in this node and below. 
- pub(crate) ops: HashSet, + ops: HashSet, } impl Index { @@ -140,6 +140,11 @@ impl Index { self.visible.contains_key(seen) } + /// Whether `opid` is in this node or any below it + pub(crate) fn has_op(&self, opid: &OpId) -> bool { + self.ops.contains(opid) + } + pub(crate) fn change_vis<'a>( &mut self, change_vis: ChangeVisibility<'a>, diff --git a/rust/automerge/src/query/elem_id_pos.rs b/rust/automerge/src/query/elem_id_pos.rs index 8eecd7e0..cb559216 100644 --- a/rust/automerge/src/query/elem_id_pos.rs +++ b/rust/automerge/src/query/elem_id_pos.rs @@ -1,14 +1,14 @@ use crate::{ op_tree::OpTreeNode, - types::{ElemId, Key, ListEncoding, Op}, + types::{ElemId, ListEncoding, Op, OpId}, }; use super::{QueryResult, TreeQuery}; -/// Lookup the index in the list that this elemid occupies. +/// Lookup the index in the list that this elemid occupies, includes hidden elements. #[derive(Clone, Debug)] pub(crate) struct ElemIdPos { - elemid: ElemId, + elem_opid: OpId, pos: usize, found: bool, encoding: ListEncoding, @@ -16,11 +16,20 @@ pub(crate) struct ElemIdPos { impl ElemIdPos { pub(crate) fn new(elemid: ElemId, encoding: ListEncoding) -> Self { - Self { - elemid, - pos: 0, - found: false, - encoding, + if elemid.is_head() { + Self { + elem_opid: elemid.0, + pos: 0, + found: true, + encoding, + } + } else { + Self { + elem_opid: elemid.0, + pos: 0, + found: false, + encoding, + } } } @@ -35,8 +44,11 @@ impl ElemIdPos { impl<'a> TreeQuery<'a> for ElemIdPos { fn query_node(&mut self, child: &OpTreeNode, _ops: &[Op]) -> QueryResult { + if self.found { + return QueryResult::Finish; + } // if index has our element then we can continue - if child.index.has_visible(&Key::Seq(self.elemid)) { + if child.index.has_op(&self.elem_opid) { // element is in this node somewhere QueryResult::Descend } else { @@ -47,7 +59,10 @@ impl<'a> TreeQuery<'a> for ElemIdPos { } fn query_element(&mut self, element: &crate::types::Op) -> QueryResult { - if element.elemid() == Some(self.elemid) { + if self.found { + return QueryResult::Finish; + } + if element.elemid() == Some(ElemId(self.elem_opid)) { // this is it self.found = true; return QueryResult::Finish; From 6b0ee6da2e7e0dfe9341c6fa4d3cc8c4b9b87549 Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 19 Jan 2023 22:15:06 +0000 Subject: [PATCH 37/72] Bump js to 2.0.1-alpha.5 and automerge-wasm to 0.1.22 (#497) --- javascript/package.json | 4 ++-- rust/automerge-wasm/package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index 39464fac..caeeb647 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1-alpha.4", + "version": "2.0.1-alpha.5", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -47,7 +47,7 @@ "typescript": "^4.9.4" }, "dependencies": { - "@automerge/automerge-wasm": "0.1.21", + "@automerge/automerge-wasm": "0.1.22", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 76167a3e..0f133468 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": 
"github:automerge/automerge-rs", - "version": "0.1.21", + "version": "0.1.22", "license": "MIT", "files": [ "README.md", From 98e755106f5d44e6cff2897921138ac3f95de3d0 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Mon, 23 Jan 2023 04:01:05 -0700 Subject: [PATCH 38/72] Fix and simplify lebsize calculations (#503) Before this change numbits_i64() was incorrect for every value of the form 0 - 2^x. This only manifested in a visible error if x%7 == 6 (so for -64, -8192, etc.) at which point `lebsize` would return a value one too large, causing a panic in commit(). --- .../automerge/src/columnar/encoding/leb128.rs | 47 +++++++++++-------- rust/automerge/tests/test.rs | 6 +++ 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/rust/automerge/src/columnar/encoding/leb128.rs b/rust/automerge/src/columnar/encoding/leb128.rs index 036cfba8..cbb82c31 100644 --- a/rust/automerge/src/columnar/encoding/leb128.rs +++ b/rust/automerge/src/columnar/encoding/leb128.rs @@ -1,29 +1,22 @@ /// The number of bytes required to encode `val` as a LEB128 integer -pub(crate) fn lebsize(val: i64) -> u64 { - let numbits = numbits_i64(val); - (numbits as f64 / 7.0).floor() as u64 + 1 +pub(crate) fn lebsize(mut val: i64) -> u64 { + if val < 0 { + val = !val + } + // 1 extra for the sign bit + leb_bytes(1 + 64 - val.leading_zeros() as u64) } /// The number of bytes required to encode `val` as a uLEB128 integer pub(crate) fn ulebsize(val: u64) -> u64 { - if val <= 1 { + if val == 0 { return 1; } - let numbits = numbits_u64(val); - let mut numblocks = (numbits as f64 / 7.0).floor() as u64; - if numbits % 7 != 0 { - numblocks += 1; - } - numblocks + leb_bytes(64 - val.leading_zeros() as u64) } -fn numbits_i64(val: i64) -> u64 { - // Is this right? This feels like it's not right - (std::mem::size_of::() as u32 * 8 - val.abs().leading_zeros()) as u64 -} - -fn numbits_u64(val: u64) -> u64 { - (std::mem::size_of::() as u32 * 8 - val.leading_zeros()) as u64 +fn leb_bytes(bits: u64) -> u64 { + (bits + 6) / 7 } #[cfg(test)] @@ -51,7 +44,7 @@ mod tests { #[test] fn ulebsize_examples() { - let scenarios = vec![0, 1, 127, 128, 129, 169]; + let scenarios = vec![0, 1, 127, 128, 129, 169, u64::MAX]; for val in scenarios { let mut out = Vec::new(); leb128::write::unsigned(&mut out, val).unwrap(); @@ -62,7 +55,23 @@ mod tests { #[test] fn lebsize_examples() { - let scenarios = vec![0, 1, -1, 127, 128, -127, -128, -2097152, 169]; + let scenarios = vec![ + 0, + 1, + -1, + 63, + 64, + -64, + -65, + 127, + 128, + -127, + -128, + -2097152, + 169, + i64::MIN, + i64::MAX, + ]; for val in scenarios { let mut out = Vec::new(); leb128::write::signed(&mut out, val).unwrap(); diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index 6ab797f0..4648cf87 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -1412,6 +1412,12 @@ fn invalid_deflate_stream() { assert!(Automerge::load(&bytes).is_err()); } +#[test] +fn negative_64() { + let mut doc = Automerge::new(); + assert!(doc.transact(|d| { d.put(ROOT, "a", -64_i64) }).is_ok()) +} + #[test] fn bad_change_on_optree_node_boundary() { let mut doc = Automerge::new(); From 1f7b109dcdb735366c5eff8ff0736738e740fee4 Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Mon, 23 Jan 2023 17:01:41 +0000 Subject: [PATCH 39/72] Add From for ScalarValue::Str (#506) --- rust/automerge/src/value.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/rust/automerge/src/value.rs b/rust/automerge/src/value.rs index b3142bdf..d8429f4e 100644 --- 
a/rust/automerge/src/value.rs +++ b/rust/automerge/src/value.rs @@ -266,6 +266,12 @@ impl<'a> From for Value<'a> { } } +impl<'a> From for Value<'a> { + fn from(s: SmolStr) -> Self { + Value::Scalar(Cow::Owned(ScalarValue::Str(s))) + } +} + impl<'a> From for Value<'a> { fn from(c: char) -> Self { Value::Scalar(Cow::Owned(ScalarValue::Str(SmolStr::new(c.to_string())))) From 78adbc4ff94b8ff62df0e02de1cd4fb519c8e9a9 Mon Sep 17 00:00:00 2001 From: Alex Currie-Clark <1306728+acurrieclark@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:02:02 +0000 Subject: [PATCH 40/72] Update patch types (#499) * Update `Patch` types * Clarify that the splice patch applies to text * Add Splice patch type to exports * Add new patches to javascript --- javascript/src/stable.ts | 3 ++- javascript/src/unstable.ts | 3 ++- rust/automerge-wasm/index.d.ts | 10 ++++++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts index 1f38cb27..9db4d0e2 100644 --- a/javascript/src/stable.ts +++ b/javascript/src/stable.ts @@ -41,7 +41,8 @@ import type { export type { PutPatch, DelPatch, - SplicePatch, + SpliceTextPatch, + InsertPatch, IncPatch, SyncMessage, } from "@automerge/automerge-wasm" diff --git a/javascript/src/unstable.ts b/javascript/src/unstable.ts index b448d955..21b5be08 100644 --- a/javascript/src/unstable.ts +++ b/javascript/src/unstable.ts @@ -70,7 +70,8 @@ export type Conflicts = { [key: string]: AutomergeValue } export type { PutPatch, DelPatch, - SplicePatch, + SpliceTextPatch, + InsertPatch, IncPatch, SyncMessage, } from "@automerge/automerge-wasm" diff --git a/rust/automerge-wasm/index.d.ts b/rust/automerge-wasm/index.d.ts index 29586b47..be12e4c1 100644 --- a/rust/automerge-wasm/index.d.ts +++ b/rust/automerge-wasm/index.d.ts @@ -94,7 +94,7 @@ export type Op = { pred: string[], } -export type Patch = PutPatch | DelPatch | SplicePatch | IncPatch; +export type Patch = PutPatch | DelPatch | SpliceTextPatch | IncPatch | InsertPatch; export type PutPatch = { action: 'put' @@ -115,9 +115,15 @@ export type DelPatch = { length?: number, } -export type SplicePatch = { +export type SpliceTextPatch = { action: 'splice' path: Prop[], + value: string, +} + +export type InsertPatch = { + action: 'insert' + path: Prop[], values: Value[], } From 819767cc3327ed6e5724970aae39173775c9e5c1 Mon Sep 17 00:00:00 2001 From: alexjg Date: Mon, 23 Jan 2023 19:19:55 +0000 Subject: [PATCH 41/72] fix: use saturating_sub when updating cached text width (#505) Problem: In `automerge::query::Index::change_vis` we use `-=` to subtract the width of an operation which is being hidden from the text widths which we store on the index of each node in the optree. This index represents the width of all the visible text operations in this node and below. This was causing an integer underflow error when encountering some list operations. More specifically, when a `ScalarValue::Str` in a list was made invisible by a later operation which contained a _shorter_ string, the width subtracted from the indexed text widths could be longer than the current index. Solution: use `saturating_sub` instead. This is technically papering over the problem because really the width should never go below zero, but the text widths are only relevant for text objects where the existing logic works as advertised because we don't have a `set` operation for text indices. 
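
For concreteness, the conflicting-list-element shape described above can be produced roughly like this (an illustrative sketch, not the original failing input; it assumes `AutoCommit::fork` and `AutoCommit::merge` from the crate's public API):

```rust
use automerge::{transaction::Transactable, AutoCommit, ObjType, ROOT};

fn main() {
    let mut a = AutoCommit::new();
    let list = a.put_object(ROOT, "list", ObjType::List).unwrap();
    a.insert(&list, 0, "element").unwrap();
    a.insert(&list, 1, "placeholder").unwrap();
    a.insert(&list, 2, "element").unwrap();

    // Two peers concurrently overwrite the same index with strings of different
    // lengths, which is what produces conflicting elements for that position.
    let mut b = a.fork();
    a.put(&list, 1, "conflict1").unwrap();
    b.put(&list, 1, "conflict2_longer").unwrap();
    a.merge(&mut b).unwrap();

    // Overwriting the index once more hides both conflicting values; subtracting
    // the width of the longer hidden value is the step that needs `saturating_sub`.
    a.put(&list, 1, "resolved").unwrap();
}
```
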
A more robust solution would be to track the type of the Index (and consequently of the `OpTree`) at the type level, but time is limited and problems are infinite. Also, add a lengthy description of the reason we are using `saturating_sub` so that when I read it in about a month I don't have to redo the painful debugging process that got me to this commit. --- rust/automerge/src/query.rs | 81 +++++++++++++++++++++++++++++-------- 1 file changed, 64 insertions(+), 17 deletions(-) diff --git a/rust/automerge/src/query.rs b/rust/automerge/src/query.rs index 721756c1..640ecf8d 100644 --- a/rust/automerge/src/query.rs +++ b/rust/automerge/src/query.rs @@ -107,12 +107,65 @@ pub(crate) enum QueryResult { Finish, } +#[derive(Clone, Debug, PartialEq)] +struct TextWidth { + utf8: usize, + utf16: usize, +} + +impl TextWidth { + fn add_op(&mut self, op: &Op) { + self.utf8 += op.width(ListEncoding::Text(TextEncoding::Utf8)); + self.utf16 += op.width(ListEncoding::Text(TextEncoding::Utf16)); + } + + fn remove_op(&mut self, op: &Op) { + // Why are we using saturating_sub here? Shouldn't this always be greater than 0? + // + // In the case of objects which are _not_ `Text` we may end up subtracting more than the + // current width. This can happen if the elements in a list are `ScalarValue::str` and + // there are conflicting elements for the same index in the list. Like so: + // + // ```notrust + // [ + // "element", + // ["conflict1", "conflict2_longer"], + // "element" + // ] + // ``` + // + // Where there are two conflicted elements at index 1 + // + // in `Index::insert` and `Index::change_visibility` we add the width of the inserted op in + // utf8 and utf16 to the current width, but only if there was not a previous element for + // that index. Imagine that we encounter the "conflict1" op first, then we will add the + // length of 'conflict1' to the text widths. When 'conflict2_longer' is added we don't do + // anything because we've already seen an op for this index. Imagine that later we remove + // the `conflict2_longer` op, then we will end up subtracting the length of + // 'conflict2_longer' from the text widths, hence, `saturating_sub`. This isn't a problem + // because for non text objects we don't need the text widths to be accurate anyway. + // + // Really this is a sign that we should be tracking the type of the Index (List or Text) at + // the type level, but for now we just look the other way. + self.utf8 = self + .utf8 + .saturating_sub(op.width(ListEncoding::Text(TextEncoding::Utf8))); + self.utf16 = self + .utf16 + .saturating_sub(op.width(ListEncoding::Text(TextEncoding::Utf16))); + } + + fn merge(&mut self, other: &TextWidth) { + self.utf8 += other.utf8; + self.utf16 += other.utf16; + } +} + #[derive(Clone, Debug, PartialEq)] pub(crate) struct Index { /// The map of visible keys to the number of visible operations for that key. - pub(crate) visible: HashMap, - pub(crate) visible16: usize, - pub(crate) visible8: usize, + visible: HashMap, + visible_text: TextWidth, /// Set of opids found in this node and below. 
ops: HashSet, } @@ -121,8 +174,7 @@ impl Index { pub(crate) fn new() -> Self { Index { visible: Default::default(), - visible16: 0, - visible8: 0, + visible_text: TextWidth { utf8: 0, utf16: 0 }, ops: Default::default(), } } @@ -131,8 +183,8 @@ impl Index { pub(crate) fn visible_len(&self, encoding: ListEncoding) -> usize { match encoding { ListEncoding::List => self.visible.len(), - ListEncoding::Text(TextEncoding::Utf8) => self.visible8, - ListEncoding::Text(TextEncoding::Utf16) => self.visible16, + ListEncoding::Text(TextEncoding::Utf8) => self.visible_text.utf8, + ListEncoding::Text(TextEncoding::Utf16) => self.visible_text.utf16, } } @@ -159,8 +211,7 @@ impl Index { (true, false) => match self.visible.get(&key).copied() { Some(n) if n == 1 => { self.visible.remove(&key); - self.visible8 -= op.width(ListEncoding::Text(TextEncoding::Utf8)); - self.visible16 -= op.width(ListEncoding::Text(TextEncoding::Utf16)); + self.visible_text.remove_op(op); } Some(n) => { self.visible.insert(key, n - 1); @@ -172,8 +223,7 @@ impl Index { self.visible.insert(key, n + 1); } else { self.visible.insert(key, 1); - self.visible8 += op.width(ListEncoding::Text(TextEncoding::Utf8)); - self.visible16 += op.width(ListEncoding::Text(TextEncoding::Utf16)); + self.visible_text.add_op(op); } } _ => {} @@ -189,8 +239,7 @@ impl Index { self.visible.insert(key, n + 1); } else { self.visible.insert(key, 1); - self.visible8 += op.width(ListEncoding::Text(TextEncoding::Utf8)); - self.visible16 += op.width(ListEncoding::Text(TextEncoding::Utf16)); + self.visible_text.add_op(op); } } } @@ -202,8 +251,7 @@ impl Index { match self.visible.get(&key).copied() { Some(n) if n == 1 => { self.visible.remove(&key); - self.visible8 -= op.width(ListEncoding::Text(TextEncoding::Utf8)); - self.visible16 -= op.width(ListEncoding::Text(TextEncoding::Utf16)); + self.visible_text.remove_op(op); } Some(n) => { self.visible.insert(key, n - 1); @@ -223,8 +271,7 @@ impl Index { .and_modify(|len| *len += *other_len) .or_insert(*other_len); } - self.visible16 += other.visible16; - self.visible8 += other.visible8; + self.visible_text.merge(&other.visible_text); } } From 931ee7e77bd83d5c8b52c79fc2c99143171a33a5 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 25 Jan 2023 09:03:05 -0700 Subject: [PATCH 42/72] Add Fuzz Testing (#498) * Add fuzz testing for document load * Fix fuzz crashers and add to test suite --- rust/automerge/fuzz/.gitignore | 3 ++ rust/automerge/fuzz/Cargo.toml | 29 ++++++++++++++ rust/automerge/fuzz/fuzz_targets/load.rs | 37 ++++++++++++++++++ .../src/columnar/column_range/deps.rs | 6 ++- .../src/columnar/column_range/opid_list.rs | 7 +++- .../src/storage/columns/raw_column.rs | 5 ++- .../src/storage/load/change_collector.rs | 15 ++++++- ...h-da39a3ee5e6b4b0d3255bfef95601890afd80709 | Bin 0 -> 10 bytes .../fuzz-crashers/incorrect_max_op.automerge | Bin 0 -> 126 bytes .../invalid_deflate_stream.automerge | Bin 0 -> 123 bytes .../fuzz-crashers/missing_actor.automerge | Bin 0 -> 126 bytes .../overflow_in_length.automerge | Bin 0 -> 182 bytes .../fuzz-crashers/too_many_deps.automerge | Bin 0 -> 134 bytes .../fuzz-crashers/too_many_ops.automerge | Bin 0 -> 134 bytes rust/automerge/tests/test.rs | 20 +++++----- 15 files changed, 108 insertions(+), 14 deletions(-) create mode 100644 rust/automerge/fuzz/.gitignore create mode 100644 rust/automerge/fuzz/Cargo.toml create mode 100644 rust/automerge/fuzz/fuzz_targets/load.rs create mode 100644 rust/automerge/tests/fuzz-crashers/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 create 
mode 100644 rust/automerge/tests/fuzz-crashers/incorrect_max_op.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/invalid_deflate_stream.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/missing_actor.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/overflow_in_length.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/too_many_deps.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/too_many_ops.automerge diff --git a/rust/automerge/fuzz/.gitignore b/rust/automerge/fuzz/.gitignore new file mode 100644 index 00000000..2eb15f8e --- /dev/null +++ b/rust/automerge/fuzz/.gitignore @@ -0,0 +1,3 @@ +target +corpus +coverage diff --git a/rust/automerge/fuzz/Cargo.toml b/rust/automerge/fuzz/Cargo.toml new file mode 100644 index 00000000..3461e9f3 --- /dev/null +++ b/rust/automerge/fuzz/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "automerge-fuzz" +version = "0.0.0" +publish = false +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.4" +leb128 = "^0.2.5" +sha2 = "^0.10.0" + +[dependencies.automerge] +path = ".." + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[profile.release] +debug = 1 + +[[bin]] +name = "load" +path = "fuzz_targets/load.rs" +test = false +doc = false \ No newline at end of file diff --git a/rust/automerge/fuzz/fuzz_targets/load.rs b/rust/automerge/fuzz/fuzz_targets/load.rs new file mode 100644 index 00000000..0dea2624 --- /dev/null +++ b/rust/automerge/fuzz/fuzz_targets/load.rs @@ -0,0 +1,37 @@ +#![no_main] + +use sha2::{Sha256, Digest}; +use automerge::{Automerge}; +use libfuzzer_sys::arbitrary::{Arbitrary, Result, Unstructured}; +use libfuzzer_sys::fuzz_target; + +#[derive(Debug)] +struct DocumentChunk { + bytes: Vec, +} + +fn add_header(typ: u8, data: &[u8]) -> Vec { + let mut input = vec![u8::from(typ)]; + leb128::write::unsigned(&mut input, data.len() as u64).unwrap(); + input.extend(data.as_ref()); + let hash_result = Sha256::digest(input.clone()); + let array: [u8; 32] = hash_result.into(); + + let mut out = vec![133, 111, 74, 131, array[0], array[1], array[2], array[3]]; + out.extend(input); + out +} + +impl<'a> Arbitrary<'a> for DocumentChunk +{ + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + let input = u.bytes(u.len())?; + let contents = add_header(0, input); + + return Ok(DocumentChunk{bytes: contents}) + } +} + +fuzz_target!(|doc: DocumentChunk| { + Automerge::load(&doc.bytes); +}); diff --git a/rust/automerge/src/columnar/column_range/deps.rs b/rust/automerge/src/columnar/column_range/deps.rs index df49192a..1956acd1 100644 --- a/rust/automerge/src/columnar/column_range/deps.rs +++ b/rust/automerge/src/columnar/column_range/deps.rs @@ -62,7 +62,11 @@ impl<'a> DepsIter<'a> { } None => return Ok(None), }; - let mut result = Vec::with_capacity(num); + // We cannot trust `num` because it is provided over the network, + // but in the common case it will be correct and small (so we + // use with_capacity to make sure the vector is precisely the right + // size). 
+ let mut result = Vec::with_capacity(std::cmp::min(num, 100)); while result.len() < num { match self .deps diff --git a/rust/automerge/src/columnar/column_range/opid_list.rs b/rust/automerge/src/columnar/column_range/opid_list.rs index 12279c08..6a9c8a38 100644 --- a/rust/automerge/src/columnar/column_range/opid_list.rs +++ b/rust/automerge/src/columnar/column_range/opid_list.rs @@ -189,7 +189,12 @@ impl<'a> OpIdListIter<'a> { Some(None) => return Err(DecodeColumnError::unexpected_null("num")), None => return Ok(None), }; - let mut p = Vec::with_capacity(num as usize); + + // We cannot trust `num` because it is provided over the network, + // but in the common case it will be correct and small (so we + // use with_capacity to make sure the vector is precisely the right + // size). + let mut p = Vec::with_capacity(std::cmp::min(num, 100) as usize); for _ in 0..num { let actor = self .actor diff --git a/rust/automerge/src/storage/columns/raw_column.rs b/rust/automerge/src/storage/columns/raw_column.rs index 808b53cf..ac9a5759 100644 --- a/rust/automerge/src/storage/columns/raw_column.rs +++ b/rust/automerge/src/storage/columns/raw_column.rs @@ -219,7 +219,10 @@ impl RawColumns { let columns: Vec> = specs_and_lens .into_iter() .scan(0_usize, |offset, (spec, len)| { - let end = *offset + len as usize; + // Note: we use a saturating add here as len was passed over the network + // and so could be anything. If the addition does every saturate we would + // expect parsing to fail later (but at least it won't panic!). + let end = offset.saturating_add(len as usize); let data = *offset..end; *offset = end; Some(RawColumn { diff --git a/rust/automerge/src/storage/load/change_collector.rs b/rust/automerge/src/storage/load/change_collector.rs index 75ef98f1..d05367a9 100644 --- a/rust/automerge/src/storage/load/change_collector.rs +++ b/rust/automerge/src/storage/load/change_collector.rs @@ -26,6 +26,8 @@ pub(crate) enum Error { MissingChange, #[error("unable to read change metadata: {0}")] ReadChange(Box), + #[error("incorrect max op")] + IncorrectMaxOp, #[error("missing ops")] MissingOps, } @@ -180,7 +182,18 @@ impl<'a> PartialChange<'a> { .ops .iter() .map(|(obj, op)| op_as_actor_id(obj, op, metadata)); - let actor = metadata.actors.get(self.actor).clone(); + let actor = metadata + .actors + .safe_get(self.actor) + .ok_or_else(|| { + tracing::error!(actor_index = self.actor, "actor out of bounds"); + Error::MissingActor + })? 
+ .clone(); + + if num_ops > self.max_op { + return Err(Error::IncorrectMaxOp); + } let change = match StoredChange::builder() .with_dependencies(deps) diff --git a/rust/automerge/tests/fuzz-crashers/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 b/rust/automerge/tests/fuzz-crashers/crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 new file mode 100644 index 0000000000000000000000000000000000000000..bcb12cddc6980d44c13dd0351899abe297817f70 GIT binary patch literal 10 RcmZq8_iCQDXxb$P1^^m_1Y!UH literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/incorrect_max_op.automerge b/rust/automerge/tests/fuzz-crashers/incorrect_max_op.automerge new file mode 100644 index 0000000000000000000000000000000000000000..05cc2c82681529ae087bc4ab88c3ebc7ffbf73a7 GIT binary patch literal 126 zcmZq8_iFy6Eq;Zegi(Mga9P2Di~d0kS!`#NOG_3rZg0ucpBfVWKQ9lyTY8rUT zb)+h5Oppy)Q?ugCCKWbDCT1pKCS@iErZ6TBQ8q;&(}d9p$O&g@U}UOisAmLX5M-}s S$!CP{K$KfPLqTyp0|Nj9lO`qr literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/invalid_deflate_stream.automerge b/rust/automerge/tests/fuzz-crashers/invalid_deflate_stream.automerge new file mode 100644 index 0000000000000000000000000000000000000000..21e869eb4bafd66b9f2a3bb7f856fd2b312c61fa GIT binary patch literal 123 zcmZq8_i8o(0)|3H0T7K07?C;H*ldhU%uEVQ226%P$f3ZZ2x2lCGFdW(GdD0Y)icyH Z0wDtsvez@ERe|^*0kik}_trBo004Sr7)}5H literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/missing_actor.automerge b/rust/automerge/tests/fuzz-crashers/missing_actor.automerge new file mode 100644 index 0000000000000000000000000000000000000000..cc8c61b14d4873ab1a117ad4d1b6eb39d9037e25 GIT binary patch literal 126 zcmZq8_iAP@etLtUgi+xAhCLyj-A82@#BJP1t8G;SXSckWBrGZ zVG=09K&AgdKpjk?5Kt>X6i6IIbASji09nky!obAH9t+h3vXupFBO?P)mWhcymXQGf DM*mhW literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/too_many_deps.automerge b/rust/automerge/tests/fuzz-crashers/too_many_deps.automerge new file mode 100644 index 0000000000000000000000000000000000000000..657ce9930f000a3b8d4585e3889220b3b48e1db0 GIT binary patch literal 134 zcmZq8_iCP-9<9Jo!zl26!=8}NZl_EAt>0%B6p6fGoFREZtGfJf_fnlJA6~mF{yla} zlIf?86Z5p}SdGt-$7i!KGBGm=GbuAUaD_2(h_WdHnI?=*OkqsnEDelI^$hilK*&)4 b3JMtN+3Q*I848L)GK{+!>)rD6K^z7E|5`CV literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/too_many_ops.automerge b/rust/automerge/tests/fuzz-crashers/too_many_ops.automerge new file mode 100644 index 0000000000000000000000000000000000000000..661258b0933e854bde60d741b6a47c731029de3b GIT binary patch literal 134 zcmZq8_i7G3?{Jo(hEd@ChCLyjvz;#Ww|<{lP$cq#afajtt?Kf_-Ai?@e0c4y`1jZ? 
zNv5AVPR!G?V>LcU9-qy|$i&Pf%%sfZz!b*BA Result<(), AutomergeError> { } #[test] -fn invalid_deflate_stream() { - let bytes: [u8; 123] = [ - 133, 111, 74, 131, 48, 48, 48, 48, 0, 113, 1, 16, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 1, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 6, 1, 2, 3, 2, 32, 2, 48, - 2, 49, 2, 49, 2, 8, 32, 4, 33, 2, 48, 2, 49, 1, 49, 2, 57, 2, 87, 3, 128, 1, 2, 127, 0, - 127, 1, 127, 1, 127, 0, 127, 0, 127, 7, 127, 2, 102, 122, 127, 0, 127, 1, 1, 127, 1, 127, - 54, 239, 191, 189, 127, 0, 0, - ]; +fn fuzz_crashers() { + let paths = fs::read_dir("./tests/fuzz-crashers").unwrap(); - assert!(Automerge::load(&bytes).is_err()); + for path in paths { + // uncomment this line to figure out which fixture is crashing: + // println!("{:?}", path.as_ref().unwrap().path().display()); + let bytes = fs::read(path.as_ref().unwrap().path()); + let res = Automerge::load(&bytes.unwrap()); + assert!(res.is_err()); + } } #[test] From f428fe0169434782254b9f4320e9b4e7269c7bdb Mon Sep 17 00:00:00 2001 From: alexjg Date: Fri, 27 Jan 2023 17:23:13 +0000 Subject: [PATCH 43/72] Improve typescript types (#508) --- javascript/.eslintrc.cjs | 9 + javascript/src/conflicts.ts | 100 ++++++++ javascript/src/counter.ts | 2 +- javascript/src/low_level.ts | 1 + javascript/src/proxies.ts | 268 ++++++++++++++------- javascript/src/stable.ts | 102 +++----- javascript/src/text.ts | 10 +- javascript/src/types.ts | 3 +- javascript/src/unstable.ts | 45 ++-- javascript/src/unstable_types.ts | 30 +++ javascript/test/basic_test.ts | 1 - javascript/test/legacy_tests.ts | 7 +- javascript/test/stable_unstable_interop.ts | 58 +++++ 13 files changed, 450 insertions(+), 186 deletions(-) create mode 100644 javascript/src/conflicts.ts create mode 100644 javascript/src/unstable_types.ts diff --git a/javascript/.eslintrc.cjs b/javascript/.eslintrc.cjs index 5d11eb94..88776271 100644 --- a/javascript/.eslintrc.cjs +++ b/javascript/.eslintrc.cjs @@ -3,4 +3,13 @@ module.exports = { parser: "@typescript-eslint/parser", plugins: ["@typescript-eslint"], extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended"], + rules: { + "@typescript-eslint/no-unused-vars": [ + "error", + { + argsIgnorePattern: "^_", + varsIgnorePattern: "^_", + }, + ], + }, } diff --git a/javascript/src/conflicts.ts b/javascript/src/conflicts.ts new file mode 100644 index 00000000..52af23e1 --- /dev/null +++ b/javascript/src/conflicts.ts @@ -0,0 +1,100 @@ +import { Counter, type AutomergeValue } from "./types" +import { Text } from "./text" +import { type AutomergeValue as UnstableAutomergeValue } from "./unstable_types" +import { type Target, Text1Target, Text2Target } from "./proxies" +import { mapProxy, listProxy, ValueType } from "./proxies" +import type { Prop, ObjID } from "@automerge/automerge-wasm" +import { Automerge } from "@automerge/automerge-wasm" + +export type ConflictsF = { [key: string]: ValueType } +export type Conflicts = ConflictsF +export type UnstableConflicts = ConflictsF + +export function stableConflictAt( + context: Automerge, + objectId: ObjID, + prop: Prop +): Conflicts | undefined { + return conflictAt( + context, + objectId, + prop, + true, + (context: Automerge, conflictId: ObjID): AutomergeValue => { + return new Text(context.text(conflictId)) + } + ) +} + +export function unstableConflictAt( + context: Automerge, + objectId: ObjID, + prop: Prop +): UnstableConflicts | undefined { + return conflictAt( + 
context, + objectId, + prop, + true, + (context: Automerge, conflictId: ObjID): UnstableAutomergeValue => { + return context.text(conflictId) + } + ) +} + +function conflictAt( + context: Automerge, + objectId: ObjID, + prop: Prop, + textV2: boolean, + handleText: (a: Automerge, conflictId: ObjID) => ValueType +): ConflictsF | undefined { + const values = context.getAll(objectId, prop) + if (values.length <= 1) { + return + } + const result: ConflictsF = {} + for (const fullVal of values) { + switch (fullVal[0]) { + case "map": + result[fullVal[1]] = mapProxy( + context, + fullVal[1], + textV2, + [prop], + true + ) + break + case "list": + result[fullVal[1]] = listProxy( + context, + fullVal[1], + textV2, + [prop], + true + ) + break + case "text": + result[fullVal[1]] = handleText(context, fullVal[1] as ObjID) + break + case "str": + case "uint": + case "int": + case "f64": + case "boolean": + case "bytes": + case "null": + result[fullVal[2]] = fullVal[1] as ValueType + break + case "counter": + result[fullVal[2]] = new Counter(fullVal[1]) as ValueType + break + case "timestamp": + result[fullVal[2]] = new Date(fullVal[1]) as ValueType + break + default: + throw RangeError(`datatype ${fullVal[0]} unimplemented`) + } + } + return result +} diff --git a/javascript/src/counter.ts b/javascript/src/counter.ts index 873fa157..88adb840 100644 --- a/javascript/src/counter.ts +++ b/javascript/src/counter.ts @@ -100,7 +100,7 @@ export function getWriteableCounter( path: Prop[], objectId: ObjID, key: Prop -) { +): WriteableCounter { return new WriteableCounter(value, context, path, objectId, key) } diff --git a/javascript/src/low_level.ts b/javascript/src/low_level.ts index 63ef5546..f44f3a32 100644 --- a/javascript/src/low_level.ts +++ b/javascript/src/low_level.ts @@ -14,6 +14,7 @@ export type { ChangeToEncode } from "@automerge/automerge-wasm" export function UseApi(api: API) { for (const k in api) { + // eslint-disable-next-line @typescript-eslint/no-extra-semi,@typescript-eslint/no-explicit-any ;(ApiHandler as any)[k] = (api as any)[k] } } diff --git a/javascript/src/proxies.ts b/javascript/src/proxies.ts index 7a99cf80..54a8dd71 100644 --- a/javascript/src/proxies.ts +++ b/javascript/src/proxies.ts @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { Text } from "./text" import { Automerge, @@ -6,13 +7,12 @@ import { type Prop, } from "@automerge/automerge-wasm" -import type { - AutomergeValue, - ScalarValue, - MapValue, - ListValue, - TextValue, -} from "./types" +import type { AutomergeValue, ScalarValue, MapValue, ListValue } from "./types" +import { + type AutomergeValue as UnstableAutomergeValue, + MapValue as UnstableMapValue, + ListValue as UnstableListValue, +} from "./unstable_types" import { Counter, getWriteableCounter } from "./counter" import { STATE, @@ -26,19 +26,38 @@ import { } from "./constants" import { RawString } from "./raw_string" -type Target = { +type TargetCommon = { context: Automerge objectId: ObjID path: Array readonly: boolean heads?: Array - cache: {} + cache: object trace?: any frozen: boolean - textV2: boolean } -function parseListIndex(key) { +export type Text2Target = TargetCommon & { textV2: true } +export type Text1Target = TargetCommon & { textV2: false } +export type Target = Text1Target | Text2Target + +export type ValueType = T extends Text2Target + ? UnstableAutomergeValue + : T extends Text1Target + ? AutomergeValue + : never +type MapValueType = T extends Text2Target + ? UnstableMapValue + : T extends Text1Target + ? 
MapValue + : never +type ListValueType = T extends Text2Target + ? UnstableListValue + : T extends Text1Target + ? ListValue + : never + +function parseListIndex(key: any) { if (typeof key === "string" && /^[0-9]+$/.test(key)) key = parseInt(key, 10) if (typeof key !== "number") { return key @@ -49,7 +68,10 @@ function parseListIndex(key) { return key } -function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { +function valueAt( + target: T, + prop: Prop +): ValueType | undefined { const { context, objectId, path, readonly, heads, textV2 } = target const value = context.getWithType(objectId, prop, heads) if (value === null) { @@ -61,7 +83,7 @@ function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { case undefined: return case "map": - return mapProxy( + return mapProxy( context, val as ObjID, textV2, @@ -70,7 +92,7 @@ function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { heads ) case "list": - return listProxy( + return listProxy( context, val as ObjID, textV2, @@ -80,7 +102,7 @@ function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { ) case "text": if (textV2) { - return context.text(val as ObjID, heads) + return context.text(val as ObjID, heads) as ValueType } else { return textProxy( context, @@ -88,29 +110,36 @@ function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { [...path, prop], readonly, heads - ) + ) as unknown as ValueType } case "str": - return val + return val as ValueType case "uint": - return val + return val as ValueType case "int": - return val + return val as ValueType case "f64": - return val + return val as ValueType case "boolean": - return val + return val as ValueType case "null": - return null + return null as ValueType case "bytes": - return val + return val as ValueType case "timestamp": - return val + return val as ValueType case "counter": { if (readonly) { - return new Counter(val as number) + return new Counter(val as number) as ValueType } else { - return getWriteableCounter(val as number, context, path, objectId, prop) + const counter: Counter = getWriteableCounter( + val as number, + context, + path, + objectId, + prop + ) + return counter as ValueType } } default: @@ -118,7 +147,21 @@ function valueAt(target: Target, prop: Prop): AutomergeValue | undefined { } } -function import_value(value: any, textV2: boolean) { +type ImportedValue = + | [null, "null"] + | [number, "uint"] + | [number, "int"] + | [number, "f64"] + | [number, "counter"] + | [number, "timestamp"] + | [string, "str"] + | [Text | string, "text"] + | [Uint8Array, "bytes"] + | [Array, "list"] + | [Record, "map"] + | [boolean, "boolean"] + +function import_value(value: any, textV2: boolean): ImportedValue { switch (typeof value) { case "object": if (value == null) { @@ -170,7 +213,10 @@ function import_value(value: any, textV2: boolean) { } const MapHandler = { - get(target: Target, key): AutomergeValue | { handle: Automerge } { + get( + target: T, + key: any + ): ValueType | ObjID | boolean | { handle: Automerge } { const { context, objectId, cache } = target if (key === Symbol.toStringTag) { return target[Symbol.toStringTag] @@ -185,7 +231,7 @@ const MapHandler = { return cache[key] }, - set(target: Target, key, val) { + set(target: Target, key: any, val: any) { const { context, objectId, path, readonly, frozen, textV2 } = target target.cache = {} // reset cache on set if (val && val[OBJECT_ID]) { @@ -221,8 +267,10 @@ const MapHandler = { } case "text": { if (textV2) { + assertString(value) 
context.putObject(objectId, key, value) } else { + assertText(value) const text = context.putObject(objectId, key, "") const proxyText = textProxy(context, text, [...path, key], readonly) for (let i = 0; i < value.length; i++) { @@ -251,7 +299,7 @@ const MapHandler = { return true }, - deleteProperty(target: Target, key) { + deleteProperty(target: Target, key: any) { const { context, objectId, readonly } = target target.cache = {} // reset cache on delete if (readonly) { @@ -261,12 +309,12 @@ const MapHandler = { return true }, - has(target: Target, key) { + has(target: Target, key: any) { const value = this.get(target, key) return value !== undefined }, - getOwnPropertyDescriptor(target: Target, key) { + getOwnPropertyDescriptor(target: Target, key: any) { // const { context, objectId } = target const value = this.get(target, key) if (typeof value !== "undefined") { @@ -287,11 +335,20 @@ const MapHandler = { } const ListHandler = { - get(target: Target, index) { + get( + target: T, + index: any + ): + | ValueType + | boolean + | ObjID + | { handle: Automerge } + | number + | ((_: any) => boolean) { const { context, objectId, heads } = target index = parseListIndex(index) if (index === Symbol.hasInstance) { - return instance => { + return (instance: any) => { return Array.isArray(instance) } } @@ -304,13 +361,13 @@ const ListHandler = { if (index === STATE) return { handle: context } if (index === "length") return context.length(objectId, heads) if (typeof index === "number") { - return valueAt(target, index) + return valueAt(target, index) as ValueType } else { return listMethods(target)[index] } }, - set(target: Target, index, val) { + set(target: Target, index: any, val: any) { const { context, objectId, path, readonly, frozen, textV2 } = target index = parseListIndex(index) if (val && val[OBJECT_ID]) { @@ -334,7 +391,7 @@ const ListHandler = { } switch (datatype) { case "list": { - let list + let list: ObjID if (index >= context.length(objectId)) { list = context.insertObject(objectId, index, []) } else { @@ -352,13 +409,15 @@ const ListHandler = { } case "text": { if (textV2) { + assertString(value) if (index >= context.length(objectId)) { context.insertObject(objectId, index, value) } else { context.putObject(objectId, index, value) } } else { - let text + let text: ObjID + assertText(value) if (index >= context.length(objectId)) { text = context.insertObject(objectId, index, "") } else { @@ -370,7 +429,7 @@ const ListHandler = { break } case "map": { - let map + let map: ObjID if (index >= context.length(objectId)) { map = context.insertObject(objectId, index, {}) } else { @@ -398,7 +457,7 @@ const ListHandler = { return true }, - deleteProperty(target: Target, index) { + deleteProperty(target: Target, index: any) { const { context, objectId } = target index = parseListIndex(index) const elem = context.get(objectId, index) @@ -411,7 +470,7 @@ const ListHandler = { return true }, - has(target: Target, index) { + has(target: Target, index: any) { const { context, objectId, heads } = target index = parseListIndex(index) if (typeof index === "number") { @@ -420,7 +479,7 @@ const ListHandler = { return index === "length" }, - getOwnPropertyDescriptor(target: Target, index) { + getOwnPropertyDescriptor(target: Target, index: any) { const { context, objectId, heads } = target if (index === "length") @@ -434,7 +493,7 @@ const ListHandler = { return { configurable: true, enumerable: true, value } }, - getPrototypeOf(target) { + getPrototypeOf(target: Target) { return 
Object.getPrototypeOf(target) }, ownKeys(/*target*/): string[] { @@ -476,14 +535,14 @@ const TextHandler = Object.assign({}, ListHandler, { }, }) -export function mapProxy( +export function mapProxy( context: Automerge, objectId: ObjID, textV2: boolean, path?: Prop[], readonly?: boolean, heads?: Heads -): MapValue { +): MapValueType { const target: Target = { context, objectId, @@ -496,19 +555,19 @@ export function mapProxy( } const proxied = {} Object.assign(proxied, target) - let result = new Proxy(proxied, MapHandler) + const result = new Proxy(proxied, MapHandler) // conversion through unknown is necessary because the types are so different - return result as unknown as MapValue + return result as unknown as MapValueType } -export function listProxy( +export function listProxy( context: Automerge, objectId: ObjID, textV2: boolean, path?: Prop[], readonly?: boolean, heads?: Heads -): ListValue { +): ListValueType { const target: Target = { context, objectId, @@ -521,17 +580,22 @@ export function listProxy( } const proxied = [] Object.assign(proxied, target) + // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore return new Proxy(proxied, ListHandler) as unknown as ListValue } +interface TextProxy extends Text { + splice: (index: any, del: any, ...vals: any[]) => void +} + export function textProxy( context: Automerge, objectId: ObjID, path?: Prop[], readonly?: boolean, heads?: Heads -): TextValue { +): TextProxy { const target: Target = { context, objectId, @@ -542,7 +606,9 @@ export function textProxy( cache: {}, textV2: false, } - return new Proxy(target, TextHandler) as unknown as TextValue + const proxied = {} + Object.assign(proxied, target) + return new Proxy(proxied, TextHandler) as unknown as TextProxy } export function rootProxy( @@ -554,10 +620,10 @@ export function rootProxy( return mapProxy(context, "_root", textV2, [], !!readonly) } -function listMethods(target: Target) { +function listMethods(target: T) { const { context, objectId, path, readonly, frozen, heads, textV2 } = target const methods = { - deleteAt(index, numDelete) { + deleteAt(index: number, numDelete: number) { if (typeof numDelete === "number") { context.splice(objectId, index, numDelete) } else { @@ -572,8 +638,20 @@ function listMethods(target: Target) { start = parseListIndex(start || 0) end = parseListIndex(end || length) for (let i = start; i < Math.min(end, length); i++) { - if (datatype === "text" || datatype === "list" || datatype === "map") { + if (datatype === "list" || datatype === "map") { context.putObject(objectId, i, value) + } else if (datatype === "text") { + if (textV2) { + assertString(value) + context.putObject(objectId, i, value) + } else { + assertText(value) + const text = context.putObject(objectId, i, "") + const proxyText = textProxy(context, text, [...path, i], readonly) + for (let i = 0; i < value.length; i++) { + proxyText[i] = value.get(i) + } + } } else { context.put(objectId, i, value, datatype) } @@ -581,7 +659,7 @@ function listMethods(target: Target) { return this }, - indexOf(o, start = 0) { + indexOf(o: any, start = 0) { const length = context.length(objectId) for (let i = start; i < length; i++) { const value = context.getWithType(objectId, i, heads) @@ -592,7 +670,7 @@ function listMethods(target: Target) { return -1 }, - insertAt(index, ...values) { + insertAt(index: number, ...values: any[]) { this.splice(index, 0, ...values) return this }, @@ -607,7 +685,7 @@ function listMethods(target: Target) { return last }, - push(...values) { + 
push(...values: any[]) { const len = context.length(objectId) this.splice(len, 0, ...values) return context.length(objectId) @@ -620,7 +698,7 @@ function listMethods(target: Target) { return first }, - splice(index, del, ...vals) { + splice(index: any, del: any, ...vals: any[]) { index = parseListIndex(index) del = parseListIndex(del) for (const val of vals) { @@ -638,9 +716,9 @@ function listMethods(target: Target) { "Sequence object cannot be modified outside of a change block" ) } - const result: AutomergeValue[] = [] + const result: ValueType[] = [] for (let i = 0; i < del; i++) { - const value = valueAt(target, index) + const value = valueAt(target, index) if (value !== undefined) { result.push(value) } @@ -663,6 +741,7 @@ function listMethods(target: Target) { } case "text": { if (textV2) { + assertString(value) context.insertObject(objectId, index, value) } else { const text = context.insertObject(objectId, index, "") @@ -698,7 +777,7 @@ function listMethods(target: Target) { return result }, - unshift(...values) { + unshift(...values: any) { this.splice(0, 0, ...values) return context.length(objectId) }, @@ -749,11 +828,11 @@ function listMethods(target: Target) { return iterator }, - toArray(): AutomergeValue[] { - const list: AutomergeValue = [] - let value + toArray(): ValueType[] { + const list: Array> = [] + let value: ValueType | undefined do { - value = valueAt(target, list.length) + value = valueAt(target, list.length) if (value !== undefined) { list.push(value) } @@ -762,7 +841,7 @@ function listMethods(target: Target) { return list }, - map(f: (AutomergeValue, number) => T): T[] { + map(f: (_a: ValueType, _n: number) => U): U[] { return this.toArray().map(f) }, @@ -774,24 +853,26 @@ function listMethods(target: Target) { return this.toArray().toLocaleString() }, - forEach(f: (AutomergeValue, number) => undefined) { + forEach(f: (_a: ValueType, _n: number) => undefined) { return this.toArray().forEach(f) }, // todo: real concat function is different - concat(other: AutomergeValue[]): AutomergeValue[] { + concat(other: ValueType[]): ValueType[] { return this.toArray().concat(other) }, - every(f: (AutomergeValue, number) => boolean): boolean { + every(f: (_a: ValueType, _n: number) => boolean): boolean { return this.toArray().every(f) }, - filter(f: (AutomergeValue, number) => boolean): AutomergeValue[] { + filter(f: (_a: ValueType, _n: number) => boolean): ValueType[] { return this.toArray().filter(f) }, - find(f: (AutomergeValue, number) => boolean): AutomergeValue | undefined { + find( + f: (_a: ValueType, _n: number) => boolean + ): ValueType | undefined { let index = 0 for (const v of this) { if (f(v, index)) { @@ -801,7 +882,7 @@ function listMethods(target: Target) { } }, - findIndex(f: (AutomergeValue, number) => boolean): number { + findIndex(f: (_a: ValueType, _n: number) => boolean): number { let index = 0 for (const v of this) { if (f(v, index)) { @@ -812,7 +893,7 @@ function listMethods(target: Target) { return -1 }, - includes(elem: AutomergeValue): boolean { + includes(elem: ValueType): boolean { return this.find(e => e === elem) !== undefined }, @@ -820,29 +901,30 @@ function listMethods(target: Target) { return this.toArray().join(sep) }, - // todo: remove the any - reduce(f: (any, AutomergeValue) => T, initalValue?: T): T | undefined { - return this.toArray().reduce(f, initalValue) + reduce( + f: (acc: U, currentValue: ValueType) => U, + initialValue: U + ): U | undefined { + return this.toArray().reduce(f, initialValue) }, - // todo: remove the any - 
reduceRight( - f: (any, AutomergeValue) => T, - initalValue?: T - ): T | undefined { - return this.toArray().reduceRight(f, initalValue) + reduceRight( + f: (acc: U, item: ValueType) => U, + initialValue: U + ): U | undefined { + return this.toArray().reduceRight(f, initialValue) }, - lastIndexOf(search: AutomergeValue, fromIndex = +Infinity): number { + lastIndexOf(search: ValueType, fromIndex = +Infinity): number { // this can be faster return this.toArray().lastIndexOf(search, fromIndex) }, - slice(index?: number, num?: number): AutomergeValue[] { + slice(index?: number, num?: number): ValueType[] { return this.toArray().slice(index, num) }, - some(f: (AutomergeValue, number) => boolean): boolean { + some(f: (v: ValueType, i: number) => boolean): boolean { let index = 0 for (const v of this) { if (f(v, index)) { @@ -869,7 +951,7 @@ function listMethods(target: Target) { function textMethods(target: Target) { const { context, objectId, heads } = target const methods = { - set(index: number, value) { + set(index: number, value: any) { return (this[index] = value) }, get(index: number): AutomergeValue { @@ -902,10 +984,22 @@ function textMethods(target: Target) { toJSON(): string { return this.toString() }, - indexOf(o, start = 0) { + indexOf(o: any, start = 0) { const text = context.text(objectId) return text.indexOf(o, start) }, } return methods } + +function assertText(value: Text | string): asserts value is Text { + if (!(value instanceof Text)) { + throw new Error("value was not a Text instance") + } +} + +function assertString(value: Text | string): asserts value is string { + if (typeof value !== "string") { + throw new Error("value was not a string") + } +} diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts index 9db4d0e2..3b328240 100644 --- a/javascript/src/stable.ts +++ b/javascript/src/stable.ts @@ -1,7 +1,7 @@ /** @hidden **/ export { /** @hidden */ uuid } from "./uuid" -import { rootProxy, listProxy, mapProxy, textProxy } from "./proxies" +import { rootProxy } from "./proxies" import { STATE } from "./constants" import { @@ -20,10 +20,10 @@ export { type Patch, type PatchCallback, type ScalarValue, - Text, } from "./types" import { Text } from "./text" +export { Text } from "./text" import type { API, @@ -54,6 +54,8 @@ import { RawString } from "./raw_string" import { _state, _is_proxy, _trace, _obj } from "./internal_state" +import { stableConflictAt } from "./conflicts" + /** Options passed to {@link change}, and {@link emptyChange} * @typeParam T - The type of value contained in the document */ @@ -71,13 +73,36 @@ export type ChangeOptions = { */ export type ApplyOptions = { patchCallback?: PatchCallback } +/** + * A List is an extended Array that adds the two helper methods `deleteAt` and `insertAt`. + */ +export interface List extends Array { + insertAt(index: number, ...args: T[]): List + deleteAt(index: number, numDelete?: number): List +} + +/** + * To extend an arbitrary type, we have to turn any arrays that are part of the type's definition into Lists. + * So we recurse through the properties of T, turning any Arrays we find into Lists. + */ +export type Extend = + // is it an array? make it a list (we recursively extend the type of the array's elements as well) + T extends Array + ? List> + : // is it an object? recursively extend all of its properties + // eslint-disable-next-line @typescript-eslint/ban-types + T extends Object + ? 
{ [P in keyof T]: Extend } + : // otherwise leave the type alone + T + /** * Function which is called by {@link change} when making changes to a `Doc` * @typeParam T - The type of value contained in the document * * This function may mutate `doc` */ -export type ChangeFn = (doc: T) => void +export type ChangeFn = (doc: Extend) => void /** @hidden **/ export interface State { @@ -136,11 +161,12 @@ export function init(_opts?: ActorId | InitOptions): Doc { const handle = ApiHandler.create(opts.enableTextV2 || false, opts.actor) handle.enablePatches(true) handle.enableFreeze(!!opts.freeze) - handle.registerDatatype("counter", (n: any) => new Counter(n)) - let textV2 = opts.enableTextV2 || false + handle.registerDatatype("counter", (n: number) => new Counter(n)) + const textV2 = opts.enableTextV2 || false if (textV2) { handle.registerDatatype("str", (n: string) => new RawString(n)) } else { + // eslint-disable-next-line @typescript-eslint/no-explicit-any handle.registerDatatype("text", (n: any) => new Text(n)) } const doc = handle.materialize("/", undefined, { @@ -204,7 +230,7 @@ export function clone( // `change` uses the presence of state.heads to determine if we are in a view // set it to undefined to indicate that this is a full fat document - const { heads: oldHeads, ...stateSansHeads } = state + const { heads: _oldHeads, ...stateSansHeads } = state return handle.applyPatches(doc, { ...stateSansHeads, handle }) } @@ -343,7 +369,7 @@ function _change( try { state.heads = heads const root: T = rootProxy(state.handle, state.textV2) - callback(root) + callback(root as Extend) if (state.handle.pendingOps() === 0) { state.heads = undefined return doc @@ -541,62 +567,6 @@ export function getActorId(doc: Doc): ActorId { */ type Conflicts = { [key: string]: AutomergeValue } -function conflictAt( - context: Automerge, - objectId: ObjID, - prop: Prop, - textV2: boolean -): Conflicts | undefined { - const values = context.getAll(objectId, prop) - if (values.length <= 1) { - return - } - const result: Conflicts = {} - for (const fullVal of values) { - switch (fullVal[0]) { - case "map": - result[fullVal[1]] = mapProxy(context, fullVal[1], textV2, [prop], true) - break - case "list": - result[fullVal[1]] = listProxy( - context, - fullVal[1], - textV2, - [prop], - true - ) - break - case "text": - if (textV2) { - result[fullVal[1]] = context.text(fullVal[1]) - } else { - result[fullVal[1]] = textProxy(context, objectId, [prop], true) - } - break - //case "table": - //case "cursor": - case "str": - case "uint": - case "int": - case "f64": - case "boolean": - case "bytes": - case "null": - result[fullVal[2]] = fullVal[1] - break - case "counter": - result[fullVal[2]] = new Counter(fullVal[1]) - break - case "timestamp": - result[fullVal[2]] = new Date(fullVal[1]) - break - default: - throw RangeError(`datatype ${fullVal[0]} unimplemented`) - } - } - return result -} - /** * Get the conflicts associated with a property * @@ -646,9 +616,12 @@ export function getConflicts( prop: Prop ): Conflicts | undefined { const state = _state(doc, false) + if (state.textV2) { + throw new Error("use unstable.getConflicts for an unstable document") + } const objectId = _obj(doc) if (objectId != null) { - return conflictAt(state.handle, objectId, prop, state.textV2) + return stableConflictAt(state.handle, objectId, prop) } else { return undefined } @@ -672,6 +645,7 @@ export function getLastLocalChange(doc: Doc): Change | undefined { * This is useful to determine if something is actually an automerge document, * if `doc` 
is not an automerge document this will return null. */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any export function getObjectId(doc: any, prop?: Prop): ObjID | null { if (prop) { const state = _state(doc, false) diff --git a/javascript/src/text.ts b/javascript/src/text.ts index f87af891..b01bd7db 100644 --- a/javascript/src/text.ts +++ b/javascript/src/text.ts @@ -3,9 +3,12 @@ import { TEXT, STATE } from "./constants" import type { InternalState } from "./internal_state" export class Text { + //eslint-disable-next-line @typescript-eslint/no-explicit-any elems: Array str: string | undefined + //eslint-disable-next-line @typescript-eslint/no-explicit-any spans: Array | undefined; + //eslint-disable-next-line @typescript-eslint/no-explicit-any [STATE]?: InternalState constructor(text?: string | string[] | Value[]) { @@ -25,6 +28,7 @@ export class Text { return this.elems.length } + //eslint-disable-next-line @typescript-eslint/no-explicit-any get(index: number): any { return this.elems[index] } @@ -73,7 +77,7 @@ export class Text { * For example, the value `['a', 'b', {x: 3}, 'c', 'd']` has spans: * `=> ['ab', {x: 3}, 'cd']` */ - toSpans(): Array { + toSpans(): Array { if (!this.spans) { this.spans = [] let chars = "" @@ -118,7 +122,7 @@ export class Text { /** * Inserts new list items `values` starting at position `index`. */ - insertAt(index: number, ...values: Array) { + insertAt(index: number, ...values: Array) { if (this[STATE]) { throw new RangeError( "object cannot be modified outside of a change block" @@ -140,7 +144,7 @@ export class Text { this.elems.splice(index, numDelete) } - map(callback: (e: Value | Object) => T) { + map(callback: (e: Value | object) => T) { this.elems.map(callback) } diff --git a/javascript/src/types.ts b/javascript/src/types.ts index e3cb81f8..beb5cf70 100644 --- a/javascript/src/types.ts +++ b/javascript/src/types.ts @@ -1,4 +1,5 @@ export { Text } from "./text" +import { Text } from "./text" export { Counter } from "./counter" export { Int, Uint, Float64 } from "./numbers" @@ -10,9 +11,9 @@ export type AutomergeValue = | ScalarValue | { [key: string]: AutomergeValue } | Array + | Text export type MapValue = { [key: string]: AutomergeValue } export type ListValue = Array -export type TextValue = Array export type ScalarValue = | string | number diff --git a/javascript/src/unstable.ts b/javascript/src/unstable.ts index 21b5be08..7c73afb9 100644 --- a/javascript/src/unstable.ts +++ b/javascript/src/unstable.ts @@ -22,9 +22,9 @@ * This leads to the following differences from `stable`: * * * There is no `unstable.Text` class, all strings are text objects - * * Reading strings in a `future` document is the same as reading any other + * * Reading strings in an `unstable` document is the same as reading any other * javascript string - * * To modify strings in a `future` document use {@link splice} + * * To modify strings in an `unstable` document use {@link splice} * * The {@link AutomergeValue} type does not include the {@link Text} * class but the {@link RawString} class is included in the {@link ScalarValue} * type @@ -35,7 +35,6 @@ * * @module */ -import { Counter } from "./types" export { Counter, @@ -45,27 +44,14 @@ export { Float64, type Patch, type PatchCallback, -} from "./types" + type AutomergeValue, + type ScalarValue, +} from "./unstable_types" import type { PatchCallback } from "./stable" -export type AutomergeValue = - | ScalarValue - | { [key: string]: AutomergeValue } - | Array -export type MapValue = { [key: string]: 
AutomergeValue } -export type ListValue = Array -export type ScalarValue = - | string - | number - | null - | boolean - | Date - | Counter - | Uint8Array - | RawString - -export type Conflicts = { [key: string]: AutomergeValue } +import { type UnstableConflicts as Conflicts } from "./conflicts" +import { unstableConflictAt } from "./conflicts" export type { PutPatch, @@ -125,7 +111,6 @@ export { RawString } from "./raw_string" export const getBackend = stable.getBackend import { _is_proxy, _state, _obj } from "./internal_state" -import { RawString } from "./raw_string" /** * Create a new automerge document @@ -137,7 +122,7 @@ import { RawString } from "./raw_string" * random actor ID */ export function init(_opts?: ActorId | InitOptions): Doc { - let opts = importOpts(_opts) + const opts = importOpts(_opts) opts.enableTextV2 = true return stable.init(opts) } @@ -161,7 +146,7 @@ export function clone( doc: Doc, _opts?: ActorId | InitOptions ): Doc { - let opts = importOpts(_opts) + const opts = importOpts(_opts) opts.enableTextV2 = true return stable.clone(doc, opts) } @@ -296,6 +281,14 @@ export function getConflicts( doc: Doc, prop: stable.Prop ): Conflicts | undefined { - // this function only exists to get the types to line up with future.AutomergeValue - return stable.getConflicts(doc, prop) + const state = _state(doc, false) + if (!state.textV2) { + throw new Error("use getConflicts for a stable document") + } + const objectId = _obj(doc) + if (objectId != null) { + return unstableConflictAt(state.handle, objectId, prop) + } else { + return undefined + } } diff --git a/javascript/src/unstable_types.ts b/javascript/src/unstable_types.ts new file mode 100644 index 00000000..071e2cc4 --- /dev/null +++ b/javascript/src/unstable_types.ts @@ -0,0 +1,30 @@ +import { Counter } from "./types" + +export { + Counter, + type Doc, + Int, + Uint, + Float64, + type Patch, + type PatchCallback, +} from "./types" + +import { RawString } from "./raw_string" +export { RawString } from "./raw_string" + +export type AutomergeValue = + | ScalarValue + | { [key: string]: AutomergeValue } + | Array +export type MapValue = { [key: string]: AutomergeValue } +export type ListValue = Array +export type ScalarValue = + | string + | number + | null + | boolean + | Date + | Counter + | Uint8Array + | RawString diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index 90e7a99d..5aa1ac34 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -267,7 +267,6 @@ describe("Automerge", () => { }) assert.deepEqual(doc5, { list: [2, 1, 9, 10, 3, 11, 12] }) let doc6 = Automerge.change(doc5, d => { - // @ts-ignore d.list.insertAt(3, 100, 101) }) assert.deepEqual(doc6, { list: [2, 1, 9, 100, 101, 10, 3, 11, 12] }) diff --git a/javascript/test/legacy_tests.ts b/javascript/test/legacy_tests.ts index a423b51f..90c731d9 100644 --- a/javascript/test/legacy_tests.ts +++ b/javascript/test/legacy_tests.ts @@ -461,12 +461,12 @@ describe("Automerge", () => { s1 = Automerge.change(s1, "set foo", doc => { doc.foo = "bar" }) - let deleted + let deleted: any s1 = Automerge.change(s1, "del foo", doc => { deleted = delete doc.foo }) assert.strictEqual(deleted, true) - let deleted2 + let deleted2: any assert.doesNotThrow(() => { s1 = Automerge.change(s1, "del baz", doc => { deleted2 = delete doc.baz @@ -515,7 +515,7 @@ describe("Automerge", () => { s1 = Automerge.change(s1, doc => { doc.nested = {} }) - let id = Automerge.getObjectId(s1.nested) + Automerge.getObjectId(s1.nested) 
assert.strictEqual( OPID_PATTERN.test(Automerge.getObjectId(s1.nested)!), true @@ -975,6 +975,7 @@ describe("Automerge", () => { it("should allow adding and removing list elements in the same change callback", () => { let s1 = Automerge.change( Automerge.init<{ noodles: Array }>(), + // @ts-ignore doc => (doc.noodles = []) ) s1 = Automerge.change(s1, doc => { diff --git a/javascript/test/stable_unstable_interop.ts b/javascript/test/stable_unstable_interop.ts index 2f58c256..dc57f338 100644 --- a/javascript/test/stable_unstable_interop.ts +++ b/javascript/test/stable_unstable_interop.ts @@ -38,4 +38,62 @@ describe("stable/unstable interop", () => { stableDoc = unstable.merge(stableDoc, unstableDoc) assert.deepStrictEqual(stableDoc.text, "abc") }) + + it("should show conflicts on text objects", () => { + let doc1 = stable.from({ text: new stable.Text("abc") }, "bb") + let doc2 = stable.from({ text: new stable.Text("def") }, "aa") + doc1 = stable.merge(doc1, doc2) + let conflicts = stable.getConflicts(doc1, "text")! + assert.equal(conflicts["1@bb"]!.toString(), "abc") + assert.equal(conflicts["1@aa"]!.toString(), "def") + + let unstableDoc = unstable.init() + unstableDoc = unstable.merge(unstableDoc, doc1) + let conflicts2 = unstable.getConflicts(unstableDoc, "text")! + assert.equal(conflicts2["1@bb"]!.toString(), "abc") + assert.equal(conflicts2["1@aa"]!.toString(), "def") + }) + + it("should allow filling a list with text in stable", () => { + let doc = stable.from<{ list: Array }>({ + list: [null, null, null], + }) + doc = stable.change(doc, doc => { + doc.list.fill(new stable.Text("abc"), 0, 3) + }) + assert.deepStrictEqual(doc.list, [ + new stable.Text("abc"), + new stable.Text("abc"), + new stable.Text("abc"), + ]) + }) + + it("should allow filling a list with text in unstable", () => { + let doc = unstable.from<{ list: Array }>({ + list: [null, null, null], + }) + doc = stable.change(doc, doc => { + doc.list.fill("abc", 0, 3) + }) + assert.deepStrictEqual(doc.list, ["abc", "abc", "abc"]) + }) + + it("should allow splicing text into a list on stable", () => { + let doc = stable.from<{ list: Array }>({ list: [] }) + doc = stable.change(doc, doc => { + doc.list.splice(0, 0, new stable.Text("abc"), new stable.Text("def")) + }) + assert.deepStrictEqual(doc.list, [ + new stable.Text("abc"), + new stable.Text("def"), + ]) + }) + + it("should allow splicing text into a list on unstable", () => { + let doc = unstable.from<{ list: Array }>({ list: [] }) + doc = unstable.change(doc, doc => { + doc.list.splice(0, 0, "abc", "def") + }) + assert.deepStrictEqual(doc.list, ["abc", "def"]) + }) }) From 58a7a06b754f58bee961012a96485634c9efa854 Mon Sep 17 00:00:00 2001 From: alexjg Date: Fri, 27 Jan 2023 20:27:11 +0000 Subject: [PATCH 44/72] @automerge/automerge-wasm@0.1.23 and @automerge/automerge@2.0.1-alpha.6 (#509) --- javascript/package.json | 4 ++-- rust/automerge-wasm/package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index caeeb647..05358703 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1-alpha.5", + "version": "2.0.1-alpha.6", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -47,7 +47,7 @@ "typescript": "^4.9.4" }, "dependencies": { - 
"@automerge/automerge-wasm": "0.1.22", + "@automerge/automerge-wasm": "0.1.23", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 0f133468..cce3199f 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.22", + "version": "0.1.23", "license": "MIT", "files": [ "README.md", From 9b6a3c8691de47f1751c916776555db18e012f80 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Sat, 28 Jan 2023 09:32:21 +0000 Subject: [PATCH 45/72] Update README --- README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d11e9d1c..94e1bbb8 100644 --- a/README.md +++ b/README.md @@ -42,9 +42,10 @@ In general we try and respect semver. ### JavaScript -An alpha release of the javascript package is currently available as -`@automerge/automerge@2.0.0-alpha.n` where `n` is an integer. We are gathering -feedback on the API and looking to release a `2.0.0` in the next few weeks. +A stable release of the javascript package is currently available as +`@automerge/automerge@2.0.0` where. pre-release verisions of the `2.0.1` are +available as `2.0.1-alpha.n`. `2.0.1*` packages are also available for Deno at +https://deno.land/x/automerge ### Rust @@ -52,7 +53,10 @@ The rust codebase is currently oriented around producing a performant backend for the Javascript wrapper and as such the API for Rust code is low level and not well documented. We will be returning to this over the next few months but for now you will need to be comfortable reading the tests and asking questions -to figure out how to use it. +to figure out how to use it. If you are looking to build rust applications which +use automerge you may want to look into +[autosurgeon](https://github.com/alexjg/autosurgeon) + ## Repository Organisation From 89a0866272502f6360221d6585e93990f932de24 Mon Sep 17 00:00:00 2001 From: alexjg Date: Sat, 28 Jan 2023 21:22:45 +0000 Subject: [PATCH 46/72] @automerge/automerge@2.0.1 (#510) --- javascript/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/package.json b/javascript/package.json index 05358703..017c5a54 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1-alpha.6", + "version": "2.0.1", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", From 08801ab580e31df472f5c33858aa85b94d99d0fe Mon Sep 17 00:00:00 2001 From: alexjg Date: Mon, 30 Jan 2023 19:37:03 +0000 Subject: [PATCH 47/72] automerge-rs: Introduce ReadDoc and SyncDoc traits and add documentation (#511) The Rust API has so far grown somewhat organically driven by the needs of the javascript implementation. This has led to an API which is quite awkward and unfamiliar to Rust programmers. Additionally there is no documentation to speak of. This commit is the first movement towards cleaning things up a bit. We touch a lot of files but the changes are all very mechanical. 
We introduce a few traits to abstract over the common operations between
`Automerge` and `AutoCommit`, and add a whole bunch of documentation.

* Add a `ReadDoc` trait to describe methods which read values from a
  document, and make `Transactable` extend `ReadDoc`
* Add a `SyncDoc` trait to describe methods necessary for synchronizing
  documents.
* Put the `SyncDoc` implementation for `AutoCommit` behind `AutoCommit::sync`
  to ensure that any open transactions are closed before taking part in the
  sync protocol
* Split `OpObserver` into two traits: `OpObserver` + `BranchableObserver`.
  `BranchableObserver` captures the methods which are only needed for
  observing transactions.
* Add a whole bunch of documentation.

The main changes Rust users will need to make are:

* Import the `ReadDoc` trait wherever you are using the methods which have
  been moved to it. Optionally change concrete parameters on functions to
  `ReadDoc` constraints.
* Likewise import the `SyncDoc` trait wherever you are doing synchronisation
  work
* If you are using the `AutoCommit::*_sync_message` methods you will need to
  add a call to `AutoCommit::sync()` first. E.g. `doc.generate_sync_message`
  becomes `doc.sync().generate_sync_message`
* If you have an implementation of `OpObserver` which you are using in an
  `AutoCommit` then split it into an implementation of `OpObserver` and
  `BranchableObserver`
---
 rust/automerge-c/src/doc.rs                   |   9 +-
 rust/automerge-c/src/doc/list.rs              |   1 +
 rust/automerge-c/src/doc/map.rs               |   1 +
 rust/automerge-cli/src/export.rs              |   1 +
 rust/automerge-test/src/lib.rs                |  21 +-
 rust/automerge-wasm/src/interop.rs            |   2 +-
 rust/automerge-wasm/src/lib.rs                |   8 +-
 rust/automerge-wasm/src/observer.rs           |  42 +-
 rust/automerge/Cargo.toml                     |   1 +
 rust/automerge/README.md                      |   5 +
 rust/automerge/benches/range.rs               |  18 +-
 rust/automerge/benches/sync.rs                |   6 +-
 rust/automerge/examples/quickstart.rs         |   2 +-
 rust/automerge/examples/watch.rs              |   1 +
 rust/automerge/src/autocommit.rs              | 286 +++++--
 rust/automerge/src/automerge.rs               | 810 +++++++++---------
 rust/automerge/src/automerge/tests.rs         |   2 +-
 rust/automerge/src/autoserde.rs               |  45 +-
 rust/automerge/src/exid.rs                    |   9 +-
 rust/automerge/src/keys.rs                    |   4 +
 rust/automerge/src/keys_at.rs                 |   4 +
 rust/automerge/src/lib.rs                     | 193 ++++-
 rust/automerge/src/list_range.rs              |   3 +
 rust/automerge/src/list_range_at.rs           |   3 +
 rust/automerge/src/map_range.rs               |   3 +
 rust/automerge/src/map_range_at.rs            |   3 +
 rust/automerge/src/op_observer.rs             | 135 +--
 rust/automerge/src/op_observer/compose.rs     | 102 +++
 rust/automerge/src/parents.rs                 |  31 +-
 rust/automerge/src/read.rs                    | 199 +++++
 rust/automerge/src/sync.rs                    | 278 ++++--
 rust/automerge/src/sync/state.rs              |  10 +
 rust/automerge/src/transaction/inner.rs       |   2 +-
 .../src/transaction/manual_transaction.rs     | 199 +++--
 rust/automerge/src/transaction/observation.rs |  14 +-
 .../automerge/src/transaction/transactable.rs | 109 +--
 rust/automerge/src/types.rs                   |  19 +
 rust/automerge/src/value.rs                   |  10 +-
 rust/automerge/src/values.rs                  |   9 +-
 rust/automerge/tests/test.rs                  |  72 +-
 rust/edit-trace/src/main.rs                   |   1 +
 41 files changed, 1720 insertions(+), 953 deletions(-)
 create mode 100644 rust/automerge/README.md
 create mode 100644 rust/automerge/src/op_observer/compose.rs
 create mode 100644 rust/automerge/src/read.rs

diff --git a/rust/automerge-c/src/doc.rs b/rust/automerge-c/src/doc.rs
index 58625798..f02c01bf 100644
--- a/rust/automerge-c/src/doc.rs
+++ b/rust/automerge-c/src/doc.rs
@@ -1,5 +1,7 @@
 use automerge as am;
+use automerge::sync::SyncDoc;
 use automerge::transaction::{CommitOptions, Transactable};
+use automerge::ReadDoc; use std::ops::{Deref, DerefMut}; use crate::actor_id::{to_actor_id, AMactorId}; @@ -291,7 +293,7 @@ pub unsafe extern "C" fn AMgenerateSyncMessage( ) -> *mut AMresult { let doc = to_doc_mut!(doc); let sync_state = to_sync_state_mut!(sync_state); - to_result(doc.generate_sync_message(sync_state.as_mut())) + to_result(doc.sync().generate_sync_message(sync_state.as_mut())) } /// \memberof AMdoc @@ -708,7 +710,10 @@ pub unsafe extern "C" fn AMreceiveSyncMessage( let doc = to_doc_mut!(doc); let sync_state = to_sync_state_mut!(sync_state); let sync_message = to_sync_message!(sync_message); - to_result(doc.receive_sync_message(sync_state.as_mut(), sync_message.as_ref().clone())) + to_result( + doc.sync() + .receive_sync_message(sync_state.as_mut(), sync_message.as_ref().clone()), + ) } /// \memberof AMdoc diff --git a/rust/automerge-c/src/doc/list.rs b/rust/automerge-c/src/doc/list.rs index 48f26c21..6bcdeabf 100644 --- a/rust/automerge-c/src/doc/list.rs +++ b/rust/automerge-c/src/doc/list.rs @@ -1,5 +1,6 @@ use automerge as am; use automerge::transaction::Transactable; +use automerge::ReadDoc; use crate::byte_span::{to_str, AMbyteSpan}; use crate::change_hashes::AMchangeHashes; diff --git a/rust/automerge-c/src/doc/map.rs b/rust/automerge-c/src/doc/map.rs index a5801323..86c6b4a2 100644 --- a/rust/automerge-c/src/doc/map.rs +++ b/rust/automerge-c/src/doc/map.rs @@ -1,5 +1,6 @@ use automerge as am; use automerge::transaction::Transactable; +use automerge::ReadDoc; use crate::byte_span::{to_str, AMbyteSpan}; use crate::change_hashes::AMchangeHashes; diff --git a/rust/automerge-cli/src/export.rs b/rust/automerge-cli/src/export.rs index 45fd7b3b..45f39101 100644 --- a/rust/automerge-cli/src/export.rs +++ b/rust/automerge-cli/src/export.rs @@ -1,5 +1,6 @@ use anyhow::Result; use automerge as am; +use automerge::ReadDoc; use crate::{color_json::print_colored_json, SkipVerifyFlag}; diff --git a/rust/automerge-test/src/lib.rs b/rust/automerge-test/src/lib.rs index b2af72e1..a1d4ea89 100644 --- a/rust/automerge-test/src/lib.rs +++ b/rust/automerge-test/src/lib.rs @@ -4,6 +4,8 @@ use std::{ hash::Hash, }; +use automerge::ReadDoc; + use serde::ser::{SerializeMap, SerializeSeq}; pub fn new_doc() -> automerge::AutoCommit { @@ -48,7 +50,7 @@ pub fn sorted_actors() -> (automerge::ActorId, automerge::ActorId) { /// let title = doc.put(todo, "title", "water plants").unwrap(); /// /// assert_doc!( -/// &doc.document(), +/// &doc, /// map!{ /// "todos" => { /// list![ @@ -67,6 +69,7 @@ pub fn sorted_actors() -> (automerge::ActorId, automerge::ActorId) { /// ```rust /// # use automerge_test::{assert_doc, map}; /// # use automerge::transaction::Transactable; +/// # use automerge::ReadDoc; /// /// let mut doc1 = automerge::AutoCommit::new(); /// let mut doc2 = automerge::AutoCommit::new(); @@ -74,7 +77,7 @@ pub fn sorted_actors() -> (automerge::ActorId, automerge::ActorId) { /// doc2.put(automerge::ROOT, "field", "two").unwrap(); /// doc1.merge(&mut doc2); /// assert_doc!( -/// &doc1.document(), +/// doc1.document(), /// map!{ /// "field" => { /// "one", @@ -330,12 +333,12 @@ impl serde::Serialize for RealizedObject { } } -pub fn realize(doc: &automerge::Automerge) -> RealizedObject { +pub fn realize(doc: &R) -> RealizedObject { realize_obj(doc, &automerge::ROOT, automerge::ObjType::Map) } -pub fn realize_prop>( - doc: &automerge::Automerge, +pub fn realize_prop>( + doc: &R, obj_id: &automerge::ObjId, prop: P, ) -> RealizedObject { @@ -346,8 +349,8 @@ pub fn realize_prop>( } } -pub fn 
realize_obj( - doc: &automerge::Automerge, +pub fn realize_obj( + doc: &R, obj_id: &automerge::ObjId, objtype: automerge::ObjType, ) -> RealizedObject { @@ -370,8 +373,8 @@ pub fn realize_obj( } } -fn realize_values>( - doc: &automerge::Automerge, +fn realize_values>( + doc: &R, obj_id: &automerge::ObjId, key: K, ) -> BTreeSet { diff --git a/rust/automerge-wasm/src/interop.rs b/rust/automerge-wasm/src/interop.rs index 2881209a..1546ff10 100644 --- a/rust/automerge-wasm/src/interop.rs +++ b/rust/automerge-wasm/src/interop.rs @@ -2,7 +2,7 @@ use crate::error::InsertObject; use crate::value::Datatype; use crate::{Automerge, TextRepresentation}; use automerge as am; -use automerge::transaction::Transactable; +use automerge::ReadDoc; use automerge::ROOT; use automerge::{Change, ChangeHash, ObjType, Prop}; use js_sys::{Array, Function, JsString, Object, Reflect, Symbol, Uint8Array}; diff --git a/rust/automerge-wasm/src/lib.rs b/rust/automerge-wasm/src/lib.rs index d6ccc8c8..b53bf3b9 100644 --- a/rust/automerge-wasm/src/lib.rs +++ b/rust/automerge-wasm/src/lib.rs @@ -29,7 +29,7 @@ use am::transaction::CommitOptions; use am::transaction::{Observed, Transactable, UnObserved}; use am::ScalarValue; use automerge as am; -use automerge::{Change, ObjId, Prop, TextEncoding, Value, ROOT}; +use automerge::{sync::SyncDoc, Change, ObjId, Prop, ReadDoc, TextEncoding, Value, ROOT}; use js_sys::{Array, Function, Object, Uint8Array}; use serde::ser::Serialize; use std::borrow::Cow; @@ -746,13 +746,15 @@ impl Automerge { ) -> Result<(), error::ReceiveSyncMessage> { let message = message.to_vec(); let message = am::sync::Message::decode(message.as_slice())?; - self.doc.receive_sync_message(&mut state.0, message)?; + self.doc + .sync() + .receive_sync_message(&mut state.0, message)?; Ok(()) } #[wasm_bindgen(js_name = generateSyncMessage)] pub fn generate_sync_message(&mut self, state: &mut SyncState) -> JsValue { - if let Some(message) = self.doc.generate_sync_message(&mut state.0) { + if let Some(message) = self.doc.sync().generate_sync_message(&mut state.0) { Uint8Array::from(message.encode().as_slice()).into() } else { JsValue::null() diff --git a/rust/automerge-wasm/src/observer.rs b/rust/automerge-wasm/src/observer.rs index 83516597..c0b462a6 100644 --- a/rust/automerge-wasm/src/observer.rs +++ b/rust/automerge-wasm/src/observer.rs @@ -6,7 +6,7 @@ use crate::{ interop::{self, alloc, js_set}, TextRepresentation, }; -use automerge::{Automerge, ObjId, OpObserver, Prop, ScalarValue, SequenceTree, Value}; +use automerge::{ObjId, OpObserver, Prop, ReadDoc, ScalarValue, SequenceTree, Value}; use js_sys::{Array, Object}; use wasm_bindgen::prelude::*; @@ -30,9 +30,9 @@ impl Observer { old_enabled } - fn get_path(&mut self, doc: &Automerge, obj: &ObjId) -> Option> { + fn get_path(&mut self, doc: &R, obj: &ObjId) -> Option> { match doc.parents(obj) { - Ok(mut parents) => parents.visible_path(), + Ok(parents) => parents.visible_path(), Err(e) => { automerge::log!("error generating patch : {:?}", e); None @@ -98,9 +98,9 @@ pub(crate) enum Patch { } impl OpObserver for Observer { - fn insert( + fn insert( &mut self, - doc: &Automerge, + doc: &R, obj: ObjId, index: usize, tagged_value: (Value<'_>, ObjId), @@ -134,7 +134,7 @@ impl OpObserver for Observer { } } - fn splice_text(&mut self, doc: &Automerge, obj: ObjId, index: usize, value: &str) { + fn splice_text(&mut self, doc: &R, obj: ObjId, index: usize, value: &str) { if self.enabled { if self.text_rep == TextRepresentation::Array { for (i, c) in value.chars().enumerate() 
{ @@ -182,7 +182,7 @@ impl OpObserver for Observer { } } - fn delete_seq(&mut self, doc: &Automerge, obj: ObjId, index: usize, length: usize) { + fn delete_seq(&mut self, doc: &R, obj: ObjId, index: usize, length: usize) { if self.enabled { match self.patches.last_mut() { Some(Patch::SpliceText { @@ -244,7 +244,7 @@ impl OpObserver for Observer { } } - fn delete_map(&mut self, doc: &Automerge, obj: ObjId, key: &str) { + fn delete_map(&mut self, doc: &R, obj: ObjId, key: &str) { if self.enabled { if let Some(path) = self.get_path(doc, &obj) { let patch = Patch::DeleteMap { @@ -257,9 +257,9 @@ impl OpObserver for Observer { } } - fn put( + fn put( &mut self, - doc: &Automerge, + doc: &R, obj: ObjId, prop: Prop, tagged_value: (Value<'_>, ObjId), @@ -290,9 +290,9 @@ impl OpObserver for Observer { } } - fn expose( + fn expose( &mut self, - doc: &Automerge, + doc: &R, obj: ObjId, prop: Prop, tagged_value: (Value<'_>, ObjId), @@ -323,7 +323,13 @@ impl OpObserver for Observer { } } - fn increment(&mut self, doc: &Automerge, obj: ObjId, prop: Prop, tagged_value: (i64, ObjId)) { + fn increment( + &mut self, + doc: &R, + obj: ObjId, + prop: Prop, + tagged_value: (i64, ObjId), + ) { if self.enabled { if let Some(path) = self.get_path(doc, &obj) { let value = tagged_value.0; @@ -337,6 +343,12 @@ impl OpObserver for Observer { } } + fn text_as_seq(&self) -> bool { + self.text_rep == TextRepresentation::Array + } +} + +impl automerge::op_observer::BranchableObserver for Observer { fn merge(&mut self, other: &Self) { self.patches.extend_from_slice(other.patches.as_slice()) } @@ -348,10 +360,6 @@ impl OpObserver for Observer { text_rep: self.text_rep, } } - - fn text_as_seq(&self) -> bool { - self.text_rep == TextRepresentation::Array - } } fn prop_to_js(p: &Prop) -> JsValue { diff --git a/rust/automerge/Cargo.toml b/rust/automerge/Cargo.toml index 89b48020..578878ae 100644 --- a/rust/automerge/Cargo.toml +++ b/rust/automerge/Cargo.toml @@ -7,6 +7,7 @@ repository = "https://github.com/automerge/automerge-rs" documentation = "https://automerge.org/automerge-rs/automerge/" rust-version = "1.57.0" description = "A JSON-like data structure (a CRDT) that can be modified concurrently by different users, and merged again automatically" +readme = "./README.md" [features] optree-visualisation = ["dot", "rand"] diff --git a/rust/automerge/README.md b/rust/automerge/README.md new file mode 100644 index 00000000..97dbe4f8 --- /dev/null +++ b/rust/automerge/README.md @@ -0,0 +1,5 @@ +# Automerge + +Automerge is a library of data structures for building collaborative +[local-first](https://www.inkandswitch.com/local-first/) applications. This is +the Rust implementation. 
See [automerge.org](https://automerge.org/) diff --git a/rust/automerge/benches/range.rs b/rust/automerge/benches/range.rs index aec5c293..008ae159 100644 --- a/rust/automerge/benches/range.rs +++ b/rust/automerge/benches/range.rs @@ -1,4 +1,4 @@ -use automerge::{transaction::Transactable, Automerge, ROOT}; +use automerge::{transaction::Transactable, Automerge, ReadDoc, ROOT}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; fn doc(n: u64) -> Automerge { @@ -16,36 +16,20 @@ fn range(doc: &Automerge) { range.for_each(drop); } -fn range_rev(doc: &Automerge) { - let range = doc.values(ROOT).rev(); - range.for_each(drop); -} - fn range_at(doc: &Automerge) { let range = doc.values_at(ROOT, &doc.get_heads()); range.for_each(drop); } -fn range_at_rev(doc: &Automerge) { - let range = doc.values_at(ROOT, &doc.get_heads()).rev(); - range.for_each(drop); -} - fn criterion_benchmark(c: &mut Criterion) { let n = 100_000; let doc = doc(n); c.bench_function(&format!("range {}", n), |b| { b.iter(|| range(black_box(&doc))) }); - c.bench_function(&format!("range rev {}", n), |b| { - b.iter(|| range_rev(black_box(&doc))) - }); c.bench_function(&format!("range_at {}", n), |b| { b.iter(|| range_at(black_box(&doc))) }); - c.bench_function(&format!("range_at rev {}", n), |b| { - b.iter(|| range_at_rev(black_box(&doc))) - }); } criterion_group!(benches, criterion_benchmark); diff --git a/rust/automerge/benches/sync.rs b/rust/automerge/benches/sync.rs index 483fd2b4..13965792 100644 --- a/rust/automerge/benches/sync.rs +++ b/rust/automerge/benches/sync.rs @@ -1,4 +1,8 @@ -use automerge::{sync, transaction::Transactable, Automerge, ROOT}; +use automerge::{ + sync::{self, SyncDoc}, + transaction::Transactable, + Automerge, ROOT, +}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; #[derive(Default)] diff --git a/rust/automerge/examples/quickstart.rs b/rust/automerge/examples/quickstart.rs index 76ef0470..fcb23d5e 100644 --- a/rust/automerge/examples/quickstart.rs +++ b/rust/automerge/examples/quickstart.rs @@ -2,7 +2,7 @@ use automerge::transaction::CommitOptions; use automerge::transaction::Transactable; use automerge::AutomergeError; use automerge::ObjType; -use automerge::{Automerge, ROOT}; +use automerge::{Automerge, ReadDoc, ROOT}; // Based on https://automerge.github.io/docs/quickstart fn main() { diff --git a/rust/automerge/examples/watch.rs b/rust/automerge/examples/watch.rs index 1618d6c4..4cd8f4ea 100644 --- a/rust/automerge/examples/watch.rs +++ b/rust/automerge/examples/watch.rs @@ -3,6 +3,7 @@ use automerge::transaction::Transactable; use automerge::Automerge; use automerge::AutomergeError; use automerge::Patch; +use automerge::ReadDoc; use automerge::VecOpObserver; use automerge::ROOT; diff --git a/rust/automerge/src/autocommit.rs b/rust/automerge/src/autocommit.rs index 2258fa2e..2c1c3adf 100644 --- a/rust/automerge/src/autocommit.rs +++ b/rust/automerge/src/autocommit.rs @@ -1,10 +1,12 @@ use std::ops::RangeBounds; use crate::exid::ExId; -use crate::op_observer::OpObserver; +use crate::op_observer::{BranchableObserver, OpObserver}; +use crate::sync::SyncDoc; use crate::transaction::{CommitOptions, Transactable}; use crate::{ - sync, Keys, KeysAt, ListRange, ListRangeAt, MapRange, MapRangeAt, ObjType, Parents, ScalarValue, + sync, Keys, KeysAt, ListRange, ListRangeAt, MapRange, MapRangeAt, ObjType, Parents, ReadDoc, + ScalarValue, }; use crate::{ transaction::{Observation, Observed, TransactionInner, UnObserved}, @@ -12,6 +14,41 @@ use crate::{ }; /// An 
automerge document that automatically manages transactions. +/// +/// An `AutoCommit` can optionally manage an [`OpObserver`]. This observer will be notified of all +/// changes made by both remote and local changes. The type parameter `O` tracks whether this +/// document is observed or not. +/// +/// ## Creating, loading, merging and forking documents +/// +/// A new document can be created with [`Self::new`], which will create a document with a random +/// [`ActorId`]. Existing documents can be loaded with [`Self::load`]. +/// +/// If you have two documents and you want to merge the changes from one into the other you can use +/// [`Self::merge`]. +/// +/// If you have a document you want to split into two concurrent threads of execution you can use +/// [`Self::fork`]. If you want to split a document from ealier in its history you can use +/// [`Self::fork_at`]. +/// +/// ## Reading values +/// +/// [`Self`] implements [`ReadDoc`], which provides methods for reading values from the document. +/// +/// ## Modifying a document +/// +/// This type implements [`Transactable`] directly, so you can modify it using methods from [`Transactable`]. +/// +/// ## Synchronization +/// +/// To synchronise call [`Self::sync`] which returns an implementation of [`SyncDoc`] +/// +/// ## Observers +/// +/// An `AutoCommit` can optionally manage an [`OpObserver`]. [`Self::new`] will return a document +/// with no observer but you can set an observer using [`Self::with_observer`]. The observer must +/// implement both [`OpObserver`] and [`BranchableObserver`]. If you have an observed autocommit +/// then you can obtain a mutable reference to the observer with [`Self::observer`] #[derive(Debug, Clone)] pub struct AutoCommitWithObs { doc: Automerge, @@ -19,19 +56,12 @@ pub struct AutoCommitWithObs { observation: Obs, } +/// An autocommit document with no observer +/// +/// See [`AutoCommitWithObs`] pub type AutoCommit = AutoCommitWithObs; -impl AutoCommitWithObs { - pub fn unobserved() -> AutoCommitWithObs { - AutoCommitWithObs { - doc: Automerge::new(), - transaction: None, - observation: UnObserved::new(), - } - } -} - -impl Default for AutoCommitWithObs> { +impl Default for AutoCommitWithObs> { fn default() -> Self { let op_observer = O::default(); AutoCommitWithObs { @@ -61,7 +91,7 @@ impl AutoCommit { } } -impl AutoCommitWithObs> { +impl AutoCommitWithObs> { pub fn observer(&mut self) -> &mut Obs { self.ensure_transaction_closed(); self.observation.observer() @@ -89,7 +119,7 @@ impl AutoCommitWithObs { } impl AutoCommitWithObs { - pub fn with_observer( + pub fn with_observer( self, op_observer: Obs2, ) -> AutoCommitWithObs> { @@ -125,6 +155,9 @@ impl AutoCommitWithObs { self.doc.get_actor() } + /// Change the text encoding of this view of the document + /// + /// This is a cheap operation, it just changes the way indexes are calculated pub fn with_encoding(mut self, encoding: TextEncoding) -> Self { self.doc.text_encoding = encoding; self @@ -145,6 +178,13 @@ impl AutoCommitWithObs { } } + /// Load an incremental save of a document. + /// + /// Unlike `load` this imports changes into an existing document. It will work with both the + /// output of [`Self::save`] and [`Self::save_incremental`] + /// + /// The return value is the number of ops which were applied, this is not useful and will + /// change in future. 
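+    ///
+    /// A rough usage sketch (an illustrative addition, not part of the original patch):
+    ///
+    /// ```
+    /// # use automerge::{transaction::Transactable, AutoCommit, ROOT};
+    /// // illustrative sketch: make a change, save it incrementally, and
+    /// // apply those bytes to a second document
+    /// let mut doc = AutoCommit::new();
+    /// doc.put(ROOT, "key", "value").unwrap();
+    /// let bytes = doc.save_incremental();
+    ///
+    /// let mut other = AutoCommit::new();
+    /// other.load_incremental(&bytes).unwrap();
+    /// ```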
pub fn load_incremental(&mut self, data: &[u8]) -> Result { self.ensure_transaction_closed(); // TODO - would be nice to pass None here instead of &mut () @@ -181,17 +221,24 @@ impl AutoCommitWithObs { } } + /// Save the entirety of this document in a compact form. pub fn save(&mut self) -> Vec { self.ensure_transaction_closed(); self.doc.save() } + /// Save this document, but don't run it through DEFLATE afterwards pub fn save_nocompress(&mut self) -> Vec { self.ensure_transaction_closed(); self.doc.save_nocompress() } - // should this return an empty vec instead of None? + /// Save the changes since the last call to [Self::save`] + /// + /// The output of this will not be a compressed document format, but a series of individual + /// changes. This is useful if you know you have only made a small change since the last `save` + /// and you want to immediately send it somewhere (e.g. you've inserted a single character in a + /// text object). pub fn save_incremental(&mut self) -> Vec { self.ensure_transaction_closed(); self.doc.save_incremental() @@ -202,6 +249,7 @@ impl AutoCommitWithObs { self.doc.get_missing_deps(heads) } + /// Get the last change made by this documents actor ID pub fn get_last_local_change(&mut self) -> Option<&Change> { self.ensure_transaction_closed(); self.doc.get_last_local_change() @@ -220,40 +268,24 @@ impl AutoCommitWithObs { self.doc.get_change_by_hash(hash) } + /// Get changes in `other` that are not in `self pub fn get_changes_added<'a>(&mut self, other: &'a mut Self) -> Vec<&'a Change> { self.ensure_transaction_closed(); other.ensure_transaction_closed(); self.doc.get_changes_added(&other.doc) } + #[doc(hidden)] pub fn import(&self, s: &str) -> Result<(ExId, ObjType), AutomergeError> { self.doc.import(s) } + #[doc(hidden)] pub fn dump(&mut self) { self.ensure_transaction_closed(); self.doc.dump() } - pub fn generate_sync_message(&mut self, sync_state: &mut sync::State) -> Option { - self.ensure_transaction_closed(); - self.doc.generate_sync_message(sync_state) - } - - pub fn receive_sync_message( - &mut self, - sync_state: &mut sync::State, - message: sync::Message, - ) -> Result<(), AutomergeError> { - self.ensure_transaction_closed(); - if let Some(observer) = self.observation.observer() { - self.doc - .receive_sync_message_with(sync_state, message, Some(observer)) - } else { - self.doc.receive_sync_message(sync_state, message) - } - } - /// Return a graphviz representation of the opset. 
/// /// # Arguments @@ -305,6 +337,7 @@ impl AutoCommitWithObs { tx.commit(&mut self.doc, options.message, options.time) } + /// Remove any changes that have been made in the current transaction from the document pub fn rollback(&mut self) -> usize { self.transaction .take() @@ -326,14 +359,24 @@ impl AutoCommitWithObs { let args = self.doc.transaction_args(); TransactionInner::empty(&mut self.doc, args, options.message, options.time) } + + /// An implementation of [`crate::sync::SyncDoc`] for this autocommit + /// + /// This ensures that any outstanding transactions for this document are committed before + /// taking part in the sync protocol + pub fn sync(&mut self) -> impl SyncDoc + '_ { + self.ensure_transaction_closed(); + SyncWrapper { inner: self } + } } -impl Transactable for AutoCommitWithObs { - fn pending_ops(&self) -> usize { - self.transaction - .as_ref() - .map(|(_, t)| t.pending_ops()) - .unwrap_or(0) +impl ReadDoc for AutoCommitWithObs { + fn parents>(&self, obj: O) -> Result, AutomergeError> { + self.doc.parents(obj) + } + + fn path_to_object>(&self, obj: O) -> Result, AutomergeError> { + self.doc.path_to_object(obj) } fn keys>(&self, obj: O) -> Keys<'_, '_> { @@ -398,6 +441,69 @@ impl Transactable for AutoCommitWithObs { self.doc.object_type(obj) } + fn text>(&self, obj: O) -> Result { + self.doc.text(obj) + } + + fn text_at>( + &self, + obj: O, + heads: &[ChangeHash], + ) -> Result { + self.doc.text_at(obj, heads) + } + + fn get, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError> { + self.doc.get(obj, prop) + } + + fn get_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError> { + self.doc.get_at(obj, prop, heads) + } + + fn get_all, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError> { + self.doc.get_all(obj, prop) + } + + fn get_all_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError> { + self.doc.get_all_at(obj, prop, heads) + } + + fn get_missing_deps(&self, heads: &[ChangeHash]) -> Vec { + self.doc.get_missing_deps(heads) + } + + fn get_change_by_hash(&self, hash: &ChangeHash) -> Option<&Change> { + self.doc.get_change_by_hash(hash) + } +} + +impl Transactable for AutoCommitWithObs { + fn pending_ops(&self) -> usize { + self.transaction + .as_ref() + .map(|(_, t)| t.pending_ops()) + .unwrap_or(0) + } + fn put, P: Into, V: Into>( &mut self, obj: O, @@ -515,60 +621,52 @@ impl Transactable for AutoCommitWithObs { ) } - fn text>(&self, obj: O) -> Result { - self.doc.text(obj) - } - - fn text_at>( - &self, - obj: O, - heads: &[ChangeHash], - ) -> Result { - self.doc.text_at(obj, heads) - } - - // TODO - I need to return these OpId's here **only** to get - // the legacy conflicts format of { [opid]: value } - // Something better? 
- fn get, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError> { - self.doc.get(obj, prop) - } - - fn get_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError> { - self.doc.get_at(obj, prop, heads) - } - - fn get_all, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError> { - self.doc.get_all(obj, prop) - } - - fn get_all_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError> { - self.doc.get_all_at(obj, prop, heads) - } - - fn parents>(&self, obj: O) -> Result, AutomergeError> { - self.doc.parents(obj) - } - fn base_heads(&self) -> Vec { self.doc.get_heads() } } + +// A wrapper we return from `AutoCommit::sync` to ensure that transactions are closed before we +// start syncing +struct SyncWrapper<'a, Obs: Observation> { + inner: &'a mut AutoCommitWithObs, +} + +impl<'a, Obs: Observation> SyncDoc for SyncWrapper<'a, Obs> { + fn generate_sync_message(&self, sync_state: &mut sync::State) -> Option { + self.inner.doc.generate_sync_message(sync_state) + } + + fn receive_sync_message( + &mut self, + sync_state: &mut sync::State, + message: sync::Message, + ) -> Result<(), AutomergeError> { + self.inner.ensure_transaction_closed(); + if let Some(observer) = self.inner.observation.observer() { + self.inner + .doc + .receive_sync_message_with(sync_state, message, observer) + } else { + self.inner.doc.receive_sync_message(sync_state, message) + } + } + + fn receive_sync_message_with( + &mut self, + sync_state: &mut sync::State, + message: sync::Message, + op_observer: &mut Obs2, + ) -> Result<(), AutomergeError> { + if let Some(our_observer) = self.inner.observation.observer() { + let mut composed = crate::op_observer::compose(our_observer, op_observer); + self.inner + .doc + .receive_sync_message_with(sync_state, message, &mut composed) + } else { + self.inner + .doc + .receive_sync_message_with(sync_state, message, op_observer) + } + } +} diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 584f761d..86aa5f63 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -9,7 +9,7 @@ use crate::clocks::Clocks; use crate::columnar::Key as EncodedKey; use crate::exid::ExId; use crate::keys::Keys; -use crate::op_observer::OpObserver; +use crate::op_observer::{BranchableObserver, OpObserver}; use crate::op_set::OpSet; use crate::parents::Parents; use crate::storage::{self, load, CompressConfig, VerificationMode}; @@ -22,7 +22,7 @@ use crate::types::{ }; use crate::{ query, AutomergeError, Change, KeysAt, ListRange, ListRangeAt, MapRange, MapRangeAt, ObjType, - Prop, Values, + Prop, ReadDoc, Values, }; use serde::Serialize; @@ -35,7 +35,39 @@ pub(crate) enum Actor { Cached(usize), } -/// An automerge document. +/// An automerge document which does not manage transactions for you. +/// +/// ## Creating, loading, merging and forking documents +/// +/// A new document can be created with [`Self::new`], which will create a document with a random +/// [`ActorId`]. Existing documents can be loaded with [`Self::load`], or [`Self::load_with`]. +/// +/// If you have two documents and you want to merge the changes from one into the other you can use +/// [`Self::merge`] or [`Self::merge_with`]. +/// +/// If you have a document you want to split into two concurrent threads of execution you can use +/// [`Self::fork`]. 
If you want to split a document from ealier in its history you can use +/// [`Self::fork_at`]. +/// +/// ## Reading values +/// +/// [`Self`] implements [`ReadDoc`], which provides methods for reading values from the document. +/// +/// ## Modifying a document (Transactions) +/// +/// [`Automerge`] provides an interface for viewing and modifying automerge documents which does +/// not manage transactions for you. To create changes you use either [`Automerge::transaction`] or +/// [`Automerge::transact`] (or the `_with` variants). +/// +/// ## Sync +/// +/// This type implements [`crate::sync::SyncDoc`] +/// +/// ## Observers +/// +/// Many of the methods on this type have an `_with` or `_observed` variant +/// which allow you to pass in an [`OpObserver`] to observe any changes which +/// occur. #[derive(Debug, Clone)] pub struct Automerge { /// The list of unapplied changes that are not causally ready. @@ -79,6 +111,9 @@ impl Automerge { } } + /// Change the text encoding of this view of the document + /// + /// This is a cheap operation, it just changes the way indexes are calculated pub fn with_encoding(mut self, encoding: TextEncoding) -> Self { self.text_encoding = encoding; self @@ -125,7 +160,8 @@ impl Automerge { Transaction::new(self, args, UnObserved) } - pub fn transaction_with_observer( + /// Start a transaction with an observer + pub fn transaction_with_observer( &mut self, op_observer: Obs, ) -> Transaction<'_, Observed> { @@ -172,7 +208,6 @@ impl Automerge { self.transact_with_impl(Some(c), f) } - /// Like [`Self::transact`] but with a function for generating the commit options. fn transact_with_impl( &mut self, c: Option, @@ -210,7 +245,7 @@ impl Automerge { pub fn transact_observed(&mut self, f: F) -> transaction::Result where F: FnOnce(&mut Transaction<'_, Observed>) -> Result, - Obs: OpObserver + Default, + Obs: OpObserver + BranchableObserver + Default, { self.transact_observed_with_impl(None::<&dyn Fn(&O) -> CommitOptions>, f) } @@ -224,7 +259,7 @@ impl Automerge { where F: FnOnce(&mut Transaction<'_, Observed>) -> Result, C: FnOnce(&O) -> CommitOptions, - Obs: OpObserver + Default, + Obs: OpObserver + BranchableObserver + Default, { self.transact_observed_with_impl(Some(c), f) } @@ -237,7 +272,7 @@ impl Automerge { where F: FnOnce(&mut Transaction<'_, Observed>) -> Result, C: FnOnce(&O) -> CommitOptions, - Obs: OpObserver + Default, + Obs: OpObserver + BranchableObserver + Default, { let observer = Obs::default(); let mut tx = self.transaction_with_observer(observer); @@ -273,13 +308,17 @@ impl Automerge { } /// Fork this document at the current point for use by a different actor. + /// + /// This will create a new actor ID for the forked document pub fn fork(&self) -> Self { let mut f = self.clone(); f.set_actor(ActorId::random()); f } - /// Fork this document at the give heads + /// Fork this document at the given heads + /// + /// This will create a new actor ID for the forked document pub fn fork_at(&self, heads: &[ChangeHash]) -> Result { let mut seen = heads.iter().cloned().collect::>(); let mut heads = heads.to_vec(); @@ -304,182 +343,6 @@ impl Automerge { Ok(f) } - // KeysAt::() - // LenAt::() - // PropAt::() - // NthAt::() - - /// Get the parents of an object in the document tree. - /// - /// ### Errors - /// - /// Returns an error when the id given is not the id of an object in this document. - /// This function does not get the parents of scalar values contained within objects. 
- /// - /// ### Experimental - /// - /// This function may in future be changed to allow getting the parents from the id of a scalar - /// value. - pub fn parents>(&self, obj: O) -> Result, AutomergeError> { - let (obj_id, _) = self.exid_to_obj(obj.as_ref())?; - Ok(self.ops.parents(obj_id)) - } - - pub fn path_to_object>( - &self, - obj: O, - ) -> Result, AutomergeError> { - Ok(self.parents(obj.as_ref().clone())?.path()) - } - - /// Get the keys of the object `obj`. - /// - /// For a map this returns the keys of the map. - /// For a list this returns the element ids (opids) encoded as strings. - pub fn keys>(&self, obj: O) -> Keys<'_, '_> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - let iter_keys = self.ops.keys(obj); - Keys::new(self, iter_keys) - } else { - Keys::new(self, None) - } - } - - /// Historical version of [`keys`](Self::keys). - pub fn keys_at>(&self, obj: O, heads: &[ChangeHash]) -> KeysAt<'_, '_> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return KeysAt::new(self, self.ops.keys_at(obj, clock)); - } - } - KeysAt::new(self, None) - } - - /// Iterate over the keys and values of the map `obj` in the given range. - pub fn map_range, R: RangeBounds>( - &self, - obj: O, - range: R, - ) -> MapRange<'_, R> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - MapRange::new(self, self.ops.map_range(obj, range)) - } else { - MapRange::new(self, None) - } - } - - /// Historical version of [`map_range`](Self::map_range). - pub fn map_range_at, R: RangeBounds>( - &self, - obj: O, - range: R, - heads: &[ChangeHash], - ) -> MapRangeAt<'_, R> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - let iter_range = self.ops.map_range_at(obj, range, clock); - return MapRangeAt::new(self, iter_range); - } - } - MapRangeAt::new(self, None) - } - - /// Iterate over the indexes and values of the list `obj` in the given range. - pub fn list_range, R: RangeBounds>( - &self, - obj: O, - range: R, - ) -> ListRange<'_, R> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - ListRange::new(self, self.ops.list_range(obj, range)) - } else { - ListRange::new(self, None) - } - } - - /// Historical version of [`list_range`](Self::list_range). - pub fn list_range_at, R: RangeBounds>( - &self, - obj: O, - range: R, - heads: &[ChangeHash], - ) -> ListRangeAt<'_, R> { - if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - let iter_range = self.ops.list_range_at(obj, range, clock); - return ListRangeAt::new(self, iter_range); - } - } - ListRangeAt::new(self, None) - } - - pub fn values>(&self, obj: O) -> Values<'_> { - if let Ok((obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if obj_type.is_sequence() { - Values::new(self, self.ops.list_range(obj, ..)) - } else { - Values::new(self, self.ops.map_range(obj, ..)) - } - } else { - Values::empty(self) - } - } - - pub fn values_at>(&self, obj: O, heads: &[ChangeHash]) -> Values<'_> { - if let Ok((obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return match obj_type { - ObjType::Map | ObjType::Table => { - let iter_range = self.ops.map_range_at(obj, .., clock); - Values::new(self, iter_range) - } - ObjType::List | ObjType::Text => { - let iter_range = self.ops.list_range_at(obj, .., clock); - Values::new(self, iter_range) - } - }; - } - } - Values::empty(self) - } - - /// Get the length of the given object. 
- pub fn length>(&self, obj: O) -> usize { - if let Ok((inner_obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if obj_type == ObjType::Map || obj_type == ObjType::Table { - self.keys(obj).count() - } else { - let encoding = ListEncoding::new(obj_type, self.text_encoding); - self.ops.search(&inner_obj, query::Len::new(encoding)).len - } - } else { - 0 - } - } - - /// Historical version of [`length`](Self::length). - pub fn length_at>(&self, obj: O, heads: &[ChangeHash]) -> usize { - if let Ok((inner_obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return if obj_type == ObjType::Map || obj_type == ObjType::Table { - self.keys_at(obj, heads).count() - } else { - let encoding = ListEncoding::new(obj_type, self.text_encoding); - self.ops - .search(&inner_obj, query::LenAt::new(clock, encoding)) - .len - }; - } - } - 0 - } - - /// Get the type of this object, if it is an object. - pub fn object_type>(&self, obj: O) -> Result { - let (_, obj_type) = self.exid_to_obj(obj.as_ref())?; - Ok(obj_type) - } - pub(crate) fn exid_to_obj(&self, id: &ExId) -> Result<(ObjId, ObjType), AutomergeError> { match id { ExId::Root => Ok((ObjId::root(), ObjType::Map)), @@ -511,153 +374,19 @@ impl Automerge { self.ops.id_to_exid(id) } - /// Get the string represented by the given text object. - pub fn text>(&self, obj: O) -> Result { - let obj = self.exid_to_obj(obj.as_ref())?.0; - let query = self.ops.search(&obj, query::ListVals::new()); - let mut buffer = String::new(); - for q in &query.ops { - buffer.push_str(q.to_str()); - } - Ok(buffer) - } - - /// Historical version of [`text`](Self::text). - pub fn text_at>( - &self, - obj: O, - heads: &[ChangeHash], - ) -> Result { - let obj = self.exid_to_obj(obj.as_ref())?.0; - let clock = self.clock_at(heads)?; - let query = self.ops.search(&obj, query::ListValsAt::new(clock)); - let mut buffer = String::new(); - for q in &query.ops { - if let OpType::Put(ScalarValue::Str(s)) = &q.action { - buffer.push_str(s); - } else { - buffer.push('\u{fffc}'); - } - } - Ok(buffer) - } - - // TODO - I need to return these OpId's here **only** to get - // the legacy conflicts format of { [opid]: value } - // Something better? - /// Get a value out of the document. - /// - /// Returns both the value and the id of the operation that created it, useful for handling - /// conflicts and serves as the object id if the value is an object. - pub fn get, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError> { - Ok(self.get_all(obj, prop.into())?.last().cloned()) - } - - /// Historical version of [`get`](Self::get). - pub fn get_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError> { - Ok(self.get_all_at(obj, prop, heads)?.last().cloned()) - } - - /// Get all conflicting values out of the document at this prop that conflict. - /// - /// Returns both the value and the id of the operation that created it, useful for handling - /// conflicts and serves as the object id if the value is an object. 
- pub fn get_all, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError> { - let obj = self.exid_to_obj(obj.as_ref())?.0; - let mut result = match prop.into() { - Prop::Map(p) => { - let prop = self.ops.m.props.lookup(&p); - if let Some(p) = prop { - self.ops - .search(&obj, query::Prop::new(p)) - .ops - .into_iter() - .map(|o| (o.value(), self.id_to_exid(o.id))) - .collect() - } else { - vec![] - } - } - Prop::Seq(n) => { - let obj_type = self.ops.object_type(&obj); - let encoding = obj_type - .map(|o| ListEncoding::new(o, self.text_encoding)) - .unwrap_or_default(); - self.ops - .search(&obj, query::Nth::new(n, encoding)) - .ops - .into_iter() - .map(|o| (o.value(), self.id_to_exid(o.id))) - .collect() - } - }; - result.sort_by(|a, b| b.1.cmp(&a.1)); - Ok(result) - } - - /// Historical version of [`get_all`](Self::get_all). - pub fn get_all_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError> { - let prop = prop.into(); - let obj = self.exid_to_obj(obj.as_ref())?.0; - let clock = self.clock_at(heads)?; - let result = match prop { - Prop::Map(p) => { - let prop = self.ops.m.props.lookup(&p); - if let Some(p) = prop { - self.ops - .search(&obj, query::PropAt::new(p, clock)) - .ops - .into_iter() - .map(|o| (o.clone_value(), self.id_to_exid(o.id))) - .collect() - } else { - vec![] - } - } - Prop::Seq(n) => { - let obj_type = self.ops.object_type(&obj); - let encoding = obj_type - .map(|o| ListEncoding::new(o, self.text_encoding)) - .unwrap_or_default(); - self.ops - .search(&obj, query::NthAt::new(n, clock, encoding)) - .ops - .into_iter() - .map(|o| (o.clone_value(), self.id_to_exid(o.id))) - .collect() - } - }; - Ok(result) - } - /// Load a document. pub fn load(data: &[u8]) -> Result { Self::load_with::<()>(data, VerificationMode::Check, None) } + /// Load a document without verifying the head hashes + /// + /// This is useful for debugging as it allows you to examine a corrupted document. pub fn load_unverified_heads(data: &[u8]) -> Result { Self::load_with::<()>(data, VerificationMode::DontCheck, None) } - /// Load a document. + /// Load a document with an observer #[tracing::instrument(skip(data, observer), err)] pub fn load_with( data: &[u8], @@ -749,11 +478,17 @@ impl Automerge { } /// Load an incremental save of a document. + /// + /// Unlike `load` this imports changes into an existing document. It will work with both the + /// output of [`Self::save`] and [`Self::save_incremental`] + /// + /// The return value is the number of ops which were applied, this is not useful and will + /// change in future. pub fn load_incremental(&mut self, data: &[u8]) -> Result { self.load_incremental_with::<()>(data, None) } - /// Load an incremental save of a document. + /// Like [`Self::load_incremental`] but with an observer pub fn load_incremental_with( &mut self, data: &[u8], @@ -783,6 +518,9 @@ impl Automerge { } /// Apply changes to this document. + /// + /// This is idemptotent in the sense that if a change has already been applied it will be + /// ignored. pub fn apply_changes( &mut self, changes: impl IntoIterator, @@ -790,7 +528,7 @@ impl Automerge { self.apply_changes_with::<_, ()>(changes, None) } - /// Apply changes to this document. + /// Like [`Self::apply_changes`] but with an observer pub fn apply_changes_with, Obs: OpObserver>( &mut self, changes: I, @@ -925,6 +663,10 @@ impl Automerge { } /// Save the entirety of this document in a compact form. 
+ /// + /// This takes a mutable reference to self because it saves the heads of the last save so that + /// `save_incremental` can be used to produce only the changes since the last `save`. This API + /// will be changing in future. pub fn save(&mut self) -> Vec { let heads = self.get_heads(); let c = self.history.iter(); @@ -940,6 +682,7 @@ impl Automerge { bytes } + /// Save this document, but don't run it through DEFLATE afterwards pub fn save_nocompress(&mut self) -> Vec { let heads = self.get_heads(); let c = self.history.iter(); @@ -955,7 +698,12 @@ impl Automerge { bytes } - /// Save the changes since last save in a compact form. + /// Save the changes since the last call to [Self::save`] + /// + /// The output of this will not be a compressed document format, but a series of individual + /// changes. This is useful if you know you have only made a small change since the last `save` + /// and you want to immediately send it somewhere (e.g. you've inserted a single character in a + /// text object). pub fn save_incremental(&mut self) -> Vec { let changes = self .get_changes(self.saved.as_slice()) @@ -997,33 +745,6 @@ impl Automerge { Ok(()) } - /// Get the hashes of the changes in this document that aren't transitive dependencies of the - /// given `heads`. - pub fn get_missing_deps(&self, heads: &[ChangeHash]) -> Vec { - let in_queue: HashSet<_> = self.queue.iter().map(|change| change.hash()).collect(); - let mut missing = HashSet::new(); - - for head in self.queue.iter().flat_map(|change| change.deps()) { - if !self.history_index.contains_key(head) { - missing.insert(head); - } - } - - for head in heads { - if !self.history_index.contains_key(head) { - missing.insert(head); - } - } - - let mut missing = missing - .into_iter() - .filter(|hash| !in_queue.contains(hash)) - .copied() - .collect::>(); - missing.sort(); - missing - } - /// Get the changes since `have_deps` in this document using a clock internally. fn get_changes_clock(&self, have_deps: &[ChangeHash]) -> Result, AutomergeError> { // get the clock for the given deps @@ -1052,10 +773,6 @@ impl Automerge { .collect()) } - pub fn get_changes(&self, have_deps: &[ChangeHash]) -> Result, AutomergeError> { - self.get_changes_clock(have_deps) - } - /// Get the last change this actor made to the document. pub fn get_last_local_change(&self) -> Option<&Change> { return self @@ -1087,47 +804,6 @@ impl Automerge { } } - /// Get a change by its hash. - pub fn get_change_by_hash(&self, hash: &ChangeHash) -> Option<&Change> { - self.history_index - .get(hash) - .and_then(|index| self.history.get(*index)) - } - - /// Get the changes that the other document added compared to this document. - #[tracing::instrument(skip(self, other))] - pub fn get_changes_added<'a>(&self, other: &'a Self) -> Vec<&'a Change> { - // Depth-first traversal from the heads through the dependency graph, - // until we reach a change that is already present in other - let mut stack: Vec<_> = other.get_heads(); - tracing::trace!(their_heads=?stack, "finding changes to merge"); - let mut seen_hashes = HashSet::new(); - let mut added_change_hashes = Vec::new(); - while let Some(hash) = stack.pop() { - if !seen_hashes.contains(&hash) && self.get_change_by_hash(&hash).is_none() { - seen_hashes.insert(hash); - added_change_hashes.push(hash); - if let Some(change) = other.get_change_by_hash(&hash) { - stack.extend(change.deps()); - } - } - } - // Return those changes in the reverse of the order in which the depth-first search - // found them. 
This is not necessarily a topological sort, but should usually be close. - added_change_hashes.reverse(); - added_change_hashes - .into_iter() - .filter_map(|h| other.get_change_by_hash(&h)) - .collect() - } - - /// Get the heads of this document. - pub fn get_heads(&self) -> Vec { - let mut deps: Vec<_> = self.deps.iter().copied().collect(); - deps.sort_unstable(); - deps - } - fn get_hash(&self, actor: usize, seq: u64) -> Result { self.states .get(&actor) @@ -1181,6 +857,7 @@ impl Automerge { self.deps.insert(change.hash()); } + #[doc(hidden)] pub fn import(&self, s: &str) -> Result<(ExId, ObjType), AutomergeError> { if s == "_root" { Ok((ExId::Root, ObjType::Map)) @@ -1367,6 +1044,343 @@ impl Automerge { op } + + /// Get the heads of this document. + pub fn get_heads(&self) -> Vec { + let mut deps: Vec<_> = self.deps.iter().copied().collect(); + deps.sort_unstable(); + deps + } + + pub fn get_changes(&self, have_deps: &[ChangeHash]) -> Result, AutomergeError> { + self.get_changes_clock(have_deps) + } + + /// Get changes in `other` that are not in `self + pub fn get_changes_added<'a>(&self, other: &'a Self) -> Vec<&'a Change> { + // Depth-first traversal from the heads through the dependency graph, + // until we reach a change that is already present in other + let mut stack: Vec<_> = other.get_heads(); + tracing::trace!(their_heads=?stack, "finding changes to merge"); + let mut seen_hashes = HashSet::new(); + let mut added_change_hashes = Vec::new(); + while let Some(hash) = stack.pop() { + if !seen_hashes.contains(&hash) && self.get_change_by_hash(&hash).is_none() { + seen_hashes.insert(hash); + added_change_hashes.push(hash); + if let Some(change) = other.get_change_by_hash(&hash) { + stack.extend(change.deps()); + } + } + } + // Return those changes in the reverse of the order in which the depth-first search + // found them. This is not necessarily a topological sort, but should usually be close. 
+ added_change_hashes.reverse(); + added_change_hashes + .into_iter() + .filter_map(|h| other.get_change_by_hash(&h)) + .collect() + } +} + +impl ReadDoc for Automerge { + fn parents>(&self, obj: O) -> Result, AutomergeError> { + let (obj_id, _) = self.exid_to_obj(obj.as_ref())?; + Ok(self.ops.parents(obj_id)) + } + + fn path_to_object>(&self, obj: O) -> Result, AutomergeError> { + Ok(self.parents(obj.as_ref().clone())?.path()) + } + + fn keys>(&self, obj: O) -> Keys<'_, '_> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + let iter_keys = self.ops.keys(obj); + Keys::new(self, iter_keys) + } else { + Keys::new(self, None) + } + } + + fn keys_at>(&self, obj: O, heads: &[ChangeHash]) -> KeysAt<'_, '_> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + if let Ok(clock) = self.clock_at(heads) { + return KeysAt::new(self, self.ops.keys_at(obj, clock)); + } + } + KeysAt::new(self, None) + } + + fn map_range, R: RangeBounds>( + &self, + obj: O, + range: R, + ) -> MapRange<'_, R> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + MapRange::new(self, self.ops.map_range(obj, range)) + } else { + MapRange::new(self, None) + } + } + + fn map_range_at, R: RangeBounds>( + &self, + obj: O, + range: R, + heads: &[ChangeHash], + ) -> MapRangeAt<'_, R> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + if let Ok(clock) = self.clock_at(heads) { + let iter_range = self.ops.map_range_at(obj, range, clock); + return MapRangeAt::new(self, iter_range); + } + } + MapRangeAt::new(self, None) + } + + fn list_range, R: RangeBounds>( + &self, + obj: O, + range: R, + ) -> ListRange<'_, R> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + ListRange::new(self, self.ops.list_range(obj, range)) + } else { + ListRange::new(self, None) + } + } + + fn list_range_at, R: RangeBounds>( + &self, + obj: O, + range: R, + heads: &[ChangeHash], + ) -> ListRangeAt<'_, R> { + if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { + if let Ok(clock) = self.clock_at(heads) { + let iter_range = self.ops.list_range_at(obj, range, clock); + return ListRangeAt::new(self, iter_range); + } + } + ListRangeAt::new(self, None) + } + + fn values>(&self, obj: O) -> Values<'_> { + if let Ok((obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { + if obj_type.is_sequence() { + Values::new(self, self.ops.list_range(obj, ..)) + } else { + Values::new(self, self.ops.map_range(obj, ..)) + } + } else { + Values::empty(self) + } + } + + fn values_at>(&self, obj: O, heads: &[ChangeHash]) -> Values<'_> { + if let Ok((obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { + if let Ok(clock) = self.clock_at(heads) { + return match obj_type { + ObjType::Map | ObjType::Table => { + let iter_range = self.ops.map_range_at(obj, .., clock); + Values::new(self, iter_range) + } + ObjType::List | ObjType::Text => { + let iter_range = self.ops.list_range_at(obj, .., clock); + Values::new(self, iter_range) + } + }; + } + } + Values::empty(self) + } + + fn length>(&self, obj: O) -> usize { + if let Ok((inner_obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { + if obj_type == ObjType::Map || obj_type == ObjType::Table { + self.keys(obj).count() + } else { + let encoding = ListEncoding::new(obj_type, self.text_encoding); + self.ops.search(&inner_obj, query::Len::new(encoding)).len + } + } else { + 0 + } + } + + fn length_at>(&self, obj: O, heads: &[ChangeHash]) -> usize { + if let Ok((inner_obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { + if let Ok(clock) = self.clock_at(heads) { + return if obj_type == ObjType::Map || 
obj_type == ObjType::Table { + self.keys_at(obj, heads).count() + } else { + let encoding = ListEncoding::new(obj_type, self.text_encoding); + self.ops + .search(&inner_obj, query::LenAt::new(clock, encoding)) + .len + }; + } + } + 0 + } + + fn object_type>(&self, obj: O) -> Result { + let (_, obj_type) = self.exid_to_obj(obj.as_ref())?; + Ok(obj_type) + } + + fn text>(&self, obj: O) -> Result { + let obj = self.exid_to_obj(obj.as_ref())?.0; + let query = self.ops.search(&obj, query::ListVals::new()); + let mut buffer = String::new(); + for q in &query.ops { + buffer.push_str(q.to_str()); + } + Ok(buffer) + } + + fn text_at>( + &self, + obj: O, + heads: &[ChangeHash], + ) -> Result { + let obj = self.exid_to_obj(obj.as_ref())?.0; + let clock = self.clock_at(heads)?; + let query = self.ops.search(&obj, query::ListValsAt::new(clock)); + let mut buffer = String::new(); + for q in &query.ops { + if let OpType::Put(ScalarValue::Str(s)) = &q.action { + buffer.push_str(s); + } else { + buffer.push('\u{fffc}'); + } + } + Ok(buffer) + } + + fn get, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError> { + Ok(self.get_all(obj, prop.into())?.last().cloned()) + } + + fn get_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError> { + Ok(self.get_all_at(obj, prop, heads)?.last().cloned()) + } + + fn get_all, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError> { + let obj = self.exid_to_obj(obj.as_ref())?.0; + let mut result = match prop.into() { + Prop::Map(p) => { + let prop = self.ops.m.props.lookup(&p); + if let Some(p) = prop { + self.ops + .search(&obj, query::Prop::new(p)) + .ops + .into_iter() + .map(|o| (o.value(), self.id_to_exid(o.id))) + .collect() + } else { + vec![] + } + } + Prop::Seq(n) => { + let obj_type = self.ops.object_type(&obj); + let encoding = obj_type + .map(|o| ListEncoding::new(o, self.text_encoding)) + .unwrap_or_default(); + self.ops + .search(&obj, query::Nth::new(n, encoding)) + .ops + .into_iter() + .map(|o| (o.value(), self.id_to_exid(o.id))) + .collect() + } + }; + result.sort_by(|a, b| b.1.cmp(&a.1)); + Ok(result) + } + + fn get_all_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError> { + let prop = prop.into(); + let obj = self.exid_to_obj(obj.as_ref())?.0; + let clock = self.clock_at(heads)?; + let result = match prop { + Prop::Map(p) => { + let prop = self.ops.m.props.lookup(&p); + if let Some(p) = prop { + self.ops + .search(&obj, query::PropAt::new(p, clock)) + .ops + .into_iter() + .map(|o| (o.clone_value(), self.id_to_exid(o.id))) + .collect() + } else { + vec![] + } + } + Prop::Seq(n) => { + let obj_type = self.ops.object_type(&obj); + let encoding = obj_type + .map(|o| ListEncoding::new(o, self.text_encoding)) + .unwrap_or_default(); + self.ops + .search(&obj, query::NthAt::new(n, clock, encoding)) + .ops + .into_iter() + .map(|o| (o.clone_value(), self.id_to_exid(o.id))) + .collect() + } + }; + Ok(result) + } + + fn get_missing_deps(&self, heads: &[ChangeHash]) -> Vec { + let in_queue: HashSet<_> = self.queue.iter().map(|change| change.hash()).collect(); + let mut missing = HashSet::new(); + + for head in self.queue.iter().flat_map(|change| change.deps()) { + if !self.history_index.contains_key(head) { + missing.insert(head); + } + } + + for head in heads { + if !self.history_index.contains_key(head) { + missing.insert(head); + } + } + + let mut missing = missing + .into_iter() + .filter(|hash| 
!in_queue.contains(hash)) + .copied() + .collect::>(); + missing.sort(); + missing + } + + fn get_change_by_hash(&self, hash: &ChangeHash) -> Option<&Change> { + self.history_index + .get(hash) + .and_then(|index| self.history.get(*index)) + } } impl Default for Automerge { diff --git a/rust/automerge/src/automerge/tests.rs b/rust/automerge/src/automerge/tests.rs index 7eadaedd..8d533fed 100644 --- a/rust/automerge/src/automerge/tests.rs +++ b/rust/automerge/src/automerge/tests.rs @@ -1539,7 +1539,7 @@ fn observe_counter_change_application() { #[test] fn get_changes_heads_empty() { - let mut doc = AutoCommit::unobserved(); + let mut doc = AutoCommit::new(); doc.put(ROOT, "key1", 1).unwrap(); doc.commit(); doc.put(ROOT, "key2", 1).unwrap(); diff --git a/rust/automerge/src/autoserde.rs b/rust/automerge/src/autoserde.rs index 63b0848a..ccfc6ae6 100644 --- a/rust/automerge/src/autoserde.rs +++ b/rust/automerge/src/autoserde.rs @@ -1,18 +1,33 @@ use serde::ser::{SerializeMap, SerializeSeq}; -use crate::{Automerge, ObjId, ObjType, Value}; +use crate::{ObjId, ObjType, ReadDoc, Value}; -/// A wrapper type which implements [`serde::Serialize`] for an [`Automerge`]. +/// A wrapper type which implements [`serde::Serialize`] for a [`ReadDoc`]. +/// +/// # Example +/// +/// ``` +/// # fn main() -> Result<(), Box> { +/// use automerge::{AutoCommit, AutomergeError, Value, transaction::Transactable}; +/// let mut doc = AutoCommit::new(); +/// doc.put(automerge::ROOT, "key", "value")?; +/// +/// let serialized = serde_json::to_string(&automerge::AutoSerde::from(&doc)).unwrap(); +/// +/// assert_eq!(serialized, r#"{"key":"value"}"#); +/// # Ok(()) +/// # } +/// ``` #[derive(Debug)] -pub struct AutoSerde<'a>(&'a Automerge); +pub struct AutoSerde<'a, R: crate::ReadDoc>(&'a R); -impl<'a> From<&'a Automerge> for AutoSerde<'a> { - fn from(a: &'a Automerge) -> Self { +impl<'a, R: ReadDoc> From<&'a R> for AutoSerde<'a, R> { + fn from(a: &'a R) -> Self { AutoSerde(a) } } -impl<'a> serde::Serialize for AutoSerde<'a> { +impl<'a, R: crate::ReadDoc> serde::Serialize for AutoSerde<'a, R> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -25,12 +40,12 @@ impl<'a> serde::Serialize for AutoSerde<'a> { } } -struct AutoSerdeMap<'a> { - doc: &'a Automerge, +struct AutoSerdeMap<'a, R> { + doc: &'a R, obj: ObjId, } -impl<'a> serde::Serialize for AutoSerdeMap<'a> { +impl<'a, R: crate::ReadDoc> serde::Serialize for AutoSerdeMap<'a, R> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -51,12 +66,12 @@ impl<'a> serde::Serialize for AutoSerdeMap<'a> { } } -struct AutoSerdeSeq<'a> { - doc: &'a Automerge, +struct AutoSerdeSeq<'a, R> { + doc: &'a R, obj: ObjId, } -impl<'a> serde::Serialize for AutoSerdeSeq<'a> { +impl<'a, R: crate::ReadDoc> serde::Serialize for AutoSerdeSeq<'a, R> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -77,13 +92,13 @@ impl<'a> serde::Serialize for AutoSerdeSeq<'a> { } } -struct AutoSerdeVal<'a> { - doc: &'a Automerge, +struct AutoSerdeVal<'a, R> { + doc: &'a R, val: Value<'a>, obj: ObjId, } -impl<'a> serde::Serialize for AutoSerdeVal<'a> { +impl<'a, R: crate::ReadDoc> serde::Serialize for AutoSerdeVal<'a, R> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, diff --git a/rust/automerge/src/exid.rs b/rust/automerge/src/exid.rs index 3ff8fbb5..3a5a2ca2 100644 --- a/rust/automerge/src/exid.rs +++ b/rust/automerge/src/exid.rs @@ -6,6 +6,10 @@ use std::cmp::{Ord, Ordering}; use std::fmt; use 
std::hash::{Hash, Hasher}; +/// An identifier for an object in a document +/// +/// This can be persisted using `to_bytes` and `TryFrom<&[u8]>` breaking changes to the +/// serialization format will be considered breaking changes for this library version. #[derive(Debug, Clone)] pub enum ExId { Root, @@ -17,7 +21,10 @@ const TYPE_ROOT: u8 = 0; const TYPE_ID: u8 = 1; impl ExId { - /// Serialize the ExId to a byte array. + /// Serialize this object ID to a byte array. + /// + /// This serialization format is versioned and incompatible changes to it will be considered a + /// breaking change for the version of this library. pub fn to_bytes(&self) -> Vec { // The serialized format is // diff --git a/rust/automerge/src/keys.rs b/rust/automerge/src/keys.rs index f8e0c676..838015ef 100644 --- a/rust/automerge/src/keys.rs +++ b/rust/automerge/src/keys.rs @@ -1,5 +1,9 @@ use crate::{query, Automerge}; +/// An iterator over the keys of an object +/// +/// This is returned by [`crate::ReadDoc::keys`] and method. The returned item is either +/// the keys of a map, or the encoded element IDs of a sequence. #[derive(Debug)] pub struct Keys<'a, 'k> { keys: Option>, diff --git a/rust/automerge/src/keys_at.rs b/rust/automerge/src/keys_at.rs index c957e175..fd747bbc 100644 --- a/rust/automerge/src/keys_at.rs +++ b/rust/automerge/src/keys_at.rs @@ -1,5 +1,9 @@ use crate::{query, Automerge}; +/// An iterator over the keys of an object at a particular point in history +/// +/// This is returned by [`crate::ReadDoc::keys_at`] method. The returned item is either the keys of a map, +/// or the encoded element IDs of a sequence. #[derive(Debug)] pub struct KeysAt<'a, 'k> { keys: Option>, diff --git a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index 58f5b263..bafd8983 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -1,3 +1,190 @@ +//! # Automerge +//! +//! Automerge is a library of data structures for building collaborative, +//! [local-first](https://www.inkandswitch.com/local-first/) applications. The +//! idea of automerge is to provide a data structure which is quite general, +//! \- consisting of nested key/value maps and/or lists - which can be modified +//! entirely locally but which can at any time be merged with other instances of +//! the same data structure. +//! +//! In addition to the core data structure (which we generally refer to as a +//! "document"), we also provide an implementation of a sync protocol (in +//! [`crate::sync`]) which can be used over any reliable in-order transport; and +//! an efficient binary storage format. +//! +//! This crate is organised around two representations of a document - +//! [`Automerge`] and [`AutoCommit`]. The difference between the two is that +//! [`AutoCommit`] manages transactions for you. Both of these representations +//! implement [`ReadDoc`] for reading values from a document and +//! [`sync::SyncDoc`] for taking part in the sync protocol. [`AutoCommit`] +//! directly implements [`transaction::Transactable`] for making changes to a +//! document, whilst [`Automerge`] requires you to explicitly create a +//! [`transaction::Transaction`]. +//! +//! NOTE: The API this library provides for modifying data is quite low level +//! (somewhat analogous to directly creating JSON values rather than using +//! `serde` derive macros or equivalent). If you're writing a Rust application which uses automerge +//! you may want to look at [autosurgeon](https://github.com/automerge/autosurgeon). +//! +//! ## Data Model +//! +//! 
An automerge document is a map from strings to values +//! ([`Value`]) where values can be either +//! +//! * A nested composite value which is either +//! * A map from strings to values ([`ObjType::Map`]) +//! * A list of values ([`ObjType::List`]) +//! * A text object (a sequence of unicode characters) ([`ObjType::Text`]) +//! * A primitive value ([`ScalarValue`]) which is one of +//! * A string +//! * A 64 bit floating point number +//! * A signed 64 bit integer +//! * An unsigned 64 bit integer +//! * A boolean +//! * A counter object (a 64 bit integer which merges by addition) +//! ([`ScalarValue::Counter`]) +//! * A timestamp (a 64 bit integer which is milliseconds since the unix epoch) +//! +//! All composite values have an ID ([`ObjId`]) which is created when the value +//! is inserted into the document or is the root object ID [`ROOT`]. Values in +//! the document are then referred to by the pair (`object ID`, `key`). The +//! `key` is represented by the [`Prop`] type and is either a string for a maps, +//! or an index for sequences. +//! +//! ### Conflicts +//! +//! There are some things automerge cannot merge sensibly. For example, two +//! actors concurrently setting the key "name" to different values. In this case +//! automerge will pick a winning value in a random but deterministic way, but +//! the conflicting value is still available via the [`ReadDoc::get_all`] method. +//! +//! ### Change hashes and historical values +//! +//! Like git, points in the history of a document are identified by hash. Unlike +//! git there can be multiple hashes representing a particular point (because +//! automerge supports concurrent changes). These hashes can be obtained using +//! either [`Automerge::get_heads`] or [`AutoCommit::get_heads`] (note these +//! methods are not part of [`ReadDoc`] because in the case of [`AutoCommit`] it +//! requires a mutable reference to the document). +//! +//! These hashes can be used to read values from the document at a particular +//! point in history using the various `*_at` methods on [`ReadDoc`] which take a +//! slice of [`ChangeHash`] as an argument. +//! +//! ### Actor IDs +//! +//! Any change to an automerge document is made by an actor, represented by an +//! [`ActorId`]. An actor ID is any random sequence of bytes but each change by +//! the same actor ID must be sequential. This often means you will want to +//! maintain at least one actor ID per device. It is fine to generate a new +//! actor ID for each change, but be aware that each actor ID takes up space in +//! a document so if you expect a document to be long lived and/or to have many +//! changes then you should try to reuse actor IDs where possible. +//! +//! ### Text Encoding +//! +//! Both [`Automerge`] and [`AutoCommit`] provide a `with_encoding` method which +//! allows you to specify the [`crate::TextEncoding`] which is used for +//! interpreting the indexes passed to methods like [`ReadDoc::list_range`] or +//! [`transaction::Transactable::splice`]. The default encoding is UTF-8, but +//! you can switch to UTF-16. +//! +//! ## Sync Protocol +//! +//! See the [`sync`] module. +//! +//! ## Serde serialization +//! +//! Sometimes you just want to get the JSON value of an automerge document. For +//! this you can use [`AutoSerde`], which implements `serde::Serialize` for an +//! automerge document. +//! +//! ## Example +//! +//! Let's create a document representing an address book. +//! +//! ``` +//! use automerge::{ObjType, AutoCommit, transaction::Transactable, ReadDoc}; +//! 
+//! # fn main() -> Result<(), Box> { +//! let mut doc = AutoCommit::new(); +//! +//! // `put_object` creates a nested object in the root key/value map and +//! // returns the ID of the new object, in this case a list. +//! let contacts = doc.put_object(automerge::ROOT, "contacts", ObjType::List)?; +//! +//! // Now we can insert objects into the list +//! let alice = doc.insert_object(&contacts, 0, ObjType::Map)?; +//! +//! // Finally we can set keys in the "alice" map +//! doc.put(&alice, "name", "Alice")?; +//! doc.put(&alice, "email", "alice@example.com")?; +//! +//! // Create another contact +//! let bob = doc.insert_object(&contacts, 1, ObjType::Map)?; +//! doc.put(&bob, "name", "Bob")?; +//! doc.put(&bob, "email", "bob@example.com")?; +//! +//! // Now we save the address book, we can put this in a file +//! let data: Vec = doc.save(); +//! # Ok(()) +//! # } +//! ``` +//! +//! Now modify this document on two separate devices and merge the modifications. +//! +//! ``` +//! use std::borrow::Cow; +//! use automerge::{ObjType, AutoCommit, transaction::Transactable, ReadDoc}; +//! +//! # fn main() -> Result<(), Box> { +//! # let mut doc = AutoCommit::new(); +//! # let contacts = doc.put_object(automerge::ROOT, "contacts", ObjType::List)?; +//! # let alice = doc.insert_object(&contacts, 0, ObjType::Map)?; +//! # doc.put(&alice, "name", "Alice")?; +//! # doc.put(&alice, "email", "alice@example.com")?; +//! # let bob = doc.insert_object(&contacts, 1, ObjType::Map)?; +//! # doc.put(&bob, "name", "Bob")?; +//! # doc.put(&bob, "email", "bob@example.com")?; +//! # let saved: Vec = doc.save(); +//! +//! // Load the document on the first device and change alices email +//! let mut doc1 = AutoCommit::load(&saved)?; +//! let contacts = match doc1.get(automerge::ROOT, "contacts")? { +//! Some((automerge::Value::Object(ObjType::List), contacts)) => contacts, +//! _ => panic!("contacts should be a list"), +//! }; +//! let alice = match doc1.get(&contacts, 0)? { +//! Some((automerge::Value::Object(ObjType::Map), alice)) => alice, +//! _ => panic!("alice should be a map"), +//! }; +//! doc1.put(&alice, "email", "alicesnewemail@example.com")?; +//! +//! +//! // Load the document on the second device and change bobs name +//! let mut doc2 = AutoCommit::load(&saved)?; +//! let contacts = match doc2.get(automerge::ROOT, "contacts")? { +//! Some((automerge::Value::Object(ObjType::List), contacts)) => contacts, +//! _ => panic!("contacts should be a list"), +//! }; +//! let bob = match doc2.get(&contacts, 1)? { +//! Some((automerge::Value::Object(ObjType::Map), bob)) => bob, +//! _ => panic!("bob should be a map"), +//! }; +//! doc2.put(&bob, "name", "Robert")?; +//! +//! // Finally, we can merge the changes from the two devices +//! doc1.merge(&mut doc2)?; +//! let bobsname: Option = doc1.get(&bob, "name")?.map(|(v, _)| v); +//! assert_eq!(bobsname, Some(automerge::Value::Scalar(Cow::Owned("Robert".into())))); +//! +//! let alices_email: Option = doc1.get(&alice, "email")?.map(|(v, _)| v); +//! assert_eq!(alices_email, Some(automerge::Value::Scalar(Cow::Owned("alicesnewemail@example.com".into())))); +//! # Ok(()) +//! # } +//! ``` +//! 
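The crate docs above cover saving and merging; the sync protocol mentioned under "## Sync Protocol" can be driven in much the same way through the `sync()` wrapper this patch adds to `AutoCommit`. The following sketch is not part of the diff and assumes the existing `automerge::sync::State::new()` constructor; the loop is the usual "exchange messages until both sides are quiet" pattern.

```
use automerge::{sync::SyncDoc, AutoCommit, AutomergeError};

fn bring_into_sync(left: &mut AutoCommit, right: &mut AutoCommit) -> Result<(), AutomergeError> {
    let mut left_state = automerge::sync::State::new();
    let mut right_state = automerge::sync::State::new();
    loop {
        // `sync()` commits any open transaction before taking part in the protocol.
        let to_right = left.sync().generate_sync_message(&mut left_state);
        let to_left = right.sync().generate_sync_message(&mut right_state);
        if to_right.is_none() && to_left.is_none() {
            break; // both peers believe they are in sync
        }
        if let Some(msg) = to_right {
            right.sync().receive_sync_message(&mut right_state, msg)?;
        }
        if let Some(msg) = to_left {
            left.sync().receive_sync_message(&mut left_state, msg)?;
        }
    }
    Ok(())
}
```

Because each call to `sync()` closes any outstanding transaction first, the observer attached to the document still sees the changes applied by the incoming messages.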
+ #![doc( html_logo_url = "https://raw.githubusercontent.com/automerge/automerge-rs/main/img/brandmark.svg", html_favicon_url = "https:///raw.githubusercontent.com/automerge/automerge-rs/main/img/favicon.ico" @@ -71,11 +258,12 @@ mod list_range; mod list_range_at; mod map_range; mod map_range_at; -mod op_observer; +pub mod op_observer; mod op_set; mod op_tree; mod parents; mod query; +mod read; mod sequence_tree; mod storage; pub mod sync; @@ -105,9 +293,12 @@ pub use op_observer::OpObserver; pub use op_observer::Patch; pub use op_observer::VecOpObserver; pub use parents::{Parent, Parents}; +pub use read::ReadDoc; +#[doc(hidden)] pub use sequence_tree::SequenceTree; pub use types::{ActorId, ChangeHash, ObjType, OpType, ParseChangeHashError, Prop, TextEncoding}; pub use value::{ScalarValue, Value}; pub use values::Values; +/// The object ID for the root map of a document pub const ROOT: ObjId = ObjId::Root; diff --git a/rust/automerge/src/list_range.rs b/rust/automerge/src/list_range.rs index ae7b2aa5..a043da72 100644 --- a/rust/automerge/src/list_range.rs +++ b/rust/automerge/src/list_range.rs @@ -3,6 +3,9 @@ use crate::{exid::ExId, Value}; use crate::{query, Automerge}; use std::ops::RangeBounds; +/// An iterator over the elements of a list object +/// +/// This is returned by the [`crate::ReadDoc::list_range`] method #[derive(Debug)] pub struct ListRange<'a, R: RangeBounds> { range: Option>, diff --git a/rust/automerge/src/list_range_at.rs b/rust/automerge/src/list_range_at.rs index 37db9677..ce8f5a46 100644 --- a/rust/automerge/src/list_range_at.rs +++ b/rust/automerge/src/list_range_at.rs @@ -3,6 +3,9 @@ use std::ops::RangeBounds; use crate::{query, Automerge}; +/// An iterator over the elements of a list object at a particular set of heads +/// +/// This is returned by the [`crate::ReadDoc::list_range_at`] method #[derive(Debug)] pub struct ListRangeAt<'a, R: RangeBounds> { range: Option>, diff --git a/rust/automerge/src/map_range.rs b/rust/automerge/src/map_range.rs index 8029b84d..ad33ebf5 100644 --- a/rust/automerge/src/map_range.rs +++ b/rust/automerge/src/map_range.rs @@ -3,6 +3,9 @@ use std::ops::RangeBounds; use crate::{query, Automerge}; +/// An iterator over the keys and values of a map object +/// +/// This is returned by the [`crate::ReadDoc::map_range`] method #[derive(Debug)] pub struct MapRange<'a, R: RangeBounds> { range: Option>, diff --git a/rust/automerge/src/map_range_at.rs b/rust/automerge/src/map_range_at.rs index b2eb3fb2..8d008e89 100644 --- a/rust/automerge/src/map_range_at.rs +++ b/rust/automerge/src/map_range_at.rs @@ -3,6 +3,9 @@ use std::ops::RangeBounds; use crate::{query, Automerge}; +/// An iterator over the keys and values of a map object as at a particuar heads +/// +/// This is returned by the [`crate::ReadDoc::map_range_at`] method #[derive(Debug)] pub struct MapRangeAt<'a, R: RangeBounds> { range: Option>, diff --git a/rust/automerge/src/op_observer.rs b/rust/automerge/src/op_observer.rs index 0d082219..5b33c21f 100644 --- a/rust/automerge/src/op_observer.rs +++ b/rust/automerge/src/op_observer.rs @@ -1,8 +1,11 @@ use crate::exid::ExId; -use crate::Automerge; use crate::Prop; +use crate::ReadDoc; use crate::Value; +mod compose; +pub use compose::compose; + /// An observer of operations applied to the document. pub trait OpObserver { /// A new value has been inserted into the given object. @@ -12,15 +15,16 @@ pub trait OpObserver { /// - `index`: the index the new value has been inserted at. 
/// - `tagged_value`: the value that has been inserted and the id of the operation that did the /// insert. - fn insert( + fn insert( &mut self, - doc: &Automerge, + doc: &R, objid: ExId, index: usize, tagged_value: (Value<'_>, ExId), ); - fn splice_text(&mut self, _doc: &Automerge, _objid: ExId, _index: usize, _value: &str); + /// Some text has been spliced into a text object + fn splice_text(&mut self, _doc: &R, _objid: ExId, _index: usize, _value: &str); /// A new value has been put into the given object. /// @@ -30,9 +34,9 @@ pub trait OpObserver { /// - `tagged_value`: the value that has been put into the object and the id of the operation /// that did the put. /// - `conflict`: whether this put conflicts with other operations. - fn put( + fn put( &mut self, - doc: &Automerge, + doc: &R, objid: ExId, prop: Prop, tagged_value: (Value<'_>, ExId), @@ -49,9 +53,9 @@ pub trait OpObserver { /// - `tagged_value`: the value that has been put into the object and the id of the operation /// that did the put. /// - `conflict`: whether this put conflicts with other operations. - fn expose( + fn expose( &mut self, - doc: &Automerge, + doc: &R, objid: ExId, prop: Prop, tagged_value: (Value<'_>, ExId), @@ -63,7 +67,7 @@ pub trait OpObserver { /// - `doc`: a handle to the doc after the op has been inserted, can be used to query information /// - `objid`: the object that has been put into. /// - `prop`: the prop that the value as been put at. - fn flag_conflict(&mut self, _doc: &Automerge, _objid: ExId, _prop: Prop) {} + fn flag_conflict(&mut self, _doc: &R, _objid: ExId, _prop: Prop) {} /// A counter has been incremented. /// @@ -72,14 +76,20 @@ pub trait OpObserver { /// - `prop`: they prop that the chounter is at. /// - `tagged_value`: the amount the counter has been incremented by, and the the id of the /// increment operation. - fn increment(&mut self, doc: &Automerge, objid: ExId, prop: Prop, tagged_value: (i64, ExId)); + fn increment( + &mut self, + doc: &R, + objid: ExId, + prop: Prop, + tagged_value: (i64, ExId), + ); /// A map value has beeen deleted. /// /// - `doc`: a handle to the doc after the op has been inserted, can be used to query information /// - `objid`: the object that has been deleted in. /// - `prop`: the prop to be deleted - fn delete(&mut self, doc: &Automerge, objid: ExId, prop: Prop) { + fn delete(&mut self, doc: &R, objid: ExId, prop: Prop) { match prop { Prop::Map(k) => self.delete_map(doc, objid, &k), Prop::Seq(i) => self.delete_seq(doc, objid, i, 1), @@ -91,7 +101,7 @@ pub trait OpObserver { /// - `doc`: a handle to the doc after the op has been inserted, can be used to query information /// - `objid`: the object that has been deleted in. /// - `key`: the map key to be deleted - fn delete_map(&mut self, doc: &Automerge, objid: ExId, key: &str); + fn delete_map(&mut self, doc: &R, objid: ExId, key: &str); /// A one or more list values have beeen deleted. /// @@ -99,21 +109,7 @@ pub trait OpObserver { /// - `objid`: the object that has been deleted in. /// - `index`: the index of the deletion /// - `num`: the number of sequential elements deleted - fn delete_seq(&mut self, doc: &Automerge, objid: ExId, index: usize, num: usize); - - /// Branch of a new op_observer later to be merged - /// - /// Called by AutoCommit when creating a new transaction. Observer branch - /// will be merged on `commit()` or thrown away on `rollback()` - /// - fn branch(&self) -> Self; - - /// Merge observed information from a transaction. 
- /// - /// Called by AutoCommit on `commit()` - /// - /// - `other`: Another Op Observer of the same type - fn merge(&mut self, other: &Self); + fn delete_seq(&mut self, doc: &R, objid: ExId, index: usize, num: usize); /// Whether to call sequence methods or `splice_text` when encountering changes in text /// @@ -123,21 +119,41 @@ pub trait OpObserver { } } +/// An observer which can be branched +/// +/// This is used when observing operations in a transaction. In this case `branch` will be called +/// at the beginning of the transaction to return a new observer and then `merge` will be called +/// with the branched observer as `other` when the transaction is comitted. +pub trait BranchableObserver { + /// Branch of a new op_observer later to be merged + /// + /// Called when creating a new transaction. Observer branch will be merged on `commit()` or + /// thrown away on `rollback()` + fn branch(&self) -> Self; + + /// Merge observed information from a transaction. + /// + /// Called by AutoCommit on `commit()` + /// + /// - `other`: Another Op Observer of the same type + fn merge(&mut self, other: &Self); +} + impl OpObserver for () { - fn insert( + fn insert( &mut self, - _doc: &Automerge, + _doc: &R, _objid: ExId, _index: usize, _tagged_value: (Value<'_>, ExId), ) { } - fn splice_text(&mut self, _doc: &Automerge, _objid: ExId, _index: usize, _value: &str) {} + fn splice_text(&mut self, _doc: &R, _objid: ExId, _index: usize, _value: &str) {} - fn put( + fn put( &mut self, - _doc: &Automerge, + _doc: &R, _objid: ExId, _prop: Prop, _tagged_value: (Value<'_>, ExId), @@ -145,9 +161,9 @@ impl OpObserver for () { ) { } - fn expose( + fn expose( &mut self, - _doc: &Automerge, + _doc: &R, _objid: ExId, _prop: Prop, _tagged_value: (Value<'_>, ExId), @@ -155,21 +171,22 @@ impl OpObserver for () { ) { } - fn increment( + fn increment( &mut self, - _doc: &Automerge, + _doc: &R, _objid: ExId, _prop: Prop, _tagged_value: (i64, ExId), ) { } - fn delete_map(&mut self, _doc: &Automerge, _objid: ExId, _key: &str) {} + fn delete_map(&mut self, _doc: &R, _objid: ExId, _key: &str) {} - fn delete_seq(&mut self, _doc: &Automerge, _objid: ExId, _index: usize, _num: usize) {} + fn delete_seq(&mut self, _doc: &R, _objid: ExId, _index: usize, _num: usize) {} +} +impl BranchableObserver for () { fn merge(&mut self, _other: &Self) {} - fn branch(&self) -> Self {} } @@ -188,8 +205,14 @@ impl VecOpObserver { } impl OpObserver for VecOpObserver { - fn insert(&mut self, doc: &Automerge, obj: ExId, index: usize, (value, id): (Value<'_>, ExId)) { - if let Ok(mut p) = doc.parents(&obj) { + fn insert( + &mut self, + doc: &R, + obj: ExId, + index: usize, + (value, id): (Value<'_>, ExId), + ) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Insert { obj, path: p.path(), @@ -199,8 +222,8 @@ impl OpObserver for VecOpObserver { } } - fn splice_text(&mut self, doc: &Automerge, obj: ExId, index: usize, value: &str) { - if let Ok(mut p) = doc.parents(&obj) { + fn splice_text(&mut self, doc: &R, obj: ExId, index: usize, value: &str) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Splice { obj, path: p.path(), @@ -210,15 +233,15 @@ impl OpObserver for VecOpObserver { } } - fn put( + fn put( &mut self, - doc: &Automerge, + doc: &R, obj: ExId, prop: Prop, (value, id): (Value<'_>, ExId), conflict: bool, ) { - if let Ok(mut p) = doc.parents(&obj) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Put { obj, path: p.path(), @@ -229,15 +252,15 @@ impl OpObserver for VecOpObserver { } } - fn 
expose( + fn expose( &mut self, - doc: &Automerge, + doc: &R, obj: ExId, prop: Prop, (value, id): (Value<'_>, ExId), conflict: bool, ) { - if let Ok(mut p) = doc.parents(&obj) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Expose { obj, path: p.path(), @@ -248,8 +271,8 @@ impl OpObserver for VecOpObserver { } } - fn increment(&mut self, doc: &Automerge, obj: ExId, prop: Prop, tagged_value: (i64, ExId)) { - if let Ok(mut p) = doc.parents(&obj) { + fn increment(&mut self, doc: &R, obj: ExId, prop: Prop, tagged_value: (i64, ExId)) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Increment { obj, path: p.path(), @@ -259,8 +282,8 @@ impl OpObserver for VecOpObserver { } } - fn delete_map(&mut self, doc: &Automerge, obj: ExId, key: &str) { - if let Ok(mut p) = doc.parents(&obj) { + fn delete_map(&mut self, doc: &R, obj: ExId, key: &str) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Delete { obj, path: p.path(), @@ -270,8 +293,8 @@ impl OpObserver for VecOpObserver { } } - fn delete_seq(&mut self, doc: &Automerge, obj: ExId, index: usize, num: usize) { - if let Ok(mut p) = doc.parents(&obj) { + fn delete_seq(&mut self, doc: &R, obj: ExId, index: usize, num: usize) { + if let Ok(p) = doc.parents(&obj) { self.patches.push(Patch::Delete { obj, path: p.path(), @@ -280,7 +303,9 @@ impl OpObserver for VecOpObserver { }) } } +} +impl BranchableObserver for VecOpObserver { fn merge(&mut self, other: &Self) { self.patches.extend_from_slice(other.patches.as_slice()) } diff --git a/rust/automerge/src/op_observer/compose.rs b/rust/automerge/src/op_observer/compose.rs new file mode 100644 index 00000000..92fe3b1e --- /dev/null +++ b/rust/automerge/src/op_observer/compose.rs @@ -0,0 +1,102 @@ +use super::OpObserver; + +pub fn compose<'a, O1: OpObserver, O2: OpObserver>( + obs1: &'a mut O1, + obs2: &'a mut O2, +) -> impl OpObserver + 'a { + ComposeObservers { obs1, obs2 } +} + +struct ComposeObservers<'a, O1: OpObserver, O2: OpObserver> { + obs1: &'a mut O1, + obs2: &'a mut O2, +} + +impl<'a, O1: OpObserver, O2: OpObserver> OpObserver for ComposeObservers<'a, O1, O2> { + fn insert( + &mut self, + doc: &R, + objid: crate::ObjId, + index: usize, + tagged_value: (crate::Value<'_>, crate::ObjId), + ) { + self.obs1 + .insert(doc, objid.clone(), index, tagged_value.clone()); + self.obs2.insert(doc, objid, index, tagged_value); + } + + fn splice_text( + &mut self, + doc: &R, + objid: crate::ObjId, + index: usize, + value: &str, + ) { + self.obs1.splice_text(doc, objid.clone(), index, value); + self.obs2.splice_text(doc, objid, index, value); + } + + fn put( + &mut self, + doc: &R, + objid: crate::ObjId, + prop: crate::Prop, + tagged_value: (crate::Value<'_>, crate::ObjId), + conflict: bool, + ) { + self.obs1.put( + doc, + objid.clone(), + prop.clone(), + tagged_value.clone(), + conflict, + ); + self.obs2.put(doc, objid, prop, tagged_value, conflict); + } + + fn expose( + &mut self, + doc: &R, + objid: crate::ObjId, + prop: crate::Prop, + tagged_value: (crate::Value<'_>, crate::ObjId), + conflict: bool, + ) { + self.obs1.expose( + doc, + objid.clone(), + prop.clone(), + tagged_value.clone(), + conflict, + ); + self.obs2.expose(doc, objid, prop, tagged_value, conflict); + } + + fn increment( + &mut self, + doc: &R, + objid: crate::ObjId, + prop: crate::Prop, + tagged_value: (i64, crate::ObjId), + ) { + self.obs1 + .increment(doc, objid.clone(), prop.clone(), tagged_value.clone()); + self.obs2.increment(doc, objid, prop, tagged_value); + } + + fn delete_map(&mut self, 
doc: &R, objid: crate::ObjId, key: &str) { + self.obs1.delete_map(doc, objid.clone(), key); + self.obs2.delete_map(doc, objid, key); + } + + fn delete_seq( + &mut self, + doc: &R, + objid: crate::ObjId, + index: usize, + num: usize, + ) { + self.obs2.delete_seq(doc, objid.clone(), index, num); + self.obs2.delete_seq(doc, objid, index, num); + } +} diff --git a/rust/automerge/src/parents.rs b/rust/automerge/src/parents.rs index 76c4bba1..e1c5cc66 100644 --- a/rust/automerge/src/parents.rs +++ b/rust/automerge/src/parents.rs @@ -3,6 +3,14 @@ use crate::op_set::OpSet; use crate::types::{ListEncoding, ObjId}; use crate::{exid::ExId, Prop}; +/// An iterator over the "parents" of an object +/// +/// The "parent" of an object in this context is the ([`ExId`], [`Prop`]) pair which specifies the +/// location of this object in the composite object which contains it. Each element in the iterator +/// is a [`Parent`], yielded in reverse order. This means that once the iterator returns `None` you +/// have reached the root of the document. +/// +/// This is returned by [`crate::ReadDoc::parents`] #[derive(Debug)] pub struct Parents<'a> { pub(crate) obj: ObjId, @@ -10,9 +18,10 @@ pub struct Parents<'a> { } impl<'a> Parents<'a> { - // returns the path to the object - // works even if the object or a parent has been deleted - pub fn path(&mut self) -> Vec<(ExId, Prop)> { + /// Return the path this `Parents` represents + /// + /// This is _not_ in reverse order. + pub fn path(self) -> Vec<(ExId, Prop)> { let mut path = self .map(|Parent { obj, prop, .. }| (obj, prop)) .collect::>(); @@ -20,10 +29,8 @@ impl<'a> Parents<'a> { path } - // returns the path to the object - // if the object or one of its parents has been deleted or conflicted out - // returns none - pub fn visible_path(&mut self) -> Option> { + /// Like `path` but returns `None` if the target is not visible + pub fn visible_path(self) -> Option> { let mut path = Vec::new(); for Parent { obj, prop, visible } in self { if !visible { @@ -59,17 +66,25 @@ impl<'a> Iterator for Parents<'a> { } } +/// A component of a path to an object #[derive(Debug, PartialEq, Eq)] pub struct Parent { + /// The object ID this component refers to pub obj: ExId, + /// The property within `obj` this component refers to pub prop: Prop, + /// Whether this component is "visible" + /// + /// An "invisible" component is one where the property is hidden, either because it has been + /// deleted or because there is a conflict on this (object, property) pair and this value does + /// not win the conflict. pub visible: bool, } #[cfg(test)] mod tests { use super::Parent; - use crate::{transaction::Transactable, Prop}; + use crate::{transaction::Transactable, Prop, ReadDoc}; #[test] fn test_invisible_parents() { diff --git a/rust/automerge/src/read.rs b/rust/automerge/src/read.rs new file mode 100644 index 00000000..6d479718 --- /dev/null +++ b/rust/automerge/src/read.rs @@ -0,0 +1,199 @@ +use crate::{ + error::AutomergeError, exid::ExId, keys::Keys, keys_at::KeysAt, list_range::ListRange, + list_range_at::ListRangeAt, map_range::MapRange, map_range_at::MapRangeAt, parents::Parents, + values::Values, Change, ChangeHash, ObjType, Prop, Value, +}; + +use std::ops::RangeBounds; + +/// Methods for reading values from an automerge document +/// +/// Many of the methods on this trait have an alternate `*_at` version which +/// takes an additional argument of `&[ChangeHash]`. 
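As an aside to the `Parents` rework above: a minimal sketch (not part of this patch) of how the now-consuming `Parents::path` is intended to be used. It assumes the `AutoCommit` wrapper and the crate-root re-exports (`ReadDoc`, `ObjType`, `ROOT`) that appear elsewhere in this series; the `main` scaffolding is illustrative only.

```
use automerge::{transaction::Transactable, AutoCommit, ObjType, ReadDoc, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    // Build ROOT -> "birds" (a list) -> 0 (a map) -> "name"
    let birds = doc.put_object(ROOT, "birds", ObjType::List)?;
    let magpie = doc.insert_object(&birds, 0, ObjType::Map)?;
    doc.put(&magpie, "name", "Magpie")?;

    // `parents` iterates from the object towards the root; `path` consumes the
    // iterator and returns the (object id, property) pairs in document order,
    // conceptually [(ROOT, "birds"), (birds, 0)] here.
    let path = doc.parents(&magpie)?.path();
    println!("{:?}", path);
    Ok(())
}
```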
This allows you to retrieve +/// the value at a particular point in the document history identified by the +/// given change hashes. +pub trait ReadDoc { + /// Get the parents of an object in the document tree. + /// + /// See the documentation for [`Parents`] for more details. + /// + /// ### Errors + /// + /// Returns an error when the id given is not the id of an object in this document. + /// This function does not get the parents of scalar values contained within objects. + /// + /// ### Experimental + /// + /// This function may in future be changed to allow getting the parents from the id of a scalar + /// value. + fn parents>(&self, obj: O) -> Result, AutomergeError>; + + /// Get the path to an object + /// + /// "path" here means the sequence of `(object Id, key)` pairs which leads + /// to the object in question. + /// + /// ### Errors + /// + /// * If the object ID `obj` is not in the document + fn path_to_object>(&self, obj: O) -> Result, AutomergeError>; + + /// Get the keys of the object `obj`. + /// + /// For a map this returns the keys of the map. + /// For a list this returns the element ids (opids) encoded as strings. + fn keys>(&self, obj: O) -> Keys<'_, '_>; + + /// Get the keys of the object `obj` as at `heads` + /// + /// See [`Self::keys`] + fn keys_at>(&self, obj: O, heads: &[ChangeHash]) -> KeysAt<'_, '_>; + + /// Iterate over the keys and values of the map `obj` in the given range. + /// + /// If the object correspoding to `obj` is a list then this will return an empty iterator + /// + /// The returned iterator yields `(key, value, exid)` tuples, where the + /// third element is the ID of the operation which created the value. + fn map_range, R: RangeBounds>( + &self, + obj: O, + range: R, + ) -> MapRange<'_, R>; + + /// Iterate over the keys and values of the map `obj` in the given range as + /// at `heads` + /// + /// If the object correspoding to `obj` is a list then this will return an empty iterator + /// + /// The returned iterator yields `(key, value, exid)` tuples, where the + /// third element is the ID of the operation which created the value. + /// + /// See [`Self::map_range`] + fn map_range_at, R: RangeBounds>( + &self, + obj: O, + range: R, + heads: &[ChangeHash], + ) -> MapRangeAt<'_, R>; + + /// Iterate over the indexes and values of the list or text `obj` in the given range. + /// + /// The reuturned iterator yields `(index, value, exid)` tuples, where the third + /// element is the ID of the operation which created the value. + fn list_range, R: RangeBounds>( + &self, + obj: O, + range: R, + ) -> ListRange<'_, R>; + + /// Iterate over the indexes and values of the list or text `obj` in the given range as at `heads` + /// + /// The returned iterator yields `(index, value, exid)` tuples, where the third + /// element is the ID of the operation which created the value. + /// + /// See [`Self::list_range`] + fn list_range_at, R: RangeBounds>( + &self, + obj: O, + range: R, + heads: &[ChangeHash], + ) -> ListRangeAt<'_, R>; + + /// Iterate over the values in a map, list, or text object + /// + /// The returned iterator yields `(value, exid)` tuples, where the second element + /// is the ID of the operation which created the value. + fn values>(&self, obj: O) -> Values<'_>; + + /// Iterate over the values in a map, list, or text object as at `heads` + /// + /// The returned iterator yields `(value, exid)` tuples, where the second element + /// is the ID of the operation which created the value. 
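To make the read-side surface sketched so far concrete, here is a hedged example of the kind of code `ReadDoc` is meant to support, including an `*_at` variant. It assumes `AutoCommit` implements `ReadDoc` (as the test changes later in this series suggest) and that an `AutoCommit::get_heads` helper is available; neither appears in this hunk.

```
use automerge::{transaction::Transactable, AutoCommit, ReadDoc, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    doc.put(ROOT, "title", "readme")?;
    doc.put(ROOT, "stars", 5)?;
    let heads = doc.get_heads(); // assumed helper, not shown in this diff
    doc.put(ROOT, "stars", 6)?;

    // Current state of the document
    for key in doc.keys(ROOT) {
        println!("key: {}", key);
    }
    if let Some((value, _id)) = doc.get(ROOT, "stars")? {
        println!("stars now: {:?}", value); // the current value (6)
    }

    // The same query "as at" the earlier heads
    if let Some((value, _id)) = doc.get_at(ROOT, "stars", &heads)? {
        println!("stars then: {:?}", value); // the value at `heads` (5)
    }
    Ok(())
}
```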
+ /// + /// See [`Self::values`] + fn values_at>(&self, obj: O, heads: &[ChangeHash]) -> Values<'_>; + + /// Get the length of the given object. + /// + /// If the given object is not in this document this method will return `0` + fn length>(&self, obj: O) -> usize; + + /// Get the length of the given object as at `heads` + /// + /// If the given object is not in this document this method will return `0` + /// + /// See [`Self::length`] + fn length_at>(&self, obj: O, heads: &[ChangeHash]) -> usize; + + /// Get the type of this object, if it is an object. + fn object_type>(&self, obj: O) -> Result; + + /// Get the string represented by the given text object. + fn text>(&self, obj: O) -> Result; + + /// Get the string represented by the given text object as at `heads`, see + /// [`Self::text`] + fn text_at>( + &self, + obj: O, + heads: &[ChangeHash], + ) -> Result; + + /// Get a value out of the document. + /// + /// This returns a tuple of `(value, object ID)`. This is for two reasons: + /// + /// 1. If `value` is an object (represented by `Value::Object`) then the ID + /// is the ID of that object. This can then be used to retrieve nested + /// values from the document. + /// 2. Even if `value` is a scalar, the ID represents the operation which + /// created the value. This is useful if there are conflicting values for + /// this key as each value is tagged with the ID. + /// + /// In the case of a key which has conflicting values, this method will + /// return a single arbitrarily chosen value. This value will be chosen + /// deterministically on all nodes. If you want to get all the values for a + /// key use [`Self::get_all`]. + fn get, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError>; + + /// Get the value of the given key as at `heads`, see `[Self::get]` + fn get_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError>; + + /// Get all conflicting values out of the document at this prop that conflict. + /// + /// If there are multiple conflicting values for a given key this method + /// will return all of them, with each value tagged by the ID of the + /// operation which created it. + fn get_all, P: Into>( + &self, + obj: O, + prop: P, + ) -> Result, ExId)>, AutomergeError>; + + /// Get all possibly conflicting values for a key as at `heads` + /// + /// See `[Self::get_all]` + fn get_all_at, P: Into>( + &self, + obj: O, + prop: P, + heads: &[ChangeHash], + ) -> Result, ExId)>, AutomergeError>; + + /// Get the hashes of the changes in this document that aren't transitive dependencies of the + /// given `heads`. + fn get_missing_deps(&self, heads: &[ChangeHash]) -> Vec; + + /// Get a change by its hash. + fn get_change_by_hash(&self, hash: &ChangeHash) -> Option<&Change>; +} diff --git a/rust/automerge/src/sync.rs b/rust/automerge/src/sync.rs index 1545f954..5d71d989 100644 --- a/rust/automerge/src/sync.rs +++ b/rust/automerge/src/sync.rs @@ -1,10 +1,79 @@ +//! # Sync Protocol +//! +//! The sync protocol is based on this paper: +//! , it assumes a reliable in-order stream +//! between two peers who are synchronizing a document. +//! +//! Each peer maintains a [`State`] for each peer they are synchronizing with. +//! This state tracks things like what the heads of the other peer are and +//! whether there are in-flight messages. Anything which implements [`SyncDoc`] +//! can take part in the sync protocol. The flow goes something like this: +//! +//! 
* The initiating peer creates an empty [`State`] and then calls +//! [`SyncDoc::generate_sync_message`] to generate new sync message and sends +//! it to the receiving peer. +//! * The receiving peer receives a message from the initiator, creates a new +//! [`State`], and calls [`SyncDoc::receive_sync_message`] on it's view of the +//! document +//! * The receiving peer then calls [`SyncDoc::generate_sync_message`] to generate +//! a new sync message and send it back to the initiator +//! * From this point on each peer operates in a loop, receiving a sync message +//! from the other peer and then generating a new message to send back. +//! +//! ## Example +//! +//! ``` +//! use automerge::{transaction::Transactable, sync::{self, SyncDoc}, ReadDoc}; +//! # fn main() -> Result<(), automerge::AutomergeError> { +//! // Create a document on peer1 +//! let mut peer1 = automerge::AutoCommit::new(); +//! peer1.put(automerge::ROOT, "key", "value")?; +//! +//! // Create a state to track our sync with peer2 +//! let mut peer1_state = sync::State::new(); +//! // Generate the initial message to send to peer2, unwrap for brevity +//! let message1to2 = peer1.sync().generate_sync_message(&mut peer1_state).unwrap(); +//! +//! // We receive the message on peer2. We don't have a document at all yet +//! // so we create one +//! let mut peer2 = automerge::AutoCommit::new(); +//! // We don't have a state for peer1 (it's a new connection), so we create one +//! let mut peer2_state = sync::State::new(); +//! // Now receive the message from peer 1 +//! peer2.sync().receive_sync_message(&mut peer2_state, message1to2)?; +//! +//! // Now we loop, sending messages from one to two and two to one until +//! // neither has anything new to send +//! +//! loop { +//! let two_to_one = peer2.sync().generate_sync_message(&mut peer2_state); +//! if let Some(message) = two_to_one.as_ref() { +//! println!("two to one"); +//! peer1.sync().receive_sync_message(&mut peer1_state, message.clone())?; +//! } +//! let one_to_two = peer1.sync().generate_sync_message(&mut peer1_state); +//! if let Some(message) = one_to_two.as_ref() { +//! println!("one to two"); +//! peer2.sync().receive_sync_message(&mut peer2_state, message.clone())?; +//! } +//! if two_to_one.is_none() && one_to_two.is_none() { +//! break; +//! } +//! } +//! +//! assert_eq!(peer2.get(automerge::ROOT, "key")?.unwrap().0.to_str(), Some("value")); +//! +//! # Ok(()) +//! # } +//! ``` + use itertools::Itertools; use serde::ser::SerializeMap; use std::collections::{HashMap, HashSet}; use crate::{ storage::{parse, Change as StoredChange, ReadChangeOpError}, - Automerge, AutomergeError, Change, ChangeHash, OpObserver, + Automerge, AutomergeError, Change, ChangeHash, OpObserver, ReadDoc, }; mod bloom; @@ -14,10 +83,38 @@ pub use bloom::{BloomFilter, DecodeError as DecodeBloomError}; pub use state::DecodeError as DecodeStateError; pub use state::{Have, State}; +/// A document which can take part in the sync protocol +/// +/// See the [module level documentation](crate::sync) for more details. +pub trait SyncDoc { + /// Generate a sync message for the remote peer represented by `sync_state` + /// + /// If this returns `None` then there are no new messages to send, either because we are + /// waiting for an acknolwedgement of an in-flight message, or because the remote is up to + /// date. 
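The module example above drives two concrete documents by hand; because both operations are now trait methods, the same loop can be written once, generically. A hedged sketch follows: `run_one_round` is an invented name, and it uses only `generate_sync_message` and `receive_sync_message` as declared on this trait (the patch below also implements `SyncDoc` directly for `Automerge`).

```
use automerge::sync::{State, SyncDoc};
use automerge::AutomergeError;

/// Run one round trip of the sync protocol between two peers.
/// Returns `true` if either side still had something to send.
fn run_one_round<A: SyncDoc, B: SyncDoc>(
    a: &mut A,
    a_state: &mut State,
    b: &mut B,
    b_state: &mut State,
) -> Result<bool, AutomergeError> {
    let mut progressed = false;
    if let Some(msg) = a.generate_sync_message(a_state) {
        b.receive_sync_message(b_state, msg)?;
        progressed = true;
    }
    if let Some(msg) = b.generate_sync_message(b_state) {
        a.receive_sync_message(a_state, msg)?;
        progressed = true;
    }
    Ok(progressed)
}
```

Callers would then loop on `run_one_round` until it returns `Ok(false)`, exactly as the doc-test above does by hand.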
+ fn generate_sync_message(&self, sync_state: &mut State) -> Option; + + /// Apply a received sync message to this document and `sync_state` + fn receive_sync_message( + &mut self, + sync_state: &mut State, + message: Message, + ) -> Result<(), AutomergeError>; + + /// Apply a received sync message to this document and `sync_state`, observing any changes with + /// `op_observer` + fn receive_sync_message_with( + &mut self, + sync_state: &mut State, + message: Message, + op_observer: &mut Obs, + ) -> Result<(), AutomergeError>; +} + const MESSAGE_TYPE_SYNC: u8 = 0x42; // first byte of a sync message, for identification -impl Automerge { - pub fn generate_sync_message(&self, sync_state: &mut State) -> Option { +impl SyncDoc for Automerge { + fn generate_sync_message(&self, sync_state: &mut State) -> Option { let our_heads = self.get_heads(); let our_need = self.get_missing_deps(sync_state.their_heads.as_ref().unwrap_or(&vec![])); @@ -106,80 +203,25 @@ impl Automerge { Some(sync_message) } - pub fn receive_sync_message( + fn receive_sync_message( &mut self, sync_state: &mut State, message: Message, ) -> Result<(), AutomergeError> { - self.receive_sync_message_with::<()>(sync_state, message, None) + self.do_receive_sync_message::<()>(sync_state, message, None) } - pub fn receive_sync_message_with( + fn receive_sync_message_with( &mut self, sync_state: &mut State, message: Message, - op_observer: Option<&mut Obs>, + op_observer: &mut Obs, ) -> Result<(), AutomergeError> { - let before_heads = self.get_heads(); - - let Message { - heads: message_heads, - changes: message_changes, - need: message_need, - have: message_have, - } = message; - - let changes_is_empty = message_changes.is_empty(); - if !changes_is_empty { - self.apply_changes_with(message_changes, op_observer)?; - sync_state.shared_heads = advance_heads( - &before_heads.iter().collect(), - &self.get_heads().into_iter().collect(), - &sync_state.shared_heads, - ); - } - - // trim down the sent hashes to those that we know they haven't seen - self.filter_changes(&message_heads, &mut sync_state.sent_hashes)?; - - if changes_is_empty && message_heads == before_heads { - sync_state.last_sent_heads = message_heads.clone(); - } - - if sync_state.sent_hashes.is_empty() { - sync_state.in_flight = false; - } - - let known_heads = message_heads - .iter() - .filter(|head| self.get_change_by_hash(head).is_some()) - .collect::>(); - if known_heads.len() == message_heads.len() { - sync_state.shared_heads = message_heads.clone(); - sync_state.in_flight = false; - // If the remote peer has lost all its data, reset our state to perform a full resync - if message_heads.is_empty() { - sync_state.last_sent_heads = Default::default(); - sync_state.sent_hashes = Default::default(); - } - } else { - sync_state.shared_heads = sync_state - .shared_heads - .iter() - .chain(known_heads) - .copied() - .unique() - .sorted() - .collect::>(); - } - - sync_state.their_have = Some(message_have); - sync_state.their_heads = Some(message_heads); - sync_state.their_need = Some(message_need); - - Ok(()) + self.do_receive_sync_message(sync_state, message, Some(op_observer)) } +} +impl Automerge { fn make_bloom_filter(&self, last_sync: Vec) -> Have { let new_changes = self .get_changes(&last_sync) @@ -261,6 +303,72 @@ impl Automerge { Ok(changes_to_send) } } + + fn do_receive_sync_message( + &mut self, + sync_state: &mut State, + message: Message, + op_observer: Option<&mut Obs>, + ) -> Result<(), AutomergeError> { + let before_heads = self.get_heads(); + + let Message { + 
heads: message_heads, + changes: message_changes, + need: message_need, + have: message_have, + } = message; + + let changes_is_empty = message_changes.is_empty(); + if !changes_is_empty { + self.apply_changes_with(message_changes, op_observer)?; + sync_state.shared_heads = advance_heads( + &before_heads.iter().collect(), + &self.get_heads().into_iter().collect(), + &sync_state.shared_heads, + ); + } + + // trim down the sent hashes to those that we know they haven't seen + self.filter_changes(&message_heads, &mut sync_state.sent_hashes)?; + + if changes_is_empty && message_heads == before_heads { + sync_state.last_sent_heads = message_heads.clone(); + } + + if sync_state.sent_hashes.is_empty() { + sync_state.in_flight = false; + } + + let known_heads = message_heads + .iter() + .filter(|head| self.get_change_by_hash(head).is_some()) + .collect::>(); + if known_heads.len() == message_heads.len() { + sync_state.shared_heads = message_heads.clone(); + sync_state.in_flight = false; + // If the remote peer has lost all its data, reset our state to perform a full resync + if message_heads.is_empty() { + sync_state.last_sent_heads = Default::default(); + sync_state.sent_hashes = Default::default(); + } + } else { + sync_state.shared_heads = sync_state + .shared_heads + .iter() + .chain(known_heads) + .copied() + .unique() + .sorted() + .collect::>(); + } + + sync_state.their_have = Some(message_have); + sync_state.their_heads = Some(message_heads); + sync_state.their_need = Some(message_need); + + Ok(()) + } } #[derive(Debug, thiserror::Error)] @@ -545,8 +653,8 @@ mod tests { doc.put(crate::ROOT, "key", "value").unwrap(); let mut sync_state = State::new(); - assert!(doc.generate_sync_message(&mut sync_state).is_some()); - assert!(doc.generate_sync_message(&mut sync_state).is_none()); + assert!(doc.sync().generate_sync_message(&mut sync_state).is_some()); + assert!(doc.sync().generate_sync_message(&mut sync_state).is_none()); } #[test] @@ -556,11 +664,12 @@ mod tests { let mut s1 = State::new(); let mut s2 = State::new(); let m1 = doc1 + .sync() .generate_sync_message(&mut s1) .expect("message was none"); - doc2.receive_sync_message(&mut s2, m1).unwrap(); - let m2 = doc2.generate_sync_message(&mut s2); + doc2.sync().receive_sync_message(&mut s2, m1).unwrap(); + let m2 = doc2.sync().generate_sync_message(&mut s2); assert!(m2.is_none()); } @@ -584,9 +693,11 @@ mod tests { //// both sides report what they have but have no shared peer state let msg1to2 = doc1 + .sync() .generate_sync_message(&mut s1) .expect("initial sync from 1 to 2 was None"); let msg2to1 = doc2 + .sync() .generate_sync_message(&mut s2) .expect("initial sync message from 2 to 1 was None"); assert_eq!(msg1to2.changes.len(), 0); @@ -595,52 +706,57 @@ mod tests { assert_eq!(msg2to1.have[0].last_sync.len(), 0); //// doc1 and doc2 receive that message and update sync state - doc1.receive_sync_message(&mut s1, msg2to1).unwrap(); - doc2.receive_sync_message(&mut s2, msg1to2).unwrap(); + doc1.sync().receive_sync_message(&mut s1, msg2to1).unwrap(); + doc2.sync().receive_sync_message(&mut s2, msg1to2).unwrap(); //// now both reply with their local changes the other lacks //// (standard warning that 1% of the time this will result in a "need" message) let msg1to2 = doc1 + .sync() .generate_sync_message(&mut s1) .expect("first reply from 1 to 2 was None"); assert_eq!(msg1to2.changes.len(), 5); let msg2to1 = doc2 + .sync() .generate_sync_message(&mut s2) .expect("first reply from 2 to 1 was None"); assert_eq!(msg2to1.changes.len(), 5); //// 
both should now apply the changes - doc1.receive_sync_message(&mut s1, msg2to1).unwrap(); + doc1.sync().receive_sync_message(&mut s1, msg2to1).unwrap(); assert_eq!(doc1.get_missing_deps(&[]), Vec::new()); - doc2.receive_sync_message(&mut s2, msg1to2).unwrap(); + doc2.sync().receive_sync_message(&mut s2, msg1to2).unwrap(); assert_eq!(doc2.get_missing_deps(&[]), Vec::new()); //// The response acknowledges the changes received and sends no further changes let msg1to2 = doc1 + .sync() .generate_sync_message(&mut s1) .expect("second reply from 1 to 2 was None"); assert_eq!(msg1to2.changes.len(), 0); let msg2to1 = doc2 + .sync() .generate_sync_message(&mut s2) .expect("second reply from 2 to 1 was None"); assert_eq!(msg2to1.changes.len(), 0); //// After receiving acknowledgements, their shared heads should be equal - doc1.receive_sync_message(&mut s1, msg2to1).unwrap(); - doc2.receive_sync_message(&mut s2, msg1to2).unwrap(); + doc1.sync().receive_sync_message(&mut s1, msg2to1).unwrap(); + doc2.sync().receive_sync_message(&mut s2, msg1to2).unwrap(); assert_eq!(s1.shared_heads, s2.shared_heads); //// We're in sync, no more messages required - assert!(doc1.generate_sync_message(&mut s1).is_none()); - assert!(doc2.generate_sync_message(&mut s2).is_none()); + assert!(doc1.sync().generate_sync_message(&mut s1).is_none()); + assert!(doc2.sync().generate_sync_message(&mut s2).is_none()); //// If we make one more change and start another sync then its lastSync should be updated doc1.put(crate::ROOT, "x", 5).unwrap(); doc1.commit(); let msg1to2 = doc1 + .sync() .generate_sync_message(&mut s1) .expect("third reply from 1 to 2 was None"); let mut expected_heads = vec![head1, head2]; @@ -782,8 +898,8 @@ mod tests { let mut iterations = 0; loop { - let a_to_b = a.generate_sync_message(a_sync_state); - let b_to_a = b.generate_sync_message(b_sync_state); + let a_to_b = a.sync().generate_sync_message(a_sync_state); + let b_to_a = b.sync().generate_sync_message(b_sync_state); if a_to_b.is_none() && b_to_a.is_none() { break; } @@ -791,10 +907,10 @@ mod tests { panic!("failed to sync in {} iterations", MAX_ITER); } if let Some(msg) = a_to_b { - b.receive_sync_message(b_sync_state, msg).unwrap() + b.sync().receive_sync_message(b_sync_state, msg).unwrap() } if let Some(msg) = b_to_a { - a.receive_sync_message(a_sync_state, msg).unwrap() + a.sync().receive_sync_message(a_sync_state, msg).unwrap() } iterations += 1; } diff --git a/rust/automerge/src/sync/state.rs b/rust/automerge/src/sync/state.rs index 00775196..354c605f 100644 --- a/rust/automerge/src/sync/state.rs +++ b/rust/automerge/src/sync/state.rs @@ -23,13 +23,23 @@ impl From for DecodeError { } /// The state of synchronisation with a peer. +/// +/// This should be persisted using [`Self::encode`] when you know you will be interacting with the +/// same peer in multiple sessions. [`Self::encode`] only encodes state which should be reused +/// across connections. #[derive(Debug, Clone, Default, PartialEq, Eq, Hash)] pub struct State { + /// The hashes which we know both peers have pub shared_heads: Vec, + /// The heads we last sent pub last_sent_heads: Vec, + /// The heads we last received from them pub their_heads: Option>, + /// Any specific changes they last said they needed pub their_need: Option>, + /// The bloom filters summarising what they said they have pub their_have: Option>, + /// The hashes we have sent in this session pub sent_hashes: BTreeSet, /// `generate_sync_message` should return `None` if there are no new changes to send. 
In diff --git a/rust/automerge/src/transaction/inner.rs b/rust/automerge/src/transaction/inner.rs index cba4e723..7e7db17d 100644 --- a/rust/automerge/src/transaction/inner.rs +++ b/rust/automerge/src/transaction/inner.rs @@ -717,7 +717,7 @@ struct SpliceArgs<'a> { #[cfg(test)] mod tests { - use crate::{transaction::Transactable, ROOT}; + use crate::{transaction::Transactable, ReadDoc, ROOT}; use super::*; diff --git a/rust/automerge/src/transaction/manual_transaction.rs b/rust/automerge/src/transaction/manual_transaction.rs index 22115aab..fa5f6340 100644 --- a/rust/automerge/src/transaction/manual_transaction.rs +++ b/rust/automerge/src/transaction/manual_transaction.rs @@ -1,7 +1,10 @@ use std::ops::RangeBounds; use crate::exid::ExId; -use crate::{Automerge, ChangeHash, KeysAt, ObjType, OpObserver, Prop, ScalarValue, Value, Values}; +use crate::op_observer::BranchableObserver; +use crate::{ + Automerge, ChangeHash, KeysAt, ObjType, OpObserver, Prop, ReadDoc, ScalarValue, Value, Values, +}; use crate::{AutomergeError, Keys}; use crate::{ListRange, ListRangeAt, MapRange, MapRangeAt}; @@ -49,7 +52,7 @@ impl<'a> Transaction<'a, observation::UnObserved> { } } -impl<'a, Obs: OpObserver> Transaction<'a, observation::Observed> { +impl<'a, Obs: OpObserver + BranchableObserver> Transaction<'a, observation::Observed> { pub fn observer(&mut self) -> &mut Obs { self.observation.as_mut().unwrap().observer() } @@ -112,95 +115,7 @@ impl<'a, Obs: observation::Observation> Transaction<'a, Obs> { } } -impl<'a, Obs: observation::Observation> Transactable for Transaction<'a, Obs> { - /// Get the number of pending operations in this transaction. - fn pending_ops(&self) -> usize { - self.inner.as_ref().unwrap().pending_ops() - } - - /// Set the value of property `P` to value `V` in object `obj`. - /// - /// # Errors - /// - /// This will return an error if - /// - The object does not exist - /// - The key is the wrong type for the object - /// - The key does not exist in the object - fn put, P: Into, V: Into>( - &mut self, - obj: O, - prop: P, - value: V, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.put(doc, obs, obj.as_ref(), prop, value)) - } - - fn put_object, P: Into>( - &mut self, - obj: O, - prop: P, - value: ObjType, - ) -> Result { - self.do_tx(|tx, doc, obs| tx.put_object(doc, obs, obj.as_ref(), prop, value)) - } - - fn insert, V: Into>( - &mut self, - obj: O, - index: usize, - value: V, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.insert(doc, obs, obj.as_ref(), index, value)) - } - - fn insert_object>( - &mut self, - obj: O, - index: usize, - value: ObjType, - ) -> Result { - self.do_tx(|tx, doc, obs| tx.insert_object(doc, obs, obj.as_ref(), index, value)) - } - - fn increment, P: Into>( - &mut self, - obj: O, - prop: P, - value: i64, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.increment(doc, obs, obj.as_ref(), prop, value)) - } - - fn delete, P: Into>( - &mut self, - obj: O, - prop: P, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.delete(doc, obs, obj.as_ref(), prop)) - } - - /// Splice new elements into the given sequence. 
Returns a vector of the OpIds used to insert - /// the new elements - fn splice, V: IntoIterator>( - &mut self, - obj: O, - pos: usize, - del: usize, - vals: V, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.splice(doc, obs, obj.as_ref(), pos, del, vals)) - } - - fn splice_text>( - &mut self, - obj: O, - pos: usize, - del: usize, - text: &str, - ) -> Result<(), AutomergeError> { - self.do_tx(|tx, doc, obs| tx.splice_text(doc, obs, obj.as_ref(), pos, del, text)) - } - +impl<'a, Obs: observation::Observation> ReadDoc for Transaction<'a, Obs> { fn keys>(&self, obj: O) -> Keys<'_, '_> { self.doc.keys(obj) } @@ -313,6 +228,108 @@ impl<'a, Obs: observation::Observation> Transactable for Transaction<'a, Obs> { self.doc.parents(obj) } + fn path_to_object>(&self, obj: O) -> Result, AutomergeError> { + self.doc.path_to_object(obj) + } + + fn get_missing_deps(&self, heads: &[ChangeHash]) -> Vec { + self.doc.get_missing_deps(heads) + } + + fn get_change_by_hash(&self, hash: &ChangeHash) -> Option<&crate::Change> { + self.doc.get_change_by_hash(hash) + } +} + +impl<'a, Obs: observation::Observation> Transactable for Transaction<'a, Obs> { + /// Get the number of pending operations in this transaction. + fn pending_ops(&self) -> usize { + self.inner.as_ref().unwrap().pending_ops() + } + + /// Set the value of property `P` to value `V` in object `obj`. + /// + /// # Errors + /// + /// This will return an error if + /// - The object does not exist + /// - The key is the wrong type for the object + /// - The key does not exist in the object + fn put, P: Into, V: Into>( + &mut self, + obj: O, + prop: P, + value: V, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.put(doc, obs, obj.as_ref(), prop, value)) + } + + fn put_object, P: Into>( + &mut self, + obj: O, + prop: P, + value: ObjType, + ) -> Result { + self.do_tx(|tx, doc, obs| tx.put_object(doc, obs, obj.as_ref(), prop, value)) + } + + fn insert, V: Into>( + &mut self, + obj: O, + index: usize, + value: V, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.insert(doc, obs, obj.as_ref(), index, value)) + } + + fn insert_object>( + &mut self, + obj: O, + index: usize, + value: ObjType, + ) -> Result { + self.do_tx(|tx, doc, obs| tx.insert_object(doc, obs, obj.as_ref(), index, value)) + } + + fn increment, P: Into>( + &mut self, + obj: O, + prop: P, + value: i64, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.increment(doc, obs, obj.as_ref(), prop, value)) + } + + fn delete, P: Into>( + &mut self, + obj: O, + prop: P, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.delete(doc, obs, obj.as_ref(), prop)) + } + + /// Splice new elements into the given sequence. Returns a vector of the OpIds used to insert + /// the new elements + fn splice, V: IntoIterator>( + &mut self, + obj: O, + pos: usize, + del: usize, + vals: V, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.splice(doc, obs, obj.as_ref(), pos, del, vals)) + } + + fn splice_text>( + &mut self, + obj: O, + pos: usize, + del: usize, + text: &str, + ) -> Result<(), AutomergeError> { + self.do_tx(|tx, doc, obs| tx.splice_text(doc, obs, obj.as_ref(), pos, del, text)) + } + fn base_heads(&self) -> Vec { self.doc.get_heads() } diff --git a/rust/automerge/src/transaction/observation.rs b/rust/automerge/src/transaction/observation.rs index 974004cf..53723711 100644 --- a/rust/automerge/src/transaction/observation.rs +++ b/rust/automerge/src/transaction/observation.rs @@ -1,15 +1,17 @@ //! 
This module is essentially a type level Option. It is used in sitations where we know at //! compile time whether an `OpObserver` is available to track changes in a transaction. -use crate::{ChangeHash, OpObserver}; +use crate::{op_observer::BranchableObserver, ChangeHash, OpObserver}; mod private { + use crate::op_observer::BranchableObserver; + pub trait Sealed {} - impl Sealed for super::Observed {} + impl Sealed for super::Observed {} impl Sealed for super::UnObserved {} } pub trait Observation: private::Sealed { - type Obs: OpObserver; + type Obs: OpObserver + BranchableObserver; type CommitResult; fn observer(&mut self) -> Option<&mut Self::Obs>; @@ -19,9 +21,9 @@ pub trait Observation: private::Sealed { } #[derive(Clone, Debug)] -pub struct Observed(Obs); +pub struct Observed(Obs); -impl Observed { +impl Observed { pub(crate) fn new(o: O) -> Self { Self(o) } @@ -31,7 +33,7 @@ impl Observed { } } -impl Observation for Observed { +impl Observation for Observed { type Obs = Obs; type CommitResult = (Obs, Option); fn observer(&mut self) -> Option<&mut Self::Obs> { diff --git a/rust/automerge/src/transaction/transactable.rs b/rust/automerge/src/transaction/transactable.rs index 7f38edbe..05c48c79 100644 --- a/rust/automerge/src/transaction/transactable.rs +++ b/rust/automerge/src/transaction/transactable.rs @@ -1,13 +1,8 @@ -use std::ops::RangeBounds; - use crate::exid::ExId; -use crate::{ - AutomergeError, ChangeHash, Keys, KeysAt, ListRange, ListRangeAt, MapRange, MapRangeAt, - ObjType, Parents, Prop, ScalarValue, Value, Values, -}; +use crate::{AutomergeError, ChangeHash, ObjType, Prop, ReadDoc, ScalarValue}; /// A way of mutating a document within a single change. -pub trait Transactable { +pub trait Transactable: ReadDoc { /// Get the number of pending operations in this transaction. fn pending_ops(&self) -> usize; @@ -93,106 +88,6 @@ pub trait Transactable { text: &str, ) -> Result<(), AutomergeError>; - /// Get the keys of the given object, it should be a map. - fn keys>(&self, obj: O) -> Keys<'_, '_>; - - /// Get the keys of the given object at a point in history. - fn keys_at>(&self, obj: O, heads: &[ChangeHash]) -> KeysAt<'_, '_>; - - fn map_range, R: RangeBounds>( - &self, - obj: O, - range: R, - ) -> MapRange<'_, R>; - - fn map_range_at, R: RangeBounds>( - &self, - obj: O, - range: R, - heads: &[ChangeHash], - ) -> MapRangeAt<'_, R>; - - fn list_range, R: RangeBounds>( - &self, - obj: O, - range: R, - ) -> ListRange<'_, R>; - - fn list_range_at, R: RangeBounds>( - &self, - obj: O, - range: R, - heads: &[ChangeHash], - ) -> ListRangeAt<'_, R>; - - fn values>(&self, obj: O) -> Values<'_>; - - fn values_at>(&self, obj: O, heads: &[ChangeHash]) -> Values<'_>; - - /// Get the length of the given object. - fn length>(&self, obj: O) -> usize; - - /// Get the length of the given object at a point in history. - fn length_at>(&self, obj: O, heads: &[ChangeHash]) -> usize; - - /// Get type for object - fn object_type>(&self, obj: O) -> Result; - - /// Get the string that this text object represents. - fn text>(&self, obj: O) -> Result; - - /// Get the string that this text object represents at a point in history. - fn text_at>( - &self, - obj: O, - heads: &[ChangeHash], - ) -> Result; - - /// Get the value at this prop in the object. - fn get, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError>; - - /// Get the value at this prop in the object at a point in history. 
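Because `Transaction` now implements `ReadDoc` as well as `Transactable` (see the `manual_transaction.rs` hunk above), reads and writes can be interleaved inside a single uncommitted change. A rough sketch, assuming `Automerge::transaction()` and `Transaction::commit()`, neither of which appears in this diff; the pending-op count in the assert is illustrative.

```
use automerge::{transaction::Transactable, Automerge, ObjType, ReadDoc, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = Automerge::new();

    let mut tx = doc.transaction(); // assumed constructor, not shown here
    let todos = tx.put_object(ROOT, "todos", ObjType::List)?;
    tx.insert(&todos, 0, "buy milk")?;
    tx.insert(&todos, 1, "water plants")?;

    // Reads go through `ReadDoc` and observe the uncommitted operations.
    assert_eq!(tx.length(&todos), 2);
    assert_eq!(tx.pending_ops(), 3); // put_object + two inserts

    tx.commit();
    assert_eq!(doc.length(&todos), 2);
    Ok(())
}
```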
- fn get_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError>; - - fn get_all, P: Into>( - &self, - obj: O, - prop: P, - ) -> Result, ExId)>, AutomergeError>; - - fn get_all_at, P: Into>( - &self, - obj: O, - prop: P, - heads: &[ChangeHash], - ) -> Result, ExId)>, AutomergeError>; - - /// Get the parents of an object in the document tree. - /// - /// ### Errors - /// - /// Returns an error when the id given is not the id of an object in this document. - /// This function does not get the parents of scalar values contained within objects. - /// - /// ### Experimental - /// - /// This function may in future be changed to allow getting the parents from the id of a scalar - /// value. - fn parents>(&self, obj: O) -> Result, AutomergeError>; - - fn path_to_object>(&self, obj: O) -> Result, AutomergeError> { - Ok(self.parents(obj.as_ref().clone())?.path()) - } - /// The heads this transaction will be based on fn base_heads(&self) -> Vec; } diff --git a/rust/automerge/src/types.rs b/rust/automerge/src/types.rs index 7bbf4353..870569e9 100644 --- a/rust/automerge/src/types.rs +++ b/rust/automerge/src/types.rs @@ -143,12 +143,17 @@ impl fmt::Display for ActorId { } } +/// The type of an object #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq, Copy, Hash)] #[serde(rename_all = "camelCase", untagged)] pub enum ObjType { + /// A map Map, + /// Retained for backwards compatibility, tables are identical to maps Table, + /// A sequence of arbitrary values List, + /// A sequence of characters Text, } @@ -378,9 +383,15 @@ pub(crate) enum Key { Seq(ElemId), } +/// A property of an object +/// +/// This is either a string representing a property in a map, or an integer +/// which is the index into a sequence #[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone)] pub enum Prop { + /// A property in a map Map(String), + /// An index into a sequence Seq(usize), } @@ -454,9 +465,17 @@ impl ObjId { } } +/// How indexes into text sequeces are calculated +/// +/// Automerge text objects are internally sequences of utf8 characters. This +/// means that in environments (such as javascript) which use a different +/// encoding the indexes into the text sequence will be different. This enum +/// represents the different ways indexes can be calculated. 
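A small standard-library-only illustration of why this distinction matters (no automerge API involved): the same string spans a different number of units depending on whether you count UTF-8 bytes or UTF-16 code units.

```
fn main() {
    let s = "héllo 🦀";
    println!("utf-8 units:  {}", s.len());                  // 11: 'é' is 2 bytes, '🦀' is 4
    println!("utf-16 units: {}", s.encode_utf16().count()); // 8:  '🦀' is a surrogate pair
    println!("chars:        {}", s.chars().count());        // 7
}
```

Indexes into a text object therefore depend on which of these units the caller counts in, which is the choice `TextEncoding` captures.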
#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum TextEncoding { + /// The indexes are calculated using the utf8 encoding Utf8, + /// The indexes are calculated using the utf16 encoding Utf16, } diff --git a/rust/automerge/src/value.rs b/rust/automerge/src/value.rs index d8429f4e..be128787 100644 --- a/rust/automerge/src/value.rs +++ b/rust/automerge/src/value.rs @@ -5,9 +5,12 @@ use smol_str::SmolStr; use std::borrow::Cow; use std::fmt; +/// The type of values in an automerge document #[derive(Debug, Clone, PartialEq)] pub enum Value<'a> { + /// An composite object of type `ObjType` Object(ObjType), + /// A non composite value // TODO: if we don't have to store this in patches any more then it might be able to be just a // &'a ScalarValue rather than a Cow Scalar(Cow<'a, ScalarValue>), @@ -431,6 +434,7 @@ impl From<&Counter> for f64 { } } +/// A value which is not a composite value #[derive(Serialize, PartialEq, Debug, Clone)] #[serde(untagged)] pub enum ScalarValue { @@ -442,7 +446,11 @@ pub enum ScalarValue { Counter(Counter), Timestamp(i64), Boolean(bool), - Unknown { type_code: u8, bytes: Vec }, + /// A value from a future version of automerge + Unknown { + type_code: u8, + bytes: Vec, + }, Null, } diff --git a/rust/automerge/src/values.rs b/rust/automerge/src/values.rs index 90f596f3..15ccb4cb 100644 --- a/rust/automerge/src/values.rs +++ b/rust/automerge/src/values.rs @@ -2,6 +2,9 @@ use crate::exid::ExId; use crate::{Automerge, Value}; use std::fmt; +/// An iterator over the values in an object +/// +/// This is returned by the [`crate::ReadDoc::values`] and [`crate::ReadDoc::values_at`] methods pub struct Values<'a> { range: Box>, doc: &'a Automerge, @@ -52,9 +55,3 @@ impl<'a> Iterator for Values<'a> { self.range.next_value(self.doc) } } - -impl<'a> DoubleEndedIterator for Values<'a> { - fn next_back(&mut self) -> Option { - unimplemented!() - } -} diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index df0e4cff..ca6c64c0 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -1,7 +1,7 @@ use automerge::transaction::Transactable; use automerge::{ - ActorId, AutoCommit, Automerge, AutomergeError, Change, ExpandedChange, ObjType, ScalarValue, - VecOpObserver, ROOT, + ActorId, AutoCommit, Automerge, AutomergeError, Change, ExpandedChange, ObjType, ReadDoc, + ScalarValue, VecOpObserver, ROOT, }; use std::fs; @@ -21,7 +21,7 @@ fn no_conflict_on_repeated_assignment() { doc.put(&automerge::ROOT, "foo", 1).unwrap(); doc.put(&automerge::ROOT, "foo", 2).unwrap(); assert_doc!( - doc.document(), + &doc, map! { "foo" => { 2 }, } @@ -41,7 +41,7 @@ fn repeated_map_assignment_which_resolves_conflict_not_ignored() { doc1.put(&automerge::ROOT, "field", 123).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "field" => { 123 } } @@ -62,7 +62,7 @@ fn repeated_list_assignment_which_resolves_conflict_not_ignored() { doc1.put(&list_id, 0, 789).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "list" => { list![ @@ -84,7 +84,7 @@ fn list_deletion() { doc.insert(&list_id, 2, 789).unwrap(); doc.delete(&list_id, 1).unwrap(); assert_doc!( - doc.document(), + &doc, map! { "list" => { list![ { 123 }, @@ -106,7 +106,7 @@ fn merge_concurrent_map_prop_updates() { "bar".into() ); assert_doc!( - doc1.document(), + &doc1, map! { "foo" => { "bar" }, "hello" => { "world" }, @@ -114,7 +114,7 @@ fn merge_concurrent_map_prop_updates() { ); doc2.merge(&mut doc1).unwrap(); assert_doc!( - doc2.document(), + &doc2, map! 
{ "foo" => { "bar" }, "hello" => { "world" }, @@ -134,7 +134,7 @@ fn add_concurrent_increments_of_same_property() { doc2.increment(&automerge::ROOT, "counter", 2).unwrap(); doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "counter" => { mk_counter(3) @@ -161,7 +161,7 @@ fn add_increments_only_to_preceeded_values() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "counter" => { mk_counter(1), @@ -181,7 +181,7 @@ fn concurrent_updates_of_same_field() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "field" => { "one", @@ -206,7 +206,7 @@ fn concurrent_updates_of_same_list_element() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => { list![{ @@ -232,7 +232,7 @@ fn assignment_conflicts_of_different_types() { doc1.merge(&mut doc3).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "field" => { "string", @@ -255,7 +255,7 @@ fn changes_within_conflicting_map_field() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "field" => { "string", @@ -292,7 +292,7 @@ fn changes_within_conflicting_list_element() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "list" => { list![ @@ -330,7 +330,7 @@ fn concurrently_assigned_nested_maps_should_not_merge() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "config" => { map!{ @@ -364,7 +364,7 @@ fn concurrent_insertions_at_different_list_positions() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "list" => { list![ @@ -396,7 +396,7 @@ fn concurrent_insertions_at_same_list_position() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => { list![ @@ -427,7 +427,7 @@ fn concurrent_assignment_and_deletion_of_a_map_entry() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "bestBird" => { "magpie", @@ -451,7 +451,7 @@ fn concurrent_assignment_and_deletion_of_list_entry() { doc2.delete(&list_id, 1).unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "birds" => {list![ {"blackbird"}, @@ -461,7 +461,7 @@ fn concurrent_assignment_and_deletion_of_list_entry() { ); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => {list![ { "blackbird" }, @@ -474,7 +474,7 @@ fn concurrent_assignment_and_deletion_of_list_entry() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => {list![ { "blackbird" }, @@ -507,7 +507,7 @@ fn insertion_after_a_deleted_list_element() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => {list![ { "blackbird" }, @@ -518,7 +518,7 @@ fn insertion_after_a_deleted_list_element() { doc2.merge(&mut doc1).unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "birds" => {list![ { "blackbird" }, @@ -549,7 +549,7 @@ fn concurrent_deletion_of_same_list_element() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => {list![ { "albatross" }, @@ -560,7 +560,7 @@ fn concurrent_deletion_of_same_list_element() { doc2.merge(&mut doc1).unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "birds" => {list![ { "albatross" }, @@ -593,7 +593,7 @@ fn concurrent_updates_at_different_levels() { doc1.merge(&mut doc2).unwrap(); assert_obj!( - doc1.document(), + &doc1, &automerge::ROOT, "animals", map! 
{ @@ -635,7 +635,7 @@ fn concurrent_updates_of_concurrently_deleted_objects() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "birds" => { map!{}, @@ -686,7 +686,7 @@ fn does_not_interleave_sequence_insertions_at_same_position() { doc1.merge(&mut doc2).unwrap(); assert_doc!( - doc1.document(), + &doc1, map! { "wisdom" => {list![ {"to"}, @@ -719,7 +719,7 @@ fn mutliple_insertions_at_same_list_position_with_insertion_by_greater_actor_id( doc2.insert(&list, 0, "one").unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "list" => { list![ { "one" }, @@ -744,7 +744,7 @@ fn mutliple_insertions_at_same_list_position_with_insertion_by_lesser_actor_id() doc2.insert(&list, 0, "one").unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "list" => { list![ { "one" }, @@ -771,7 +771,7 @@ fn insertion_consistent_with_causality() { doc2.insert(&list, 0, "one").unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "list" => { list![ {"one"}, @@ -1129,7 +1129,7 @@ fn test_merging_test_conflicts_then_saving_and_loading() { let mut doc2 = AutoCommit::load(&doc1.save()).unwrap(); doc2.set_actor(actor2); - assert_doc! {doc2.document(), map!{ + assert_doc! {&doc2, map!{ "text" => { list![{"h"}, {"e"}, {"l"}, {"l"}, {"o"}]}, }}; @@ -1139,16 +1139,16 @@ fn test_merging_test_conflicts_then_saving_and_loading() { doc2.splice_text(&text, 6, 0, "world").unwrap(); assert_doc!( - doc2.document(), + &doc2, map! { "text" => { list![{"h"}, {"e"}, {"l"}, {"l"}, {"!"}, {" "}, {"w"} , {"o"}, {"r"}, {"l"}, {"d"}]} } ); - let mut doc3 = AutoCommit::load(&doc2.save()).unwrap(); + let doc3 = AutoCommit::load(&doc2.save()).unwrap(); assert_doc!( - doc3.document(), + &doc3, map! { "text" => { list![{"h"}, {"e"}, {"l"}, {"l"}, {"!"}, {" "}, {"w"} , {"o"}, {"r"}, {"l"}, {"d"}]} } diff --git a/rust/edit-trace/src/main.rs b/rust/edit-trace/src/main.rs index debe52db..9724a109 100644 --- a/rust/edit-trace/src/main.rs +++ b/rust/edit-trace/src/main.rs @@ -1,4 +1,5 @@ use automerge::ObjType; +use automerge::ReadDoc; use automerge::{transaction::Transactable, Automerge, AutomergeError, ROOT}; use std::time::Instant; From de5af2fffa957a0dda7cfb388a57389e216621aa Mon Sep 17 00:00:00 2001 From: alexjg Date: Mon, 30 Jan 2023 19:58:35 +0000 Subject: [PATCH 48/72] automerge-rs 0.3.0 and automerge-test 0.2.0 (#512) --- rust/automerge-test/Cargo.toml | 4 ++-- rust/automerge/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rust/automerge-test/Cargo.toml b/rust/automerge-test/Cargo.toml index 4fba0379..9290d7ac 100644 --- a/rust/automerge-test/Cargo.toml +++ b/rust/automerge-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "automerge-test" -version = "0.1.0" +version = "0.2.0" edition = "2021" license = "MIT" repository = "https://github.com/automerge/automerge-rs" @@ -10,7 +10,7 @@ description = "Utilities for testing automerge libraries" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -automerge = { version = "^0.2", path = "../automerge" } +automerge = { version = "^0.3", path = "../automerge" } smol_str = { version = "^0.1.21", features=["serde"] } serde = { version = "^1.0", features=["derive"] } decorum = "0.3.1" diff --git a/rust/automerge/Cargo.toml b/rust/automerge/Cargo.toml index 578878ae..e5a9125d 100644 --- a/rust/automerge/Cargo.toml +++ b/rust/automerge/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "automerge" -version = "0.2.0" +version = "0.3.0" edition = "2021" license = "MIT" repository = 
"https://github.com/automerge/automerge-rs" From a6959e70e87aa9d882f68683144ede925ce62042 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Tue, 31 Jan 2023 10:54:54 -0700 Subject: [PATCH 49/72] More robust leb128 parsing (#515) Before this change i64 decoding did not work for negative numbers (not a real problem because it is only used for the timestamp of a change), and both u64 and i64 would allow overlong LEB encodings. --- rust/automerge/src/storage/parse.rs | 2 +- rust/automerge/src/storage/parse/leb128.rs | 292 +++++++++++++++++---- 2 files changed, 239 insertions(+), 55 deletions(-) diff --git a/rust/automerge/src/storage/parse.rs b/rust/automerge/src/storage/parse.rs index 64419fda..54668da4 100644 --- a/rust/automerge/src/storage/parse.rs +++ b/rust/automerge/src/storage/parse.rs @@ -110,7 +110,7 @@ use crate::{ActorId, ChangeHash}; const HASH_SIZE: usize = 32; // 256 bits = 32 bytes #[allow(unused_imports)] -pub(crate) use self::leb128::{leb128_i32, leb128_i64, leb128_u32, leb128_u64, nonzero_leb128_u64}; +pub(crate) use self::leb128::{leb128_i64, leb128_u32, leb128_u64, nonzero_leb128_u64}; pub(crate) type ParseResult<'a, O, E> = Result<(Input<'a>, O), ParseError>; diff --git a/rust/automerge/src/storage/parse/leb128.rs b/rust/automerge/src/storage/parse/leb128.rs index 800253c9..9f5e72a2 100644 --- a/rust/automerge/src/storage/parse/leb128.rs +++ b/rust/automerge/src/storage/parse/leb128.rs @@ -1,4 +1,3 @@ -use core::mem::size_of; use std::num::NonZeroU64; use super::{take1, Input, ParseError, ParseResult}; @@ -7,44 +6,83 @@ use super::{take1, Input, ParseError, ParseResult}; pub(crate) enum Error { #[error("leb128 was too large for the destination type")] Leb128TooLarge, + #[error("leb128 was improperly encoded")] + Leb128Overlong, #[error("leb128 was zero when it was expected to be nonzero")] UnexpectedZero, } -macro_rules! 
impl_leb { - ($parser_name: ident, $ty: ty) => { - #[allow(dead_code)] - pub(crate) fn $parser_name<'a, E>(input: Input<'a>) -> ParseResult<'a, $ty, E> - where - E: From, - { - let mut res = 0; - let mut shift = 0; +pub(crate) fn leb128_u64(input: Input<'_>) -> ParseResult<'_, u64, E> +where + E: From, +{ + let mut res = 0; + let mut shift = 0; + let mut input = input; - let mut input = input; - let mut pos = 0; - loop { - let (i, byte) = take1(input)?; - input = i; - if (byte & 0x80) == 0 { - res |= (byte as $ty) << shift; - return Ok((input, res)); - } else if pos == leb128_size::<$ty>() - 1 { - return Err(ParseError::Error(Error::Leb128TooLarge.into())); - } else { - res |= ((byte & 0x7F) as $ty) << shift; - } - pos += 1; - shift += 7; + loop { + let (i, byte) = take1(input)?; + input = i; + res |= ((byte & 0x7F) as u64) << shift; + shift += 7; + + if (byte & 0x80) == 0 { + if shift > 64 && byte > 1 { + return Err(ParseError::Error(Error::Leb128TooLarge.into())); + } else if shift > 7 && byte == 0 { + return Err(ParseError::Error(Error::Leb128Overlong.into())); } + return Ok((input, res)); + } else if shift > 64 { + return Err(ParseError::Error(Error::Leb128TooLarge.into())); } - }; + } } -impl_leb!(leb128_u64, u64); -impl_leb!(leb128_u32, u32); -impl_leb!(leb128_i64, i64); -impl_leb!(leb128_i32, i32); +pub(crate) fn leb128_i64(input: Input<'_>) -> ParseResult<'_, i64, E> +where + E: From, +{ + let mut res = 0; + let mut shift = 0; + + let mut input = input; + let mut prev = 0; + loop { + let (i, byte) = take1(input)?; + input = i; + res |= ((byte & 0x7F) as i64) << shift; + shift += 7; + + if (byte & 0x80) == 0 { + if shift > 64 && byte != 0 && byte != 0x7f { + // the 10th byte (if present) must contain only the sign-extended sign bit + return Err(ParseError::Error(Error::Leb128TooLarge.into())); + } else if shift > 7 + && ((byte == 0 && prev & 0x40 == 0) || (byte == 0x7f && prev & 0x40 > 0)) + { + // overlong if the sign bit of penultimate byte has been extended + return Err(ParseError::Error(Error::Leb128Overlong.into())); + } else if shift < 64 && byte & 0x40 > 0 { + // sign extend negative numbers + res |= -1 << shift; + } + return Ok((input, res)); + } else if shift > 64 { + return Err(ParseError::Error(Error::Leb128TooLarge.into())); + } + prev = byte; + } +} + +pub(crate) fn leb128_u32(input: Input<'_>) -> ParseResult<'_, u32, E> +where + E: From, +{ + let (i, num) = leb128_u64(input)?; + let result = u32::try_from(num).map_err(|_| ParseError::Error(Error::Leb128TooLarge.into()))?; + Ok((i, result)) +} /// Parse a LEB128 encoded u64 from the input, throwing an error if it is `0` pub(crate) fn nonzero_leb128_u64(input: Input<'_>) -> ParseResult<'_, NonZeroU64, E> @@ -57,38 +95,27 @@ where Ok((input, result)) } -/// Maximum LEB128-encoded size of an integer type -const fn leb128_size() -> usize { - let bits = size_of::() * 8; - (bits + 6) / 7 // equivalent to ceil(bits/7) w/o floats -} - #[cfg(test)] mod tests { use super::super::Needed; use super::*; - use std::{convert::TryFrom, num::NonZeroUsize}; + use std::num::NonZeroUsize; const NEED_ONE: Needed = Needed::Size(unsafe { NonZeroUsize::new_unchecked(1) }); #[test] - fn leb_128_unsigned() { + fn leb_128_u64() { let one = &[0b00000001_u8]; let one_two_nine = &[0b10000001, 0b00000001]; let one_and_more = &[0b00000001, 0b00000011]; let scenarios: Vec<(&'static [u8], ParseResult<'_, u64, Error>)> = vec![ (one, Ok((Input::with_position(one, 1), 1))), - (&[0b10000001_u8], Err(ParseError::Incomplete(NEED_ONE))), ( one_two_nine, 
Ok((Input::with_position(one_two_nine, 2), 129)), ), (one_and_more, Ok((Input::with_position(one_and_more, 1), 1))), - ( - &[129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129], - Err(ParseError::Error(Error::Leb128TooLarge)), - ), ]; for (index, (input, expected)) in scenarios.clone().into_iter().enumerate() { let result = leb128_u64(Input::new(input)); @@ -102,17 +129,174 @@ mod tests { } } - for (index, (input, expected)) in scenarios.into_iter().enumerate() { - let u32_expected = expected.map(|(i, e)| (i, u32::try_from(e).unwrap())); - let result = leb128_u32(Input::new(input)); - if result != u32_expected { - panic!( - "Scenario {} failed for u32: expected {:?} got {:?}", - index + 1, - u32_expected, - result - ); + let error_cases: Vec<(&'static str, &'static [u8], ParseError<_>)> = vec![ + ( + "too many bytes", + &[129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "too many bits", + &[129, 129, 129, 129, 129, 129, 129, 129, 129, 2], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "overlong encoding", + &[129, 0], + ParseError::Error(Error::Leb128Overlong), + ), + ("missing data", &[255], ParseError::Incomplete(NEED_ONE)), + ]; + error_cases.into_iter().for_each(|(desc, input, expected)| { + match leb128_u64::(Input::new(input)) { + Ok((_, x)) => panic!("leb128_u64 should fail with {}, got {}", desc, x), + Err(error) => { + if error != expected { + panic!("leb128_u64 should fail with {}, got {}", expected, error) + } + } } - } + }); + + let success_cases: Vec<(&'static [u8], u64)> = vec![ + (&[0], 0), + (&[0x7f], 127), + (&[0x80, 0x01], 128), + (&[0xff, 0x7f], 16383), + ( + &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1], + u64::MAX, + ), + ]; + success_cases.into_iter().for_each(|(input, expected)| { + match leb128_u64::(Input::new(input)) { + Ok((_, x)) => { + if x != expected { + panic!("leb128_u64 should succeed with {}, got {}", expected, x) + } + } + Err(error) => panic!("leb128_u64 should succeed with {}, got {}", expected, error), + } + }); + } + + #[test] + fn leb_128_u32() { + let error_cases: Vec<(&'static str, &'static [u8], ParseError<_>)> = vec![ + ( + "too many bytes", + &[129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "too many bits", + &[0xff, 0xff, 0xff, 0xff, 0x1f], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "overlong encoding", + &[129, 0], + ParseError::Error(Error::Leb128Overlong), + ), + ("missing data", &[0xaa], ParseError::Incomplete(NEED_ONE)), + ]; + error_cases.into_iter().for_each(|(desc, input, expected)| { + match leb128_u32::(Input::new(input)) { + Ok((_, x)) => panic!("leb128_u32 should fail with {}, got {}", desc, x), + Err(error) => { + if error != expected { + panic!("leb128_u32 should fail with {}, got {}", expected, error) + } + } + } + }); + + let success_cases: Vec<(&'static [u8], u32)> = vec![ + (&[0], 0), + (&[0x7f], 127), + (&[0x80, 0x01], 128), + (&[0xff, 0x7f], 16383), + (&[0xff, 0xff, 0xff, 0xff, 0x0f], u32::MAX), + ]; + success_cases.into_iter().for_each(|(input, expected)| { + match leb128_u32::(Input::new(input)) { + Ok((_, x)) => { + if x != expected { + panic!("leb128_u32 should succeed with {}, got {}", expected, x) + } + } + Err(error) => panic!("leb128_u64 should succeed with {}, got {}", expected, error), + } + }); + } + + #[test] + fn leb_128_i64() { + let error_cases: Vec<(&'static str, &'static [u8], ParseError<_>)> = vec![ + ( + "too many bytes", + 
&[129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "too many positive bits", + &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "too many negative bits", + &[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7e], + ParseError::Error(Error::Leb128TooLarge), + ), + ( + "overlong positive encoding", + &[0xbf, 0], + ParseError::Error(Error::Leb128Overlong), + ), + ( + "overlong negative encoding", + &[0x81, 0xff, 0x7f], + ParseError::Error(Error::Leb128Overlong), + ), + ("missing data", &[0x90], ParseError::Incomplete(NEED_ONE)), + ]; + error_cases.into_iter().for_each(|(desc, input, expected)| { + match leb128_i64::(Input::new(input)) { + Ok((_, x)) => panic!("leb128_i64 should fail with {}, got {}", desc, x), + Err(error) => { + if error != expected { + panic!("leb128_i64 should fail with {}, got {}", expected, error) + } + } + } + }); + + let success_cases: Vec<(&'static [u8], i64)> = vec![ + (&[0], 0), + (&[0x7f], -1), + (&[0x3f], 63), + (&[0x40], -64), + (&[0x80, 0x01], 128), + (&[0xff, 0x3f], 8191), + (&[0x80, 0x40], -8192), + ( + &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0], + i64::MAX, + ), + ( + &[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f], + i64::MIN, + ), + ]; + success_cases.into_iter().for_each(|(input, expected)| { + match leb128_i64::(Input::new(input)) { + Ok((_, x)) => { + if x != expected { + panic!("leb128_i64 should succeed with {}, got {}", expected, x) + } + } + Err(error) => panic!("leb128_u64 should succeed with {}, got {}", expected, error), + } + }); } } From 2a9652e642fbf7296a85180d790d4e297559f93f Mon Sep 17 00:00:00 2001 From: alexjg Date: Wed, 1 Feb 2023 09:15:00 +0000 Subject: [PATCH 50/72] typescript: Hide API type and make SyncState opaque (#514) --- javascript/src/stable.ts | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts index 3b328240..74410346 100644 --- a/javascript/src/stable.ts +++ b/javascript/src/stable.ts @@ -26,7 +26,7 @@ import { Text } from "./text" export { Text } from "./text" import type { - API, + API as WasmAPI, Actor as ActorId, Prop, ObjID, @@ -34,7 +34,7 @@ import type { DecodedChange, Heads, MaterializeValue, - JsSyncState as SyncState, + JsSyncState, SyncMessage, DecodedSyncMessage, } from "@automerge/automerge-wasm" @@ -46,6 +46,17 @@ export type { IncPatch, SyncMessage, } from "@automerge/automerge-wasm" + +/** @hidden **/ +type API = WasmAPI + +const SyncStateSymbol = Symbol("_syncstate") + +/** + * An opaque type tracking the state of sync with a remote peer + */ +type SyncState = JsSyncState & { _opaque: typeof SyncStateSymbol } + import { ApiHandler, type ChangeToEncode, UseApi } from "./low_level" import { Automerge } from "@automerge/automerge-wasm" @@ -772,7 +783,7 @@ export function decodeSyncState(state: Uint8Array): SyncState { const sync = ApiHandler.decodeSyncState(state) const result = ApiHandler.exportSyncState(sync) sync.free() - return result + return result as SyncState } /** @@ -793,7 +804,7 @@ export function generateSyncMessage( const state = _state(doc) const syncState = ApiHandler.importSyncState(inState) const message = state.handle.generateSyncMessage(syncState) - const outState = ApiHandler.exportSyncState(syncState) + const outState = ApiHandler.exportSyncState(syncState) as SyncState return [outState, message] } @@ -835,7 +846,7 @@ export 
function receiveSyncMessage( } const heads = state.handle.getHeads() state.handle.receiveSyncMessage(syncState, message) - const outSyncState = ApiHandler.exportSyncState(syncState) + const outSyncState = ApiHandler.exportSyncState(syncState) as SyncState return [ progressDocument(doc, heads, opts.patchCallback || state.patchCallback), outSyncState, @@ -852,7 +863,7 @@ export function receiveSyncMessage( * @group sync */ export function initSyncState(): SyncState { - return ApiHandler.exportSyncState(ApiHandler.initSyncState()) + return ApiHandler.exportSyncState(ApiHandler.initSyncState()) as SyncState } /** @hidden */ From f8d5a8ea989580ab54d0dc541859a79b31a70107 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Feb 2023 09:15:54 +0000 Subject: [PATCH 51/72] Bump json5 from 1.0.1 to 1.0.2 in /javascript/examples/create-react-app (#487) Bumps [json5](https://github.com/json5/json5) from 1.0.1 to 1.0.2. in javascript/examples/create-react-app --- javascript/examples/create-react-app/yarn.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/javascript/examples/create-react-app/yarn.lock b/javascript/examples/create-react-app/yarn.lock index d6e5d93f..ec83af3b 100644 --- a/javascript/examples/create-react-app/yarn.lock +++ b/javascript/examples/create-react-app/yarn.lock @@ -5845,9 +5845,9 @@ json-stable-stringify-without-jsonify@^1.0.1: integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== json5@^1.0.1: - version "1.0.1" - resolved "http://localhost:4873/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" - integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== + version "1.0.2" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593" + integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA== dependencies: minimist "^1.2.0" @@ -6165,9 +6165,9 @@ minimatch@^5.0.1: brace-expansion "^2.0.1" minimist@^1.2.0, minimist@^1.2.6: - version "1.2.6" - resolved "http://localhost:4873/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" - integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== + version "1.2.7" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18" + integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g== mkdirp@~0.5.1: version "0.5.6" From 9195e9cb7628ad380650d4e6ec727fbd481bfb7a Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 2 Feb 2023 15:02:53 +0000 Subject: [PATCH 52/72] Fix deny errors (#518) * Ignore deny errors on duplicate windows-sys * Delete spurious lockfile in automerge-cli --- rust/automerge-cli/Cargo.lock | 857 ---------------------------------- rust/deny.toml | 6 + 2 files changed, 6 insertions(+), 857 deletions(-) delete mode 100644 rust/automerge-cli/Cargo.lock diff --git a/rust/automerge-cli/Cargo.lock b/rust/automerge-cli/Cargo.lock deleted file mode 100644 index a330ee89..00000000 --- a/rust/automerge-cli/Cargo.lock +++ /dev/null @@ -1,857 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "159bb86af3a200e19a068f4224eae4c8bb2d0fa054c7e5d1cacd5cef95e684cd" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "automerge" -version = "0.1.0" -dependencies = [ - "flate2", - "fxhash", - "hex", - "itertools", - "js-sys", - "leb128", - "nonzero_ext", - "rand", - "serde", - "sha2", - "smol_str", - "thiserror", - "tinyvec", - "tracing", - "unicode-segmentation", - "uuid", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "automerge-cli" -version = "0.1.0" -dependencies = [ - "anyhow", - "atty", - "automerge", - "clap", - "colored_json", - "combine", - "duct", - "maplit", - "serde_json", - "thiserror", - "tracing-subscriber", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "block-buffer" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" -dependencies = [ - "generic-array", -] - -[[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clap" -version = "3.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced1892c55c910c1219e98d6fc8d71f6bddba7905866ce740066d8bfea859312" -dependencies = [ - "atty", - "bitflags", - "clap_derive", - "indexmap", - "lazy_static", - "os_str_bytes", - "strsim", - "termcolor", - "textwrap", -] - -[[package]] -name = "clap_derive" -version = "3.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95d038ede1a964ce99f49cbe27a7fb538d1da595e4b4f70b8c8f338d17bf16" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "colored_json" -version = 
"2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd32eb54d016e203b7c2600e3a7802c75843a92e38ccc4869aefeca21771a64" -dependencies = [ - "ansi_term", - "atty", - "libc", - "serde", - "serde_json", -] - -[[package]] -name = "combine" -version = "4.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50b727aacc797f9fc28e355d21f34709ac4fc9adecfe470ad07b8f4464f53062" -dependencies = [ - "bytes", - "memchr", -] - -[[package]] -name = "cpufeatures" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crypto-common" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "digest" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "duct" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc6a0a59ed0888e0041cf708e66357b7ae1a82f1c67247e1f93b5e0818f7d8d" -dependencies = [ - "libc", - "once_cell", - "os_pipe", - "shared_child", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "flate2" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" -dependencies = [ - "cfg-if", - "crc32fast", - "libc", - "miniz_oxide", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "generic-array" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi", - "wasm-bindgen", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - -[[package]] -name = "heck" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "indexmap" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "itertools" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" - -[[package]] -name = "js-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "leb128" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" - -[[package]] -name = "libc" -version = "0.2.119" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4" - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "nonzero_ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44a1290799eababa63ea60af0cbc3f03363e328e58f32fb0294798ed3e85f444" - -[[package]] -name = "once_cell" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" - -[[package]] -name = "os_pipe" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb233f06c2307e1f5ce2ecad9f8121cffbbee2c95428f44ea85222e460d0d213" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "os_str_bytes" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] - -[[package]] -name 
= "pin-project-lite" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" - -[[package]] -name = "ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom", -] - -[[package]] -name = "ryu" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" - -[[package]] -name = "serde" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha2" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shared_child" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6be9f7d5565b1483af3e72975e2dee33879b3b86bd48c0929fccf6585d79e65a" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "smallvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" - -[[package]] -name = "smol_str" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61d15c83e300cce35b7c8cd39ff567c1ef42dde6d4a1a38dbdbf9a59902261bd" -dependencies = [ - "serde", -] - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "syn" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - -[[package]] -name = "thiserror" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" -dependencies = [ - "once_cell", -] - -[[package]] -name = "tinyvec" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tracing" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6c650a8ef0cd2dd93736f033d21cbd1224c5a967aa0c258d00fcf7dafef9b9f" -dependencies = [ - "cfg-if", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8276d9a4a3a558d7b7ad5303ad50b53d58264641b82914b7ada36bd762e7a716" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" -dependencies = [ - "lazy_static", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" -dependencies = [ - "ansi_term", - "sharded-slab", - "smallvec", - "thread_local", - "tracing-core", - "tracing-log", -] - -[[package]] -name = "typenum" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" - -[[package]] -name = "unicode-segmentation" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom", - "serde", -] - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasm-bindgen" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" - -[[package]] -name = "web-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/rust/deny.toml b/rust/deny.toml index 54a68a60..12a562ce 100644 --- a/rust/deny.toml +++ b/rust/deny.toml @@ -172,6 +172,12 @@ deny = [ ] # Certain crates/versions that will be skipped when doing duplicate detection. skip = [ + # duct, which we only depend on for integration tests in automerge-cli, + # pulls in a version of os_pipe which in turn pulls in a version of + # windows-sys which is different to the version in pulled in by is-terminal. + # This is fine to ignore for now because it doesn't end up in downstream + # dependencies. + { name = "windows-sys", version = "0.42.0" } ] # Similarly to `skip` allows you to skip certain crates during duplicate # detection. Unlike skip, it also includes the entire tree of transitive From da55dfac7ae3baa0892d98b64fcd41be61733c37 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Mon, 23 Jan 2023 18:30:54 +0000 Subject: [PATCH 53/72] refactor: make fields of Automerge private The fields of `automerge::Automerge` were crate public, which made it hard to change the structure of `Automerge` with confidence. Make all fields private and put them behind accessors where necessary to allow for easy internal changes. --- rust/automerge/src/autocommit.rs | 2 +- rust/automerge/src/automerge.rs | 65 +++++++++++++++++++---- rust/automerge/src/op_set/load.rs | 6 +-- rust/automerge/src/transaction/inner.rs | 69 ++++++++++++------------- 4 files changed, 92 insertions(+), 50 deletions(-) diff --git a/rust/automerge/src/autocommit.rs b/rust/automerge/src/autocommit.rs index 2c1c3adf..ae28596e 100644 --- a/rust/automerge/src/autocommit.rs +++ b/rust/automerge/src/autocommit.rs @@ -159,7 +159,7 @@ impl AutoCommitWithObs { /// /// This is a cheap operation, it just changes the way indexes are calculated pub fn with_encoding(mut self, encoding: TextEncoding) -> Self { - self.doc.text_encoding = encoding; + self.doc = self.doc.with_encoding(encoding); self } diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 86aa5f63..1b789337 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -71,26 +71,26 @@ pub(crate) enum Actor { #[derive(Debug, Clone)] pub struct Automerge { /// The list of unapplied changes that are not causally ready. 
- pub(crate) queue: Vec, + queue: Vec, /// The history of changes that form this document, topologically sorted too. - pub(crate) history: Vec, + history: Vec, /// Mapping from change hash to index into the history list. - pub(crate) history_index: HashMap, + history_index: HashMap, /// Mapping from change hash to vector clock at this state. - pub(crate) clocks: HashMap, + clocks: HashMap, /// Mapping from actor index to list of seqs seen for them. - pub(crate) states: HashMap>, + states: HashMap>, /// Current dependencies of this document (heads hashes). - pub(crate) deps: HashSet, + deps: HashSet, /// Heads at the last save. - pub(crate) saved: Vec, + saved: Vec, /// The set of operations that form this document. - pub(crate) ops: OpSet, + ops: OpSet, /// The current actor. - pub(crate) actor: Actor, + actor: Actor, /// The maximum operation counter this document has seen. - pub(crate) max_op: u64, - pub(crate) text_encoding: TextEncoding, + max_op: u64, + text_encoding: TextEncoding, } impl Automerge { @@ -111,6 +111,49 @@ impl Automerge { } } + pub(crate) fn ops_mut(&mut self) -> &mut OpSet { + &mut self.ops + } + + pub(crate) fn ops(&self) -> &OpSet { + &self.ops + } + + pub(crate) fn into_ops(self) -> OpSet { + self.ops + } + + pub(crate) fn actor_id(&self) -> &ActorId { + match &self.actor { + Actor::Unused(id) => id, + Actor::Cached(idx) => self.ops.m.actors.get(*idx), + } + } + + /// Remove the current actor from the opset if it has no ops + /// + /// If the current actor ID has no ops in the opset then remove it from the cache of actor IDs. + /// This us used when rolling back a transaction. If the rolled back ops are the only ops for + /// the current actor then we want to remove that actor from the opset so it doesn't end up in + /// any saved version of the document. 
+ /// + /// # Panics + /// + /// If the last actor in the OpSet is not the actor ID of this document + pub(crate) fn rollback_last_actor(&mut self) { + if let Actor::Cached(actor_idx) = self.actor { + if self.states.get(&actor_idx).is_none() && self.ops.m.actors.len() > 0 { + assert!(self.ops.m.actors.len() == actor_idx + 1); + let actor = self.ops.m.actors.remove_last(); + self.actor = Actor::Unused(actor); + } + } + } + + pub(crate) fn text_encoding(&self) -> TextEncoding { + self.text_encoding + } + /// Change the text encoding of this view of the document /// /// This is a cheap operation, it just changes the way indexes are calculated diff --git a/rust/automerge/src/op_set/load.rs b/rust/automerge/src/op_set/load.rs index 6cc64e79..0df7f6ef 100644 --- a/rust/automerge/src/op_set/load.rs +++ b/rust/automerge/src/op_set/load.rs @@ -79,10 +79,10 @@ impl<'a, O: OpObserver> DocObserver for ObservedOpSetBuilder<'a, O> { } fn finish(self, _metadata: super::OpSetMetadata) -> Self::Output { - let mut opset = Automerge::new(); + let mut doc = Automerge::new(); for (obj, op) in self.ops { - opset.insert_op_with_observer(&obj, op, self.observer); + doc.insert_op_with_observer(&obj, op, self.observer); } - opset.ops + doc.into_ops() } } diff --git a/rust/automerge/src/transaction/inner.rs b/rust/automerge/src/transaction/inner.rs index 7e7db17d..95f922f3 100644 --- a/rust/automerge/src/transaction/inner.rs +++ b/rust/automerge/src/transaction/inner.rs @@ -98,7 +98,7 @@ impl TransactionInner { } let num_ops = self.pending_ops(); - let change = self.export(&doc.ops.m); + let change = self.export(&doc.ops().m); let hash = change.hash(); #[cfg(not(debug_assertions))] tracing::trace!(commit=?hash, deps=?change.deps(), "committing transaction"); @@ -153,20 +153,16 @@ impl TransactionInner { // remove in reverse order so sets are removed before makes etc... 
for (obj, op) in self.operations.into_iter().rev() { for pred_id in &op.pred { - if let Some(p) = doc.ops.search(&obj, OpIdSearch::new(*pred_id)).index() { - doc.ops.change_vis(&obj, p, |o| o.remove_succ(&op)); + if let Some(p) = doc.ops().search(&obj, OpIdSearch::new(*pred_id)).index() { + doc.ops_mut().change_vis(&obj, p, |o| o.remove_succ(&op)); } } - if let Some(pos) = doc.ops.search(&obj, OpIdSearch::new(op.id)).index() { - doc.ops.remove(&obj, pos); + if let Some(pos) = doc.ops().search(&obj, OpIdSearch::new(op.id)).index() { + doc.ops_mut().remove(&obj, pos); } } - // remove the actor from the cache so that it doesn't end up in the saved document - if doc.states.get(&self.actor).is_none() && doc.ops.m.actors.len() > 0 { - let actor = doc.ops.m.actors.remove_last(); - doc.actor = Actor::Unused(actor); - } + doc.rollback_last_actor(); num } @@ -277,10 +273,10 @@ impl TransactionInner { obj: ObjId, succ_pos: &[usize], ) { - doc.ops.add_succ(&obj, succ_pos, &op); + doc.ops_mut().add_succ(&obj, succ_pos, &op); if !op.is_delete() { - doc.ops.insert(pos, &obj, op.clone()); + doc.ops_mut().insert(pos, &obj, op.clone()); } self.finalize_op(doc, op_observer, obj, prop, op); @@ -332,7 +328,7 @@ impl TransactionInner { let id = self.next_id(); let query = doc - .ops + .ops() .search(&obj, query::InsertNth::new(index, ListEncoding::List)); let key = query.key()?; @@ -346,7 +342,7 @@ impl TransactionInner { insert: true, }; - doc.ops.insert(query.pos(), &obj, op.clone()); + doc.ops_mut().insert(query.pos(), &obj, op.clone()); self.finalize_op(doc, op_observer, obj, Prop::Seq(index), op); @@ -380,8 +376,8 @@ impl TransactionInner { } let id = self.next_id(); - let prop_index = doc.ops.m.props.cache(prop.clone()); - let query = doc.ops.search(&obj, query::Prop::new(prop_index)); + let prop_index = doc.ops_mut().m.props.cache(prop.clone()); + let query = doc.ops().search(&obj, query::Prop::new(prop_index)); // no key present to delete if query.ops.is_empty() && action == OpType::Delete { @@ -398,7 +394,7 @@ impl TransactionInner { return Err(AutomergeError::MissingCounter); } - let pred = doc.ops.m.sorted_opids(query.ops.iter().map(|o| o.id)); + let pred = doc.ops().m.sorted_opids(query.ops.iter().map(|o| o.id)); let op = Op { id, @@ -425,11 +421,11 @@ impl TransactionInner { action: OpType, ) -> Result, AutomergeError> { let query = doc - .ops + .ops() .search(&obj, query::Nth::new(index, ListEncoding::List)); let id = self.next_id(); - let pred = doc.ops.m.sorted_opids(query.ops.iter().map(|o| o.id)); + let pred = doc.ops().m.sorted_opids(query.ops.iter().map(|o| o.id)); let key = query.key()?; if query.ops.len() == 1 && query.ops[0].is_noop(&action) { @@ -490,7 +486,7 @@ impl TransactionInner { index, del: 1, values: vec![], - splice_type: SpliceType::Text("", doc.text_encoding), + splice_type: SpliceType::Text("", doc.text_encoding()), }, )?; } else { @@ -551,7 +547,7 @@ impl TransactionInner { index, del, values, - splice_type: SpliceType::Text(text, doc.text_encoding), + splice_type: SpliceType::Text(text, doc.text_encoding()), }, ) } @@ -568,13 +564,13 @@ impl TransactionInner { splice_type, }: SpliceArgs<'_>, ) -> Result<(), AutomergeError> { - let ex_obj = doc.ops.id_to_exid(obj.0); + let ex_obj = doc.ops().id_to_exid(obj.0); let encoding = splice_type.encoding(); // delete `del` items - performing the query for each one let mut deleted = 0; while deleted < del { // TODO: could do this with a single custom query - let query = doc.ops.search(&obj, query::Nth::new(index, encoding)); + let 
query = doc.ops().search(&obj, query::Nth::new(index, encoding)); // if we delete in the middle of a multi-character // move cursor back to the beginning and expand the del width @@ -590,9 +586,10 @@ impl TransactionInner { break; }; - let op = self.next_delete(query.key()?, query.pred(&doc.ops)); + let op = self.next_delete(query.key()?, query.pred(doc.ops())); - doc.ops.add_succ(&obj, &query.ops_pos, &op); + let ops_pos = query.ops_pos; + doc.ops_mut().add_succ(&obj, &ops_pos, &op); self.operations.push((obj, op)); @@ -608,7 +605,9 @@ impl TransactionInner { // do the insert query for the first item and then // insert the remaining ops one after the other if !values.is_empty() { - let query = doc.ops.search(&obj, query::InsertNth::new(index, encoding)); + let query = doc + .ops() + .search(&obj, query::InsertNth::new(index, encoding)); let mut pos = query.pos(); let mut key = query.key()?; let mut cursor = index; @@ -617,7 +616,7 @@ impl TransactionInner { for v in &values { let op = self.next_insert(key, v.clone()); - doc.ops.insert(pos, &obj, op.clone()); + doc.ops_mut().insert(pos, &obj, op.clone()); width = op.width(encoding); cursor += width; @@ -627,7 +626,7 @@ impl TransactionInner { self.operations.push((obj, op)); } - doc.ops.hint(&obj, cursor - width, pos - 1); + doc.ops_mut().hint(&obj, cursor - width, pos - 1); // handle the observer if let Some(obs) = op_observer.as_mut() { @@ -639,7 +638,7 @@ impl TransactionInner { let start = self.operations.len() - values.len(); for (offset, v) in values.iter().enumerate() { let op = &self.operations[start + offset].1; - let value = (v.clone().into(), doc.ops.id_to_exid(op.id)); + let value = (v.clone().into(), doc.ops().id_to_exid(op.id)); obs.insert(doc, ex_obj.clone(), index + offset, value) } } @@ -660,19 +659,19 @@ impl TransactionInner { ) { // TODO - id_to_exid should be a noop if not used - change type to Into? if let Some(op_observer) = op_observer { - let ex_obj = doc.ops.id_to_exid(obj.0); + let ex_obj = doc.ops().id_to_exid(obj.0); if op.insert { - let obj_type = doc.ops.object_type(&obj); + let obj_type = doc.ops().object_type(&obj); assert!(obj_type.unwrap().is_sequence()); match (obj_type, prop) { (Some(ObjType::List), Prop::Seq(index)) => { - let value = (op.value(), doc.ops.id_to_exid(op.id)); + let value = (op.value(), doc.ops().id_to_exid(op.id)); op_observer.insert(doc, ex_obj, index, value) } (Some(ObjType::Text), Prop::Seq(index)) => { // FIXME if op_observer.text_as_seq() { - let value = (op.value(), doc.ops.id_to_exid(op.id)); + let value = (op.value(), doc.ops().id_to_exid(op.id)); op_observer.insert(doc, ex_obj, index, value) } else { op_observer.splice_text(doc, ex_obj, index, op.to_str()) @@ -683,9 +682,9 @@ impl TransactionInner { } else if op.is_delete() { op_observer.delete(doc, ex_obj, prop); } else if let Some(value) = op.get_increment_value() { - op_observer.increment(doc, ex_obj, prop, (value, doc.ops.id_to_exid(op.id))); + op_observer.increment(doc, ex_obj, prop, (value, doc.ops().id_to_exid(op.id))); } else { - let value = (op.value(), doc.ops.id_to_exid(op.id)); + let value = (op.value(), doc.ops().id_to_exid(op.id)); op_observer.put(doc, ex_obj, prop, value, false); } } From c3c04128f5f1703007f650ea3104d98334334aab Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 26 Jan 2023 09:45:26 +0000 Subject: [PATCH 54/72] Only observe the current state on load Problem: When loading a document whilst passing an `OpObserver` we call the OpObserver for every change in the loaded document. 
This slows down the loading process for two reasons: 1) we have to make a call to the observer for every op 2) we cannot just stream the ops into the OpSet in topological order but must instead buffer them to pass to the observer. Solution: Construct the OpSet first, then only traverse the visible ops in the OpSet, calling the observer. For documents with a deep history this results in vastly fewer calls to the observer and also allows us to construct the OpSet much more quickly. It is slightly different semantically because the observer never gets notified of changes which are not visible, but that shouldn't matter to most observers. --- rust/automerge/Cargo.toml | 1 + rust/automerge/src/automerge.rs | 31 +- rust/automerge/src/automerge/current_state.rs | 890 ++++++++++++++++++ rust/automerge/src/op_set.rs | 55 +- rust/automerge/src/op_set/load.rs | 38 +- rust/automerge/src/storage/chunk.rs | 2 +- rust/automerge/src/sync.rs | 2 +- rust/automerge/src/transaction/inner.rs | 1 - rust/deny.toml | 3 + 9 files changed, 944 insertions(+), 79 deletions(-) create mode 100644 rust/automerge/src/automerge/current_state.rs diff --git a/rust/automerge/Cargo.toml b/rust/automerge/Cargo.toml index e5a9125d..0c10cc2b 100644 --- a/rust/automerge/Cargo.toml +++ b/rust/automerge/Cargo.toml @@ -47,6 +47,7 @@ criterion = "0.4.0" test-log = { version = "0.2.10", features=["trace"], default-features = false} tracing-subscriber = {version = "0.3.9", features = ["fmt", "env-filter"] } automerge-test = { path = "../automerge-test" } +prettytable = "0.10.0" [[bench]] name = "range" diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 1b789337..e0db8b5a 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -26,6 +26,8 @@ use crate::{ }; use serde::Serialize; +mod current_state; + #[cfg(test)] mod tests; @@ -119,17 +121,6 @@ impl Automerge { &self.ops } - pub(crate) fn into_ops(self) -> OpSet { - self.ops - } - - pub(crate) fn actor_id(&self) -> &ActorId { - match &self.actor { - Actor::Unused(id) => id, - Actor::Cached(idx) => self.ops.m.actors.get(*idx), - } - } - /// Remove the current actor from the opset if it has no ops /// /// If the current actor ID has no ops in the opset then remove it from the cache of actor IDs. @@ -455,13 +446,8 @@ impl Automerge { result: op_set, changes, heads, - } = match &mut observer { - Some(o) => { - storage::load::reconstruct_document(&d, mode, OpSet::observed_builder(*o)) - } - None => storage::load::reconstruct_document(&d, mode, OpSet::builder()), - } - .map_err(|e| load::Error::InflateDocument(Box::new(e)))?; + } = storage::load::reconstruct_document(&d, mode, OpSet::builder()) + .map_err(|e| load::Error::InflateDocument(Box::new(e)))?; let mut hashes_by_index = HashMap::new(); let mut actor_to_history: HashMap> = HashMap::new(); let mut clocks = Clocks::new(); @@ -517,6 +503,9 @@ impl Automerge { } load::LoadedChanges::Partial { error, .. 
} => return Err(error.into()), } + if let Some(observer) = &mut observer { + current_state::observe_current_state(&am, *observer); + } Ok(am) } @@ -715,7 +704,7 @@ impl Automerge { let c = self.history.iter(); let bytes = crate::storage::save::save_document( c, - self.ops.iter(), + self.ops.iter().map(|(objid, _, op)| (objid, op)), &self.ops.m.actors, &self.ops.m.props, &heads, @@ -731,7 +720,7 @@ impl Automerge { let c = self.history.iter(); let bytes = crate::storage::save::save_document( c, - self.ops.iter(), + self.ops.iter().map(|(objid, _, op)| (objid, op)), &self.ops.m.actors, &self.ops.m.props, &heads, @@ -944,7 +933,7 @@ impl Automerge { "pred", "succ" ); - for (obj, op) in self.ops.iter() { + for (obj, _, op) in self.ops.iter() { let id = self.to_string(op.id); let obj = self.to_string(obj); let key = match op.key { diff --git a/rust/automerge/src/automerge/current_state.rs b/rust/automerge/src/automerge/current_state.rs new file mode 100644 index 00000000..1c1bceed --- /dev/null +++ b/rust/automerge/src/automerge/current_state.rs @@ -0,0 +1,890 @@ +use std::{borrow::Cow, collections::HashSet, iter::Peekable}; + +use itertools::Itertools; + +use crate::{ + types::{ElemId, Key, ListEncoding, ObjId, Op, OpId}, + ObjType, OpObserver, OpType, ScalarValue, Value, +}; + +/// Traverse the "current" state of the document, notifying `observer` +/// +/// The "current" state of the document is the set of visible operations. This function will +/// traverse that set of operations and call the corresponding methods on the `observer` as it +/// encounters values. The `observer` methods will be called in the order in which they appear in +/// the document. That is to say that the observer will be notified of parent objects before the +/// objects they contain and elements of a sequence will be notified in the order they occur. +/// +/// Due to only notifying of visible operations the observer will only be called with `put`, +/// `insert`, and `splice`, operations. +pub(super) fn observe_current_state(doc: &crate::Automerge, observer: &mut O) { + // The OpSet already exposes operations in the order they appear in the document. + // `OpSet::iter_objs` iterates over the objects in causal order, this means that parent objects + // will always appear before their children. Furthermore, the operations within each object are + // ordered by key (which means by their position in a sequence for sequences). + // + // Effectively then we iterate over each object, then we group the operations in the object by + // key and for each key find the visible operations for that key. Then we notify the observer + // for each of those visible operations. 
+ let mut visible_objs = HashSet::new(); + visible_objs.insert(ObjId::root()); + for (obj, typ, ops) in doc.ops().iter_objs() { + if !visible_objs.contains(obj) { + continue; + } + let ops_by_key = ops.group_by(|o| o.key); + let actions = ops_by_key + .into_iter() + .flat_map(|(key, key_ops)| key_actions(key, key_ops)); + if typ == ObjType::Text && !observer.text_as_seq() { + track_new_objs_and_notify( + &mut visible_objs, + doc, + obj, + typ, + observer, + text_actions(actions), + ) + } else if typ == ObjType::List { + track_new_objs_and_notify( + &mut visible_objs, + doc, + obj, + typ, + observer, + list_actions(actions), + ) + } else { + track_new_objs_and_notify(&mut visible_objs, doc, obj, typ, observer, actions) + } + } +} + +fn track_new_objs_and_notify, O: OpObserver>( + visible_objs: &mut HashSet, + doc: &crate::Automerge, + obj: &ObjId, + typ: ObjType, + observer: &mut O, + actions: I, +) { + let exid = doc.id_to_exid(obj.0); + for action in actions { + if let Some(obj) = action.made_object() { + visible_objs.insert(obj); + } + action.notify_observer(doc, &exid, obj, typ, observer); + } +} + +trait Action { + /// Notify an observer of whatever this action does + fn notify_observer( + self, + doc: &crate::Automerge, + exid: &crate::ObjId, + obj: &ObjId, + typ: ObjType, + observer: &mut O, + ); + + /// If this action created an object, return the ID of that object + fn made_object(&self) -> Option; +} + +fn key_actions<'a, I: Iterator>( + key: Key, + key_ops: I, +) -> impl Iterator> { + #[derive(Clone)] + enum CurrentOp<'a> { + Put { + value: Value<'a>, + id: OpId, + conflicted: bool, + }, + Insert(Value<'a>, OpId), + } + let current_ops = key_ops + .filter(|o| o.visible()) + .filter_map(|o| match o.action { + OpType::Make(obj_type) => { + let value = Value::Object(obj_type); + if o.insert { + Some(CurrentOp::Insert(value, o.id)) + } else { + Some(CurrentOp::Put { + value, + id: o.id, + conflicted: false, + }) + } + } + OpType::Put(ref value) => { + let value = Value::Scalar(Cow::Borrowed(value)); + if o.insert { + Some(CurrentOp::Insert(value, o.id)) + } else { + Some(CurrentOp::Put { + value, + id: o.id, + conflicted: false, + }) + } + } + _ => None, + }); + current_ops + .coalesce(|previous, current| match (previous, current) { + (CurrentOp::Put { .. }, CurrentOp::Put { value, id, .. }) => Ok(CurrentOp::Put { + value, + id, + conflicted: true, + }), + (previous, current) => Err((previous, current)), + }) + .map(move |op| match op { + CurrentOp::Put { + value, + id, + conflicted, + } => SimpleAction::Put { + prop: key, + tagged_value: (value, id), + conflict: conflicted, + }, + CurrentOp::Insert(val, id) => SimpleAction::Insert { + elem_id: ElemId(id), + tagged_value: (val, id), + }, + }) +} + +/// Either a "put" or "insert" action. i.e. 
not splicing for text values +enum SimpleAction<'a> { + Put { + prop: Key, + tagged_value: (Value<'a>, OpId), + conflict: bool, + }, + Insert { + elem_id: ElemId, + tagged_value: (Value<'a>, OpId), + }, +} + +impl<'a> Action for SimpleAction<'a> { + fn notify_observer( + self, + doc: &crate::Automerge, + exid: &crate::ObjId, + obj: &ObjId, + typ: ObjType, + observer: &mut O, + ) { + let encoding = match typ { + ObjType::Text => ListEncoding::Text(doc.text_encoding()), + _ => ListEncoding::List, + }; + match self { + Self::Put { + prop, + tagged_value, + conflict, + } => { + let tagged_value = (tagged_value.0, doc.id_to_exid(tagged_value.1)); + let prop = doc.ops().export_key(*obj, prop, encoding).unwrap(); + observer.put(doc, exid.clone(), prop, tagged_value, conflict); + } + Self::Insert { + elem_id, + tagged_value: (value, opid), + } => { + let index = doc + .ops() + .search(obj, crate::query::ElemIdPos::new(elem_id, encoding)) + .index() + .unwrap(); + let tagged_value = (value, doc.id_to_exid(opid)); + observer.insert(doc, doc.id_to_exid(obj.0), index, tagged_value); + } + } + } + + fn made_object(&self) -> Option { + match self { + Self::Put { + tagged_value: (Value::Object(_), id), + .. + } => Some((*id).into()), + Self::Insert { + tagged_value: (Value::Object(_), id), + .. + } => Some((*id).into()), + _ => None, + } + } +} + +/// An `Action` which splices for text values +enum TextAction<'a> { + Action(SimpleAction<'a>), + Splice { start: ElemId, chars: String }, +} + +impl<'a> Action for TextAction<'a> { + fn notify_observer( + self, + doc: &crate::Automerge, + exid: &crate::ObjId, + obj: &ObjId, + typ: ObjType, + observer: &mut O, + ) { + match self { + Self::Action(action) => action.notify_observer(doc, exid, obj, typ, observer), + Self::Splice { start, chars } => { + let index = doc + .ops() + .search( + obj, + crate::query::ElemIdPos::new( + start, + ListEncoding::Text(doc.text_encoding()), + ), + ) + .index() + .unwrap(); + observer.splice_text(doc, doc.id_to_exid(obj.0), index, chars.as_str()); + } + } + } + + fn made_object(&self) -> Option { + match self { + Self::Action(action) => action.made_object(), + _ => None, + } + } +} + +fn list_actions<'a, I: Iterator>>( + actions: I, +) -> impl Iterator> { + actions.map(|a| match a { + SimpleAction::Put { + prop: Key::Seq(elem_id), + tagged_value, + .. + } => SimpleAction::Insert { + elem_id, + tagged_value, + }, + a => a, + }) +} + +/// Condense consecutive `SimpleAction::Insert` actions into one `TextAction::Splice` +fn text_actions<'a, I>(actions: I) -> impl Iterator> +where + I: Iterator>, +{ + TextActions { + ops: actions.peekable(), + } +} + +struct TextActions<'a, I: Iterator>> { + ops: Peekable, +} + +impl<'a, I: Iterator>> Iterator for TextActions<'a, I> { + type Item = TextAction<'a>; + + fn next(&mut self) -> Option { + if let Some(SimpleAction::Insert { .. }) = self.ops.peek() { + let (start, value) = match self.ops.next() { + Some(SimpleAction::Insert { + tagged_value: (value, opid), + .. + }) => (opid, value), + _ => unreachable!(), + }; + let mut chars = match value { + Value::Scalar(Cow::Borrowed(ScalarValue::Str(s))) => s.to_string(), + _ => "\u{fffc}".to_string(), + }; + while let Some(SimpleAction::Insert { .. }) = self.ops.peek() { + if let Some(SimpleAction::Insert { + tagged_value: (value, _), + .. 
+ }) = self.ops.next() + { + match value { + Value::Scalar(Cow::Borrowed(ScalarValue::Str(s))) => chars.push_str(s), + _ => chars.push('\u{fffc}'), + } + } + } + Some(TextAction::Splice { + start: ElemId(start), + chars, + }) + } else { + self.ops.next().map(TextAction::Action) + } + } +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + + use crate::{transaction::Transactable, ObjType, OpObserver, Prop, ReadDoc, Value}; + + // Observer ops often carry a "tagged value", which is a value and the OpID of the op which + // created that value. For a lot of values (i.e. any scalar value) we don't care about the + // opid. This type implements `PartialEq` for the `Untagged` variant by ignoring the tag, which + // allows us to express tests which don't care about the tag. + #[derive(Clone, Debug)] + enum ObservedValue { + Tagged(crate::Value<'static>, crate::ObjId), + Untagged(crate::Value<'static>), + } + + impl<'a> From<(Value<'a>, crate::ObjId)> for ObservedValue { + fn from(value: (Value<'a>, crate::ObjId)) -> Self { + Self::Tagged(value.0.into_owned(), value.1) + } + } + + impl PartialEq for ObservedValue { + fn eq(&self, other: &ObservedValue) -> bool { + match (self, other) { + (Self::Tagged(v1, o1), Self::Tagged(v2, o2)) => equal_vals(v1, v2) && o1 == o2, + (Self::Untagged(v1), Self::Untagged(v2)) => equal_vals(v1, v2), + (Self::Tagged(v1, _), Self::Untagged(v2)) => equal_vals(v1, v2), + (Self::Untagged(v1), Self::Tagged(v2, _)) => equal_vals(v1, v2), + } + } + } + + /// Consider counters equal if they have the same current value + fn equal_vals(v1: &Value<'_>, v2: &Value<'_>) -> bool { + match (v1, v2) { + (Value::Scalar(v1), Value::Scalar(v2)) => match (v1.as_ref(), v2.as_ref()) { + (crate::ScalarValue::Counter(c1), crate::ScalarValue::Counter(c2)) => { + c1.current == c2.current + } + _ => v1 == v2, + }, + _ => v1 == v2, + } + } + + #[derive(Debug, Clone, PartialEq)] + enum ObserverCall { + Put { + obj: crate::ObjId, + prop: Prop, + value: ObservedValue, + conflict: bool, + }, + Insert { + obj: crate::ObjId, + index: usize, + value: ObservedValue, + }, + SpliceText { + obj: crate::ObjId, + index: usize, + chars: String, + }, + } + + // A Vec is pretty hard to look at in a test failure. 
This wrapper prints the + // calls out in a nice table so it's easier to see what's different + #[derive(Clone, PartialEq)] + struct Calls(Vec); + + impl std::fmt::Debug for Calls { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut table = prettytable::Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); + table.set_titles(prettytable::row![ + "Op", "Object", "Property", "Value", "Conflict" + ]); + for call in &self.0 { + match call { + ObserverCall::Put { + obj, + prop, + value, + conflict, + } => { + table.add_row(prettytable::row![ + "Put", + format!("{}", obj), + prop, + match value { + ObservedValue::Tagged(v, o) => format!("{} ({})", v, o), + ObservedValue::Untagged(v) => format!("{}", v), + }, + conflict + ]); + } + ObserverCall::Insert { obj, index, value } => { + table.add_row(prettytable::row![ + "Insert", + format!("{}", obj), + index, + match value { + ObservedValue::Tagged(v, o) => format!("{} ({})", v, o), + ObservedValue::Untagged(v) => format!("{}", v), + }, + "" + ]); + } + ObserverCall::SpliceText { obj, index, chars } => { + table.add_row(prettytable::row![ + "SpliceText", + format!("{}", obj), + index, + chars, + "" + ]); + } + } + } + let mut out = Vec::new(); + table.print(&mut out).unwrap(); + write!(f, "\n{}\n", String::from_utf8(out).unwrap()) + } + } + + struct ObserverStub { + ops: Vec, + text_as_seq: bool, + } + + impl ObserverStub { + fn new() -> Self { + Self { + ops: Vec::new(), + text_as_seq: true, + } + } + + fn new_text_v2() -> Self { + Self { + ops: Vec::new(), + text_as_seq: false, + } + } + } + + impl OpObserver for ObserverStub { + fn insert( + &mut self, + _doc: &R, + objid: crate::ObjId, + index: usize, + tagged_value: (crate::Value<'_>, crate::ObjId), + ) { + self.ops.push(ObserverCall::Insert { + obj: objid, + index, + value: tagged_value.into(), + }); + } + + fn splice_text( + &mut self, + _doc: &R, + objid: crate::ObjId, + index: usize, + value: &str, + ) { + self.ops.push(ObserverCall::SpliceText { + obj: objid, + index, + chars: value.to_string(), + }); + } + + fn put( + &mut self, + _doc: &R, + objid: crate::ObjId, + prop: crate::Prop, + tagged_value: (crate::Value<'_>, crate::ObjId), + conflict: bool, + ) { + self.ops.push(ObserverCall::Put { + obj: objid, + prop, + value: tagged_value.into(), + conflict, + }); + } + + fn expose( + &mut self, + _doc: &R, + _objid: crate::ObjId, + _prop: crate::Prop, + _tagged_value: (crate::Value<'_>, crate::ObjId), + _conflict: bool, + ) { + panic!("expose not expected"); + } + + fn increment( + &mut self, + _doc: &R, + _objid: crate::ObjId, + _prop: crate::Prop, + _tagged_value: (i64, crate::ObjId), + ) { + panic!("increment not expected"); + } + + fn delete_map(&mut self, _doc: &R, _objid: crate::ObjId, _key: &str) { + panic!("delete not expected"); + } + + fn delete_seq( + &mut self, + _doc: &R, + _objid: crate::ObjId, + _index: usize, + _num: usize, + ) { + panic!("delete not expected"); + } + + fn text_as_seq(&self) -> bool { + self.text_as_seq + } + } + + #[test] + fn basic_test() { + let mut doc = crate::AutoCommit::new(); + doc.put(crate::ROOT, "key", "value").unwrap(); + let map = doc.put_object(crate::ROOT, "map", ObjType::Map).unwrap(); + doc.put(&map, "nested_key", "value").unwrap(); + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + doc.insert(&list, 0, "value").unwrap(); + let text = doc.put_object(crate::ROOT, "text", ObjType::Text).unwrap(); + doc.insert(&text, 0, "a").unwrap(); + + let mut obs 
= ObserverStub::new(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "key".into(), + value: ObservedValue::Untagged("value".into()), + conflict: false, + }, + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Put { + obj: crate::ROOT, + prop: "map".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::Map), map.clone()), + conflict: false, + }, + ObserverCall::Put { + obj: crate::ROOT, + prop: "text".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::Text), text.clone()), + conflict: false, + }, + ObserverCall::Put { + obj: map.clone(), + prop: "nested_key".into(), + value: ObservedValue::Untagged("value".into()), + conflict: false, + }, + ObserverCall::Insert { + obj: list, + index: 0, + value: ObservedValue::Untagged("value".into()), + }, + ObserverCall::Insert { + obj: text, + index: 0, + value: ObservedValue::Untagged("a".into()), + }, + ]) + ); + } + + #[test] + fn test_deleted_ops_omitted() { + let mut doc = crate::AutoCommit::new(); + doc.put(crate::ROOT, "key", "value").unwrap(); + doc.delete(crate::ROOT, "key").unwrap(); + let map = doc.put_object(crate::ROOT, "map", ObjType::Map).unwrap(); + doc.put(&map, "nested_key", "value").unwrap(); + doc.delete(&map, "nested_key").unwrap(); + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + doc.insert(&list, 0, "value").unwrap(); + doc.delete(&list, 0).unwrap(); + let text = doc.put_object(crate::ROOT, "text", ObjType::Text).unwrap(); + doc.insert(&text, 0, "a").unwrap(); + doc.delete(&text, 0).unwrap(); + + doc.put_object(crate::ROOT, "deleted_map", ObjType::Map) + .unwrap(); + doc.delete(crate::ROOT, "deleted_map").unwrap(); + doc.put_object(crate::ROOT, "deleted_list", ObjType::List) + .unwrap(); + doc.delete(crate::ROOT, "deleted_list").unwrap(); + doc.put_object(crate::ROOT, "deleted_text", ObjType::Text) + .unwrap(); + doc.delete(crate::ROOT, "deleted_text").unwrap(); + + let mut obs = ObserverStub::new(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Put { + obj: crate::ROOT, + prop: "map".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::Map), map.clone()), + conflict: false, + }, + ObserverCall::Put { + obj: crate::ROOT, + prop: "text".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::Text), text.clone()), + conflict: false, + }, + ]) + ); + } + + #[test] + fn test_text_spliced() { + let mut doc = crate::AutoCommit::new(); + let text = doc.put_object(crate::ROOT, "text", ObjType::Text).unwrap(); + doc.insert(&text, 0, "a").unwrap(); + doc.splice_text(&text, 1, 0, "bcdef").unwrap(); + doc.splice_text(&text, 2, 2, "g").unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "text".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::Text), text.clone()), + conflict: false, + }, + ObserverCall::SpliceText { + obj: text, + index: 0, + chars: "abgef".to_string() + } + ]) + ); + } + + #[test] + fn test_counters() { + let actor1 = 
crate::ActorId::from("aa".as_bytes()); + let actor2 = crate::ActorId::from("bb".as_bytes()); + let mut doc = crate::AutoCommit::new().with_actor(actor2); + + let mut doc2 = doc.fork().with_actor(actor1); + doc2.put(crate::ROOT, "key", "someval").unwrap(); + + doc.put(crate::ROOT, "key", crate::ScalarValue::Counter(1.into())) + .unwrap(); + doc.increment(crate::ROOT, "key", 2).unwrap(); + doc.increment(crate::ROOT, "key", 3).unwrap(); + + doc.merge(&mut doc2).unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ObserverCall::Put { + obj: crate::ROOT, + prop: "key".into(), + value: ObservedValue::Untagged(Value::Scalar(Cow::Owned( + crate::ScalarValue::Counter(6.into()) + ))), + conflict: true, + },]) + ); + } + + #[test] + fn test_multiple_list_insertions() { + let mut doc = crate::AutoCommit::new(); + + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + doc.insert(&list, 0, 1).unwrap(); + doc.insert(&list, 1, 2).unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Insert { + obj: list.clone(), + index: 0, + value: ObservedValue::Untagged(1.into()), + }, + ObserverCall::Insert { + obj: list, + index: 1, + value: ObservedValue::Untagged(2.into()), + }, + ]) + ); + } + + #[test] + fn test_concurrent_insertions_at_same_index() { + let mut doc = crate::AutoCommit::new().with_actor(crate::ActorId::from("aa".as_bytes())); + + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + + let mut doc2 = doc.fork().with_actor(crate::ActorId::from("bb".as_bytes())); + + doc.insert(&list, 0, 1).unwrap(); + doc2.insert(&list, 0, 2).unwrap(); + doc.merge(&mut doc2).unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Insert { + obj: list.clone(), + index: 0, + value: ObservedValue::Untagged(2.into()), + }, + ObserverCall::Insert { + obj: list, + index: 1, + value: ObservedValue::Untagged(1.into()), + }, + ]) + ); + } + + #[test] + fn test_insert_objects() { + let mut doc = crate::AutoCommit::new().with_actor(crate::ActorId::from("aa".as_bytes())); + + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + + let map = doc.insert_object(&list, 0, ObjType::Map).unwrap(); + doc.put(&map, "key", "value").unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Insert { + obj: list.clone(), + index: 0, + value: ObservedValue::Tagged(Value::Object(ObjType::Map), map.clone()), + }, + ObserverCall::Put { + obj: map, + prop: "key".into(), + value: ObservedValue::Untagged("value".into()), + conflict: false + }, + ]) + ); + } + + #[test] + fn test_insert_and_update() { + let mut doc = 
crate::AutoCommit::new(); + + let list = doc.put_object(crate::ROOT, "list", ObjType::List).unwrap(); + + doc.insert(&list, 0, "one").unwrap(); + doc.insert(&list, 1, "two").unwrap(); + doc.put(&list, 0, "three").unwrap(); + doc.put(&list, 1, "four").unwrap(); + + let mut obs = ObserverStub::new_text_v2(); + super::observe_current_state(doc.document(), &mut obs); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ + ObserverCall::Put { + obj: crate::ROOT, + prop: "list".into(), + value: ObservedValue::Tagged(Value::Object(ObjType::List), list.clone()), + conflict: false, + }, + ObserverCall::Insert { + obj: list.clone(), + index: 0, + value: ObservedValue::Untagged("three".into()), + }, + ObserverCall::Insert { + obj: list.clone(), + index: 1, + value: ObservedValue::Untagged("four".into()), + }, + ]) + ); + } +} diff --git a/rust/automerge/src/op_set.rs b/rust/automerge/src/op_set.rs index 5b50d2b0..aab8ce74 100644 --- a/rust/automerge/src/op_set.rs +++ b/rust/automerge/src/op_set.rs @@ -5,7 +5,7 @@ use crate::op_tree::{self, OpTree}; use crate::parents::Parents; use crate::query::{self, OpIdVisSearch, TreeQuery}; use crate::types::{self, ActorId, Key, ListEncoding, ObjId, Op, OpId, OpIds, OpType, Prop}; -use crate::{ObjType, OpObserver}; +use crate::ObjType; use fxhash::FxBuildHasher; use std::borrow::Borrow; use std::cmp::Ordering; @@ -13,7 +13,7 @@ use std::collections::HashMap; use std::ops::RangeBounds; mod load; -pub(crate) use load::{ObservedOpSetBuilder, OpSetBuilder}; +pub(crate) use load::OpSetBuilder; pub(crate) type OpSet = OpSetInternal; @@ -32,12 +32,6 @@ impl OpSetInternal { OpSetBuilder::new() } - /// Create a builder which passes each operation to `observer`. This will be significantly - /// slower than `OpSetBuilder` - pub(crate) fn observed_builder(observer: &mut O) -> ObservedOpSetBuilder<'_, O> { - ObservedOpSetBuilder::new(observer) - } - pub(crate) fn new() -> Self { let mut trees: HashMap<_, _, _> = Default::default(); trees.insert(ObjId::root(), OpTree::new()); @@ -64,7 +58,7 @@ impl OpSetInternal { } pub(crate) fn iter(&self) -> Iter<'_> { - let mut objs: Vec<_> = self.trees.iter().collect(); + let mut objs: Vec<_> = self.trees.iter().map(|t| (t.0, t.1.objtype, t.1)).collect(); objs.sort_by(|a, b| self.m.lamport_cmp((a.0).0, (b.0).0)); Iter { opset: self, @@ -73,6 +67,17 @@ impl OpSetInternal { } } + /// Iterate over objects in the opset in causal order + pub(crate) fn iter_objs( + &self, + ) -> impl Iterator)> + '_ { + let mut objs: Vec<_> = self.trees.iter().map(|t| (t.0, t.1.objtype, t.1)).collect(); + objs.sort_by(|a, b| self.m.lamport_cmp((a.0).0, (b.0).0)); + IterObjs { + trees: objs.into_iter(), + } + } + pub(crate) fn parents(&self, obj: ObjId) -> Parents<'_> { Parents { obj, ops: self } } @@ -286,7 +291,7 @@ impl Default for OpSetInternal { } impl<'a> IntoIterator for &'a OpSetInternal { - type Item = (&'a ObjId, &'a Op); + type Item = (&'a ObjId, ObjType, &'a Op); type IntoIter = Iter<'a>; @@ -295,27 +300,41 @@ impl<'a> IntoIterator for &'a OpSetInternal { } } +pub(crate) struct IterObjs<'a> { + trees: std::vec::IntoIter<(&'a ObjId, ObjType, &'a op_tree::OpTree)>, +} + +impl<'a> Iterator for IterObjs<'a> { + type Item = (&'a ObjId, ObjType, op_tree::OpTreeIter<'a>); + + fn next(&mut self) -> Option { + self.trees + .next() + .map(|(id, typ, tree)| (id, typ, tree.iter())) + } +} + #[derive(Clone)] pub(crate) struct Iter<'a> { opset: &'a OpSet, - trees: std::vec::IntoIter<(&'a ObjId, &'a op_tree::OpTree)>, - current: Option<(&'a ObjId, 
op_tree::OpTreeIter<'a>)>, + trees: std::vec::IntoIter<(&'a ObjId, ObjType, &'a op_tree::OpTree)>, + current: Option<(&'a ObjId, ObjType, op_tree::OpTreeIter<'a>)>, } impl<'a> Iterator for Iter<'a> { - type Item = (&'a ObjId, &'a Op); + type Item = (&'a ObjId, ObjType, &'a Op); fn next(&mut self) -> Option { - if let Some((id, tree)) = &mut self.current { + if let Some((id, typ, tree)) = &mut self.current { if let Some(next) = tree.next() { - return Some((id, next)); + return Some((id, *typ, next)); } } loop { - self.current = self.trees.next().map(|o| (o.0, o.1.iter())); - if let Some((obj, tree)) = &mut self.current { + self.current = self.trees.next().map(|o| (o.0, o.1, o.2.iter())); + if let Some((obj, typ, tree)) = &mut self.current { if let Some(next) = tree.next() { - return Some((obj, next)); + return Some((obj, *typ, next)); } } else { return None; diff --git a/rust/automerge/src/op_set/load.rs b/rust/automerge/src/op_set/load.rs index 0df7f6ef..e14f46b7 100644 --- a/rust/automerge/src/op_set/load.rs +++ b/rust/automerge/src/op_set/load.rs @@ -6,8 +6,7 @@ use super::{OpSet, OpTree}; use crate::{ op_tree::OpTreeInternal, storage::load::{DocObserver, LoadedObject}, - types::{ObjId, Op}, - Automerge, OpObserver, + types::ObjId, }; /// An opset builder which creates an optree for each object as it finishes loading, inserting the @@ -51,38 +50,3 @@ impl DocObserver for OpSetBuilder { } } } - -/// A DocObserver which just accumulates ops until the document has finished reconstructing and -/// then inserts all of the ops using `OpSet::insert_op_with_observer` -pub(crate) struct ObservedOpSetBuilder<'a, O: OpObserver> { - observer: &'a mut O, - ops: Vec<(ObjId, Op)>, -} - -impl<'a, O: OpObserver> ObservedOpSetBuilder<'a, O> { - pub(crate) fn new(observer: &'a mut O) -> Self { - Self { - observer, - ops: Vec::new(), - } - } -} - -impl<'a, O: OpObserver> DocObserver for ObservedOpSetBuilder<'a, O> { - type Output = OpSet; - - fn object_loaded(&mut self, object: LoadedObject) { - self.ops.reserve(object.ops.len()); - for op in object.ops { - self.ops.push((object.id, op)); - } - } - - fn finish(self, _metadata: super::OpSetMetadata) -> Self::Output { - let mut doc = Automerge::new(); - for (obj, op) in self.ops { - doc.insert_op_with_observer(&obj, op, self.observer); - } - doc.into_ops() - } -} diff --git a/rust/automerge/src/storage/chunk.rs b/rust/automerge/src/storage/chunk.rs index 06e31973..d0048528 100644 --- a/rust/automerge/src/storage/chunk.rs +++ b/rust/automerge/src/storage/chunk.rs @@ -286,7 +286,7 @@ impl Header { fn hash(typ: ChunkType, data: &[u8]) -> ChangeHash { let mut out = vec![u8::from(typ)]; leb128::write::unsigned(&mut out, data.len() as u64).unwrap(); - out.extend(data.as_ref()); + out.extend(data); let hash_result = Sha256::digest(out); let array: [u8; 32] = hash_result.into(); ChangeHash(array) diff --git a/rust/automerge/src/sync.rs b/rust/automerge/src/sync.rs index 5d71d989..d3b6b3fa 100644 --- a/rust/automerge/src/sync.rs +++ b/rust/automerge/src/sync.rs @@ -524,7 +524,7 @@ impl Message { encode_many(&mut buf, self.changes.iter_mut(), |buf, change| { leb128::write::unsigned(buf, change.raw_bytes().len() as u64).unwrap(); - buf.extend(change.raw_bytes().as_ref()) + buf.extend::<&[u8]>(change.raw_bytes().as_ref()) }); buf diff --git a/rust/automerge/src/transaction/inner.rs b/rust/automerge/src/transaction/inner.rs index 95f922f3..0fe735d5 100644 --- a/rust/automerge/src/transaction/inner.rs +++ b/rust/automerge/src/transaction/inner.rs @@ -1,6 +1,5 @@ use 
std::num::NonZeroU64; -use crate::automerge::Actor; use crate::exid::ExId; use crate::query::{self, OpIdSearch}; use crate::storage::Change as StoredChange; diff --git a/rust/deny.toml b/rust/deny.toml index 12a562ce..473cdae8 100644 --- a/rust/deny.toml +++ b/rust/deny.toml @@ -110,6 +110,9 @@ exceptions = [ # should be revied more fully before release { allow = ["MPL-2.0"], name = "cbindgen" }, { allow = ["BSD-3-Clause"], name = "instant" }, + + # we only use prettytable in tests + { allow = ["BSD-3-Clause"], name = "prettytable" }, ] # Some crates don't have (easily) machine readable licensing information, From 1e33c9d9e0eb33e32dfffe5dd4045aac85822e6a Mon Sep 17 00:00:00 2001 From: Alex Good Date: Wed, 1 Feb 2023 18:08:22 +0000 Subject: [PATCH 55/72] Use Automerge::load instead of load_incremental if empty Problem: when running the sync protocol for a new document the API requires that the user create an empty document and then call `receive_sync_message` on that document. This results in the OpObserver for the new document being called with every single op in the document history. For documents with a large history this can be extremely time consuming, but the OpObserver doesn't need to know about all the hidden states. Solution: Modify `Automerge::load_with` and `Automerge::apply_changes_with` to check if the document is empty before applying changes. If the document _is_ empty then we don't call the observer for every change, but instead use `automerge::observe_current_state` to notify the observer of the new state once all the changes have been applied. --- javascript/test/legacy_tests.ts | 3 +- rust/automerge/src/automerge.rs | 71 +++++++++++++++++++++++++-- rust/automerge/src/automerge/tests.rs | 5 ++ rust/automerge/src/lib.rs | 2 +- 4 files changed, 73 insertions(+), 8 deletions(-) diff --git a/javascript/test/legacy_tests.ts b/javascript/test/legacy_tests.ts index 90c731d9..8c2e552e 100644 --- a/javascript/test/legacy_tests.ts +++ b/javascript/test/legacy_tests.ts @@ -1849,9 +1849,8 @@ describe("Automerge", () => { }) assert.deepStrictEqual(patches, [ { action: "put", path: ["birds"], value: [] }, - { action: "insert", path: ["birds", 0], values: [""] }, + { action: "insert", path: ["birds", 0], values: ["", ""] }, { action: "splice", path: ["birds", 0, 0], value: "Goldfinch" }, - { action: "insert", path: ["birds", 1], values: [""] }, { action: "splice", path: ["birds", 1, 0], value: "Chaffinch" }, ]) }) diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index e0db8b5a..a7223c7c 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -37,6 +37,15 @@ pub(crate) enum Actor { Cached(usize), } +/// What to do when loading a document partially succeeds +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum OnPartialLoad { + /// Ignore the error and return the loaded changes + Ignore, + /// Fail the entire load + Error, +} + /// An automerge document which does not manage transactions for you. 
/// /// ## Creating, loading, merging and forking documents @@ -121,6 +130,18 @@ impl Automerge { &self.ops } + /// Whether this document has any operations + pub fn is_empty(&self) -> bool { + self.history.is_empty() && self.queue.is_empty() + } + + pub(crate) fn actor_id(&self) -> ActorId { + match &self.actor { + Actor::Unused(id) => id.clone(), + Actor::Cached(idx) => self.ops.m.actors[*idx].clone(), + } + } + /// Remove the current actor from the opset if it has no ops /// /// If the current actor ID has no ops in the opset then remove it from the cache of actor IDs. @@ -410,20 +431,26 @@ impl Automerge { /// Load a document. pub fn load(data: &[u8]) -> Result { - Self::load_with::<()>(data, VerificationMode::Check, None) + Self::load_with::<()>(data, OnPartialLoad::Error, VerificationMode::Check, None) } /// Load a document without verifying the head hashes /// /// This is useful for debugging as it allows you to examine a corrupted document. pub fn load_unverified_heads(data: &[u8]) -> Result { - Self::load_with::<()>(data, VerificationMode::DontCheck, None) + Self::load_with::<()>( + data, + OnPartialLoad::Error, + VerificationMode::DontCheck, + None, + ) } /// Load a document with an observer #[tracing::instrument(skip(data, observer), err)] pub fn load_with( data: &[u8], + on_error: OnPartialLoad, mode: VerificationMode, mut observer: Option<&mut Obs>, ) -> Result { @@ -501,7 +528,11 @@ impl Automerge { am.apply_change(change, &mut observer); } } - load::LoadedChanges::Partial { error, .. } => return Err(error.into()), + load::LoadedChanges::Partial { error, .. } => { + if on_error == OnPartialLoad::Error { + return Err(error.into()); + } + } } if let Some(observer) = &mut observer { current_state::observe_current_state(&am, *observer); @@ -526,6 +557,18 @@ impl Automerge { data: &[u8], op_observer: Option<&mut Obs>, ) -> Result { + if self.is_empty() { + let mut doc = + Self::load_with::<()>(data, OnPartialLoad::Ignore, VerificationMode::Check, None)?; + doc = doc + .with_encoding(self.text_encoding) + .with_actor(self.actor_id()); + if let Some(obs) = op_observer { + current_state::observe_current_state(&doc, obs); + } + *self = doc; + return Ok(self.ops.len()); + } let changes = match load::load_changes(storage::parse::Input::new(data)) { load::LoadedChanges::Complete(c) => c, load::LoadedChanges::Partial { error, loaded, .. } => { @@ -566,6 +609,11 @@ impl Automerge { changes: I, mut op_observer: Option<&mut Obs>, ) -> Result<(), AutomergeError> { + // Record this so we can avoid observing each individual change and instead just observe + // the final state after all the changes have been applied. We can only do this for an + // empty document right now, once we have logic to produce the diffs between arbitrary + // states of the OpSet we can make this cleaner. 
+ let empty_at_start = self.is_empty(); for c in changes { if !self.history_index.contains_key(&c.hash()) { if self.duplicate_seq(&c) { @@ -575,7 +623,11 @@ impl Automerge { )); } if self.is_causally_ready(&c) { - self.apply_change(c, &mut op_observer); + if empty_at_start { + self.apply_change::<()>(c, &mut None); + } else { + self.apply_change(c, &mut op_observer); + } } else { self.queue.push(c); } @@ -583,7 +635,16 @@ impl Automerge { } while let Some(c) = self.pop_next_causally_ready_change() { if !self.history_index.contains_key(&c.hash()) { - self.apply_change(c, &mut op_observer); + if empty_at_start { + self.apply_change::<()>(c, &mut None); + } else { + self.apply_change(c, &mut op_observer); + } + } + } + if empty_at_start { + if let Some(observer) = &mut op_observer { + current_state::observe_current_state(self, *observer); } } Ok(()) diff --git a/rust/automerge/src/automerge/tests.rs b/rust/automerge/src/automerge/tests.rs index 8d533fed..3511c4ed 100644 --- a/rust/automerge/src/automerge/tests.rs +++ b/rust/automerge/src/automerge/tests.rs @@ -1507,6 +1507,11 @@ fn observe_counter_change_application() { let changes = doc.get_changes(&[]).unwrap().into_iter().cloned(); let mut new_doc = AutoCommit::new().with_observer(VecOpObserver::default()); + // make a new change to the doc to stop the empty doc logic from skipping the intermediate + // patches. The is probably not really necessary, we could update this test to just test that + // the correct final state is emitted. For now though, we leave it as is. + new_doc.put(ROOT, "foo", "bar").unwrap(); + new_doc.observer().take_patches(); new_doc.apply_changes(changes).unwrap(); assert_eq!( new_doc.observer().take_patches(), diff --git a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index bafd8983..0b4cd743 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -274,7 +274,7 @@ mod values; #[cfg(feature = "optree-visualisation")] mod visualisation; -pub use crate::automerge::Automerge; +pub use crate::automerge::{Automerge, OnPartialLoad}; pub use autocommit::{AutoCommit, AutoCommitWithObs}; pub use autoserde::AutoSerde; pub use change::{Change, LoadError as LoadChangeError}; From 13a775ed9adc04c55067e3dc2eaa294fc862cb09 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 2 Feb 2023 13:28:22 +0000 Subject: [PATCH 56/72] Speed up loading by generating clocks on demand Context: currently we store a mapping from ChangeHash -> Clock, where `Clock` is the set of (ActorId, (Sequence number, max Op)) pairs derived from the given change and it's dependencies. This clock is used to determine what operations are visible at a given set of heads. Problem: populating this mapping for documents with large histories containing many actors can be very slow as for each change we have to allocate and merge a bunch of hashmaps. Solution: instead of creating the clocks on load, create an adjacency list based representation of the change graph and then derive the clock from this graph when it is needed. Traversing even large graphs is still almost as fast as looking up the clock in a hashmap. 
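As an aside (not part of the applied diff), a minimal sketch of the idea with hypothetical simplified types rather than the real `ChangeGraph`/`Clock` API: each node records (actor index, seq, max_op) plus its parent indices, and a clock is derived by a stack-based walk from the requested heads, folding every visited node into the clock exactly once.

    use std::collections::{HashMap, HashSet};

    // Hypothetical, simplified node: the real ChangeGraph keeps nodes and edges in
    // flat vectors referenced by u32 indices, but the traversal idea is the same.
    struct Node {
        actor: usize,
        seq: u64,
        max_op: u64,
        parents: Vec<usize>,
    }

    // Derive a clock (actor -> (seq, max_op)) by walking the ancestors of `heads`.
    fn clock_for_heads(nodes: &[Node], heads: &[usize]) -> HashMap<usize, (u64, u64)> {
        let mut clock = HashMap::new();
        let mut to_visit: Vec<usize> = heads.to_vec();
        let mut visited = HashSet::new();
        while let Some(idx) = to_visit.pop() {
            if !visited.insert(idx) {
                continue; // each node is folded into the clock at most once
            }
            let node = &nodes[idx];
            let entry = clock.entry(node.actor).or_insert((0u64, 0u64));
            entry.0 = entry.0.max(node.seq);
            entry.1 = entry.1.max(node.max_op);
            to_visit.extend(node.parents.iter().copied());
        }
        clock
    }

Keeping the nodes and edges in contiguous vectors indexed by small integers is what keeps this traversal cache-friendly, which is why deriving the clock on demand stays competitive with looking it up in a precomputed hash map.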
--- rust/automerge/src/automerge.rs | 135 ++++------- rust/automerge/src/change_graph.rs | 344 +++++++++++++++++++++++++++++ rust/automerge/src/clock.rs | 6 - rust/automerge/src/clocks.rs | 44 ---- rust/automerge/src/error.rs | 2 +- rust/automerge/src/lib.rs | 2 +- 6 files changed, 392 insertions(+), 141 deletions(-) create mode 100644 rust/automerge/src/change_graph.rs delete mode 100644 rust/automerge/src/clocks.rs diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index a7223c7c..128d4418 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -4,8 +4,7 @@ use std::fmt::Debug; use std::num::NonZeroU64; use std::ops::RangeBounds; -use crate::clock::ClockData; -use crate::clocks::Clocks; +use crate::change_graph::ChangeGraph; use crate::columnar::Key as EncodedKey; use crate::exid::ExId; use crate::keys::Keys; @@ -87,8 +86,8 @@ pub struct Automerge { history: Vec, /// Mapping from change hash to index into the history list. history_index: HashMap, - /// Mapping from change hash to vector clock at this state. - clocks: HashMap, + /// Graph of changes + change_graph: ChangeGraph, /// Mapping from actor index to list of seqs seen for them. states: HashMap>, /// Current dependencies of this document (heads hashes). @@ -111,7 +110,7 @@ impl Automerge { queue: vec![], history: vec![], history_index: HashMap::new(), - clocks: HashMap::new(), + change_graph: ChangeGraph::new(), states: HashMap::new(), ops: Default::default(), deps: Default::default(), @@ -477,14 +476,14 @@ impl Automerge { .map_err(|e| load::Error::InflateDocument(Box::new(e)))?; let mut hashes_by_index = HashMap::new(); let mut actor_to_history: HashMap> = HashMap::new(); - let mut clocks = Clocks::new(); + let mut change_graph = ChangeGraph::new(); for (index, change) in changes.iter().enumerate() { // SAFETY: This should be fine because we just constructed an opset containing // all the changes let actor_index = op_set.m.actors.lookup(change.actor_id()).unwrap(); actor_to_history.entry(actor_index).or_default().push(index); hashes_by_index.insert(index, change.hash()); - clocks.add_change(change, actor_index)?; + change_graph.add_change(change, actor_index)?; } let history_index = hashes_by_index.into_iter().map(|(k, v)| (v, k)).collect(); Self { @@ -492,7 +491,7 @@ impl Automerge { history: changes, history_index, states: actor_to_history, - clocks: clocks.into(), + change_graph, ops: op_set, deps: heads.into_iter().collect(), saved: Default::default(), @@ -824,16 +823,8 @@ impl Automerge { .filter(|hash| self.history_index.contains_key(hash)) .copied() .collect::>(); - let heads_clock = self.clock_at(&heads)?; - // keep the hashes that are concurrent or after the heads - changes.retain(|hash| { - self.clocks - .get(hash) - .unwrap() - .partial_cmp(&heads_clock) - .map_or(true, |o| o == Ordering::Greater) - }); + self.change_graph.remove_ancestors(changes, &heads); Ok(()) } @@ -841,7 +832,7 @@ impl Automerge { /// Get the changes since `have_deps` in this document using a clock internally. 
fn get_changes_clock(&self, have_deps: &[ChangeHash]) -> Result, AutomergeError> { // get the clock for the given deps - let clock = self.clock_at(have_deps)?; + let clock = self.clock_at(have_deps); // get the documents current clock @@ -875,26 +866,8 @@ impl Automerge { .find(|c| c.actor_id() == self.get_actor()); } - fn clock_at(&self, heads: &[ChangeHash]) -> Result { - if let Some(first_hash) = heads.first() { - let mut clock = self - .clocks - .get(first_hash) - .ok_or(AutomergeError::MissingHash(*first_hash))? - .clone(); - - for hash in &heads[1..] { - let c = self - .clocks - .get(hash) - .ok_or(AutomergeError::MissingHash(*hash))?; - clock.merge(c); - } - - Ok(clock) - } else { - Ok(Clock::new()) - } + fn clock_at(&self, heads: &[ChangeHash]) -> Clock { + self.change_graph.clock_for_heads(heads) } fn get_hash(&self, actor: usize, seq: u64) -> Result { @@ -920,22 +893,9 @@ impl Automerge { .push(history_index); self.history_index.insert(change.hash(), history_index); - let mut clock = Clock::new(); - for hash in change.deps() { - let c = self - .clocks - .get(hash) - .expect("Change's deps should already be in the document"); - clock.merge(c); - } - clock.include( - actor_index, - ClockData { - max_op: change.max_op(), - seq: change.seq(), - }, - ); - self.clocks.insert(change.hash(), clock); + self.change_graph + .add_change(&change, actor_index) + .expect("Change's deps should already be in the document"); self.history_index.insert(change.hash(), history_index); self.history.push(change); @@ -1197,9 +1157,8 @@ impl ReadDoc for Automerge { fn keys_at>(&self, obj: O, heads: &[ChangeHash]) -> KeysAt<'_, '_> { if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return KeysAt::new(self, self.ops.keys_at(obj, clock)); - } + let clock = self.clock_at(heads); + return KeysAt::new(self, self.ops.keys_at(obj, clock)); } KeysAt::new(self, None) } @@ -1223,10 +1182,9 @@ impl ReadDoc for Automerge { heads: &[ChangeHash], ) -> MapRangeAt<'_, R> { if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - let iter_range = self.ops.map_range_at(obj, range, clock); - return MapRangeAt::new(self, iter_range); - } + let clock = self.clock_at(heads); + let iter_range = self.ops.map_range_at(obj, range, clock); + return MapRangeAt::new(self, iter_range); } MapRangeAt::new(self, None) } @@ -1250,10 +1208,9 @@ impl ReadDoc for Automerge { heads: &[ChangeHash], ) -> ListRangeAt<'_, R> { if let Ok((obj, _)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - let iter_range = self.ops.list_range_at(obj, range, clock); - return ListRangeAt::new(self, iter_range); - } + let clock = self.clock_at(heads); + let iter_range = self.ops.list_range_at(obj, range, clock); + return ListRangeAt::new(self, iter_range); } ListRangeAt::new(self, None) } @@ -1272,20 +1229,20 @@ impl ReadDoc for Automerge { fn values_at>(&self, obj: O, heads: &[ChangeHash]) -> Values<'_> { if let Ok((obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return match obj_type { - ObjType::Map | ObjType::Table => { - let iter_range = self.ops.map_range_at(obj, .., clock); - Values::new(self, iter_range) - } - ObjType::List | ObjType::Text => { - let iter_range = self.ops.list_range_at(obj, .., clock); - Values::new(self, iter_range) - } - }; + let clock = self.clock_at(heads); + match obj_type { + ObjType::Map | ObjType::Table => { + let iter_range = self.ops.map_range_at(obj, 
.., clock); + Values::new(self, iter_range) + } + ObjType::List | ObjType::Text => { + let iter_range = self.ops.list_range_at(obj, .., clock); + Values::new(self, iter_range) + } } + } else { + Values::empty(self) } - Values::empty(self) } fn length>(&self, obj: O) -> usize { @@ -1303,18 +1260,18 @@ impl ReadDoc for Automerge { fn length_at>(&self, obj: O, heads: &[ChangeHash]) -> usize { if let Ok((inner_obj, obj_type)) = self.exid_to_obj(obj.as_ref()) { - if let Ok(clock) = self.clock_at(heads) { - return if obj_type == ObjType::Map || obj_type == ObjType::Table { - self.keys_at(obj, heads).count() - } else { - let encoding = ListEncoding::new(obj_type, self.text_encoding); - self.ops - .search(&inner_obj, query::LenAt::new(clock, encoding)) - .len - }; + let clock = self.clock_at(heads); + if obj_type == ObjType::Map || obj_type == ObjType::Table { + self.keys_at(obj, heads).count() + } else { + let encoding = ListEncoding::new(obj_type, self.text_encoding); + self.ops + .search(&inner_obj, query::LenAt::new(clock, encoding)) + .len } + } else { + 0 } - 0 } fn object_type>(&self, obj: O) -> Result { @@ -1338,7 +1295,7 @@ impl ReadDoc for Automerge { heads: &[ChangeHash], ) -> Result { let obj = self.exid_to_obj(obj.as_ref())?.0; - let clock = self.clock_at(heads)?; + let clock = self.clock_at(heads); let query = self.ops.search(&obj, query::ListValsAt::new(clock)); let mut buffer = String::new(); for q in &query.ops { @@ -1413,7 +1370,7 @@ impl ReadDoc for Automerge { ) -> Result, ExId)>, AutomergeError> { let prop = prop.into(); let obj = self.exid_to_obj(obj.as_ref())?.0; - let clock = self.clock_at(heads)?; + let clock = self.clock_at(heads); let result = match prop { Prop::Map(p) => { let prop = self.ops.m.props.lookup(&p); diff --git a/rust/automerge/src/change_graph.rs b/rust/automerge/src/change_graph.rs new file mode 100644 index 00000000..01d269d8 --- /dev/null +++ b/rust/automerge/src/change_graph.rs @@ -0,0 +1,344 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use crate::{ + clock::{Clock, ClockData}, + Change, ChangeHash, +}; + +/// The graph of changes +/// +/// This is a sort of adjacency list based representation, except that instead of using linked +/// lists, we keep all the edges and nodes in two vecs and reference them by index which plays nice +/// with the cache +#[derive(Debug, Clone)] +pub(crate) struct ChangeGraph { + nodes: Vec, + edges: Vec, + hashes: Vec, + nodes_by_hash: BTreeMap, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +struct NodeIdx(u32); + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +struct EdgeIdx(u32); + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +struct HashIdx(u32); + +#[derive(Debug, Clone)] +struct Edge { + // Edges are always child -> parent so we only store the target, the child is implicit + // as you get the edge from the child + target: NodeIdx, + next: Option, +} + +#[derive(Debug, Clone)] +struct ChangeNode { + hash_idx: HashIdx, + actor_index: usize, + seq: u64, + max_op: u64, + parents: Option, +} + +impl ChangeGraph { + pub(crate) fn new() -> Self { + Self { + nodes: Vec::new(), + edges: Vec::new(), + nodes_by_hash: BTreeMap::new(), + hashes: Vec::new(), + } + } + + pub(crate) fn add_change( + &mut self, + change: &Change, + actor_idx: usize, + ) -> Result<(), MissingDep> { + let hash = change.hash(); + if self.nodes_by_hash.contains_key(&hash) { + return Ok(()); + } + let parent_indices = change + .deps() + .iter() + .map(|h| 
self.nodes_by_hash.get(h).copied().ok_or(MissingDep(*h))) + .collect::, _>>()?; + let node_idx = self.add_node(actor_idx, change); + self.nodes_by_hash.insert(hash, node_idx); + for parent_idx in parent_indices { + self.add_parent(node_idx, parent_idx); + } + Ok(()) + } + + fn add_node(&mut self, actor_index: usize, change: &Change) -> NodeIdx { + let idx = NodeIdx(self.nodes.len() as u32); + let hash_idx = self.add_hash(change.hash()); + self.nodes.push(ChangeNode { + hash_idx, + actor_index, + seq: change.seq(), + max_op: change.max_op(), + parents: None, + }); + idx + } + + fn add_hash(&mut self, hash: ChangeHash) -> HashIdx { + let idx = HashIdx(self.hashes.len() as u32); + self.hashes.push(hash); + idx + } + + fn add_parent(&mut self, child_idx: NodeIdx, parent_idx: NodeIdx) { + let new_edge_idx = EdgeIdx(self.edges.len() as u32); + let new_edge = Edge { + target: parent_idx, + next: None, + }; + self.edges.push(new_edge); + + let child = &mut self.nodes[child_idx.0 as usize]; + if let Some(edge_idx) = child.parents { + let mut edge = &mut self.edges[edge_idx.0 as usize]; + while let Some(next) = edge.next { + edge = &mut self.edges[next.0 as usize]; + } + edge.next = Some(new_edge_idx); + } else { + child.parents = Some(new_edge_idx); + } + } + + fn parents(&self, node_idx: NodeIdx) -> impl Iterator + '_ { + let mut edge_idx = self.nodes[node_idx.0 as usize].parents; + std::iter::from_fn(move || { + let this_edge_idx = edge_idx?; + let edge = &self.edges[this_edge_idx.0 as usize]; + edge_idx = edge.next; + Some(edge.target) + }) + } + + pub(crate) fn clock_for_heads(&self, heads: &[ChangeHash]) -> Clock { + let mut clock = Clock::new(); + + self.traverse_ancestors(heads, |node, _hash| { + clock.include( + node.actor_index, + ClockData { + max_op: node.max_op, + seq: node.seq, + }, + ); + }); + + clock + } + + pub(crate) fn remove_ancestors( + &self, + changes: &mut BTreeSet, + heads: &[ChangeHash], + ) { + self.traverse_ancestors(heads, |_node, hash| { + changes.remove(hash); + }); + } + + /// Call `f` for each (node, hash) in the graph, starting from the given heads + /// + /// No guarantees are made about the order of traversal but each node will only be visited + /// once. 
+ fn traverse_ancestors( + &self, + heads: &[ChangeHash], + mut f: F, + ) { + let mut to_visit = heads + .iter() + .filter_map(|h| self.nodes_by_hash.get(h)) + .copied() + .collect::>(); + + let mut visited = BTreeSet::new(); + + while let Some(idx) = to_visit.pop() { + if visited.contains(&idx) { + continue; + } else { + visited.insert(idx); + } + let node = &self.nodes[idx.0 as usize]; + let hash = &self.hashes[node.hash_idx.0 as usize]; + f(node, hash); + to_visit.extend(self.parents(idx)); + } + } +} + +#[derive(Debug, thiserror::Error)] +#[error("attempted to derive a clock for a change with dependencies we don't have")] +pub struct MissingDep(ChangeHash); + +#[cfg(test)] +mod tests { + use std::{ + num::NonZeroU64, + time::{SystemTime, UNIX_EPOCH}, + }; + + use crate::{ + clock::ClockData, + op_tree::OpSetMetadata, + storage::{change::ChangeBuilder, convert::op_as_actor_id}, + types::{Key, ObjId, Op, OpId, OpIds}, + ActorId, + }; + + use super::*; + + #[test] + fn clock_by_heads() { + let mut builder = TestGraphBuilder::new(); + let actor1 = builder.actor(); + let actor2 = builder.actor(); + let actor3 = builder.actor(); + let change1 = builder.change(&actor1, 10, &[]); + let change2 = builder.change(&actor2, 20, &[change1]); + let change3 = builder.change(&actor3, 30, &[change1]); + let change4 = builder.change(&actor1, 10, &[change2, change3]); + let graph = builder.build(); + + let mut expected_clock = Clock::new(); + expected_clock.include(builder.index(&actor1), ClockData { max_op: 50, seq: 2 }); + expected_clock.include(builder.index(&actor2), ClockData { max_op: 30, seq: 1 }); + expected_clock.include(builder.index(&actor3), ClockData { max_op: 40, seq: 1 }); + + let clock = graph.clock_for_heads(&[change4]); + assert_eq!(clock, expected_clock); + } + + #[test] + fn remove_ancestors() { + let mut builder = TestGraphBuilder::new(); + let actor1 = builder.actor(); + let actor2 = builder.actor(); + let actor3 = builder.actor(); + let change1 = builder.change(&actor1, 10, &[]); + let change2 = builder.change(&actor2, 20, &[change1]); + let change3 = builder.change(&actor3, 30, &[change1]); + let change4 = builder.change(&actor1, 10, &[change2, change3]); + let graph = builder.build(); + + let mut changes = vec![change1, change2, change3, change4] + .into_iter() + .collect::>(); + let heads = vec![change2]; + graph.remove_ancestors(&mut changes, &heads); + + let expected_changes = vec![change3, change4].into_iter().collect::>(); + + assert_eq!(changes, expected_changes); + } + + struct TestGraphBuilder { + actors: Vec, + changes: Vec, + seqs_by_actor: BTreeMap, + } + + impl TestGraphBuilder { + fn new() -> Self { + TestGraphBuilder { + actors: Vec::new(), + changes: Vec::new(), + seqs_by_actor: BTreeMap::new(), + } + } + + fn actor(&mut self) -> ActorId { + let actor = ActorId::random(); + self.actors.push(actor.clone()); + actor + } + + fn index(&self, actor: &ActorId) -> usize { + self.actors.iter().position(|a| a == actor).unwrap() + } + + /// Create a change with `num_new_ops` and `parents` for `actor` + /// + /// The `start_op` and `seq` of the change will be computed from the + /// previous changes for the same actor. 
+ fn change( + &mut self, + actor: &ActorId, + num_new_ops: usize, + parents: &[ChangeHash], + ) -> ChangeHash { + let mut meta = OpSetMetadata::from_actors(self.actors.clone()); + let key = meta.props.cache("key".to_string()); + + let start_op = parents + .iter() + .map(|c| { + self.changes + .iter() + .find(|change| change.hash() == *c) + .unwrap() + .max_op() + }) + .max() + .unwrap_or(0) + + 1; + + let actor_idx = self.index(actor); + let ops = (0..num_new_ops) + .map(|opnum| Op { + id: OpId::new(start_op + opnum as u64, actor_idx), + action: crate::OpType::Put("value".into()), + key: Key::Map(key), + succ: OpIds::empty(), + pred: OpIds::empty(), + insert: false, + }) + .collect::>(); + + let root = ObjId::root(); + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as i64; + let seq = self.seqs_by_actor.entry(actor.clone()).or_insert(1); + let change = Change::new( + ChangeBuilder::new() + .with_dependencies(parents.to_vec()) + .with_start_op(NonZeroU64::new(start_op).unwrap()) + .with_actor(actor.clone()) + .with_seq(*seq) + .with_timestamp(timestamp) + .build(ops.iter().map(|op| op_as_actor_id(&root, op, &meta))) + .unwrap(), + ); + *seq = seq.checked_add(1).unwrap(); + let hash = change.hash(); + self.changes.push(change); + hash + } + + fn build(&self) -> ChangeGraph { + let mut graph = ChangeGraph::new(); + for change in &self.changes { + let actor_idx = self.index(change.actor_id()); + graph.add_change(change, actor_idx).unwrap(); + } + graph + } + } +} diff --git a/rust/automerge/src/clock.rs b/rust/automerge/src/clock.rs index 79125323..64d00fcf 100644 --- a/rust/automerge/src/clock.rs +++ b/rust/automerge/src/clock.rs @@ -71,12 +71,6 @@ impl Clock { self.0.get(actor_index) } - pub(crate) fn merge(&mut self, other: &Self) { - for (actor, data) in &other.0 { - self.include(*actor, *data); - } - } - fn is_greater(&self, other: &Self) -> bool { let mut has_greater = false; diff --git a/rust/automerge/src/clocks.rs b/rust/automerge/src/clocks.rs deleted file mode 100644 index 60fc5c71..00000000 --- a/rust/automerge/src/clocks.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::{ - clock::{Clock, ClockData}, - Change, ChangeHash, -}; -use std::collections::HashMap; - -pub(crate) struct Clocks(HashMap); - -#[derive(Debug, thiserror::Error)] -#[error("attempted to derive a clock for a change with dependencies we don't have")] -pub struct MissingDep(ChangeHash); - -impl Clocks { - pub(crate) fn new() -> Self { - Self(HashMap::new()) - } - - pub(crate) fn add_change( - &mut self, - change: &Change, - actor_index: usize, - ) -> Result<(), MissingDep> { - let mut clock = Clock::new(); - for hash in change.deps() { - let c = self.0.get(hash).ok_or(MissingDep(*hash))?; - clock.merge(c); - } - clock.include( - actor_index, - ClockData { - max_op: change.max_op(), - seq: change.seq(), - }, - ); - self.0.insert(change.hash(), clock); - Ok(()) - } -} - -impl From for HashMap { - fn from(c: Clocks) -> Self { - c.0 - } -} diff --git a/rust/automerge/src/error.rs b/rust/automerge/src/error.rs index 0f024d86..57a87167 100644 --- a/rust/automerge/src/error.rs +++ b/rust/automerge/src/error.rs @@ -7,7 +7,7 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum AutomergeError { #[error(transparent)] - Clocks(#[from] crate::clocks::MissingDep), + ChangeGraph(#[from] crate::change_graph::MissingDep), #[error("failed to load compressed data: {0}")] Deflate(#[source] std::io::Error), #[error("duplicate seq {0} found for actor {1}")] diff --git 
a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index 0b4cd743..fb8a3793 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -244,8 +244,8 @@ mod autocommit; mod automerge; mod autoserde; mod change; +mod change_graph; mod clock; -mod clocks; mod columnar; mod convert; mod error; From c5fde2802f8dfeaadd2394942d1deebbb7a590d7 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Fri, 3 Feb 2023 15:53:09 +0000 Subject: [PATCH 57/72] @automerge/automerge-wasm@0.1.24 and @automerge/automerge@2.0.2-alpha.1 --- javascript/package.json | 4 ++-- rust/automerge-wasm/package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index 017c5a54..8712920c 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.1", + "version": "2.0.2-alpha.1", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -47,7 +47,7 @@ "typescript": "^4.9.4" }, "dependencies": { - "@automerge/automerge-wasm": "0.1.23", + "@automerge/automerge-wasm": "0.1.24", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index cce3199f..57354ce1 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.23", + "version": "0.1.24", "license": "MIT", "files": [ "README.md", From a24d536d16f2adeea7bbdf094402665a80f400ab Mon Sep 17 00:00:00 2001 From: Alex Good Date: Sat, 4 Feb 2023 14:05:10 +0000 Subject: [PATCH 58/72] Move automerge::SequenceTree to automerge_wasm::SequenceTree The `SequenceTree` is only ever used in `automerge_wasm` so move it there. 
--- rust/automerge-wasm/Cargo.toml | 1 + rust/automerge-wasm/src/lib.rs | 1 + rust/automerge-wasm/src/observer.rs | 4 +- .../src/sequence_tree.rs | 81 +++---------------- rust/automerge/src/lib.rs | 3 - 5 files changed, 14 insertions(+), 76 deletions(-) rename rust/{automerge => automerge-wasm}/src/sequence_tree.rs (87%) diff --git a/rust/automerge-wasm/Cargo.toml b/rust/automerge-wasm/Cargo.toml index 3d2fafe4..b6055a7d 100644 --- a/rust/automerge-wasm/Cargo.toml +++ b/rust/automerge-wasm/Cargo.toml @@ -57,5 +57,6 @@ features = ["console"] [dev-dependencies] futures = "^0.1" +proptest = { version = "^1.0.0", default-features = false, features = ["std"] } wasm-bindgen-futures = "^0.4" wasm-bindgen-test = "^0.3" diff --git a/rust/automerge-wasm/src/lib.rs b/rust/automerge-wasm/src/lib.rs index b53bf3b9..09072ca7 100644 --- a/rust/automerge-wasm/src/lib.rs +++ b/rust/automerge-wasm/src/lib.rs @@ -41,6 +41,7 @@ use wasm_bindgen::JsCast; mod interop; mod observer; +mod sequence_tree; mod sync; mod value; diff --git a/rust/automerge-wasm/src/observer.rs b/rust/automerge-wasm/src/observer.rs index c0b462a6..2351c762 100644 --- a/rust/automerge-wasm/src/observer.rs +++ b/rust/automerge-wasm/src/observer.rs @@ -6,10 +6,12 @@ use crate::{ interop::{self, alloc, js_set}, TextRepresentation, }; -use automerge::{ObjId, OpObserver, Prop, ReadDoc, ScalarValue, SequenceTree, Value}; +use automerge::{ObjId, OpObserver, Prop, ReadDoc, ScalarValue, Value}; use js_sys::{Array, Object}; use wasm_bindgen::prelude::*; +use crate::sequence_tree::SequenceTree; + #[derive(Debug, Clone, Default)] pub(crate) struct Observer { enabled: bool, diff --git a/rust/automerge/src/sequence_tree.rs b/rust/automerge-wasm/src/sequence_tree.rs similarity index 87% rename from rust/automerge/src/sequence_tree.rs rename to rust/automerge-wasm/src/sequence_tree.rs index f95ceab3..91b183a2 100644 --- a/rust/automerge/src/sequence_tree.rs +++ b/rust/automerge-wasm/src/sequence_tree.rs @@ -5,10 +5,10 @@ use std::{ }; pub(crate) const B: usize = 16; -pub type SequenceTree = SequenceTreeInternal; +pub(crate) type SequenceTree = SequenceTreeInternal; #[derive(Clone, Debug)] -pub struct SequenceTreeInternal { +pub(crate) struct SequenceTreeInternal { root_node: Option>, } @@ -24,22 +24,17 @@ where T: Clone + Debug, { /// Construct a new, empty, sequence. - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { root_node: None } } /// Get the length of the sequence. - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.root_node.as_ref().map_or(0, |n| n.len()) } - /// Check if the sequence is empty. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - /// Create an iterator through the sequence. - pub fn iter(&self) -> Iter<'_, T> { + pub(crate) fn iter(&self) -> Iter<'_, T> { Iter { inner: self, index: 0, @@ -51,7 +46,7 @@ where /// # Panics /// /// Panics if `index > len`. - pub fn insert(&mut self, index: usize, element: T) { + pub(crate) fn insert(&mut self, index: usize, element: T) { let old_len = self.len(); if let Some(root) = self.root_node.as_mut() { #[cfg(debug_assertions)] @@ -94,27 +89,22 @@ where } /// Push the `element` onto the back of the sequence. - pub fn push(&mut self, element: T) { + pub(crate) fn push(&mut self, element: T) { let l = self.len(); self.insert(l, element) } /// Get the `element` at `index` in the sequence. 
- pub fn get(&self, index: usize) -> Option<&T> { + pub(crate) fn get(&self, index: usize) -> Option<&T> { self.root_node.as_ref().and_then(|n| n.get(index)) } - /// Get the `element` at `index` in the sequence. - pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { - self.root_node.as_mut().and_then(|n| n.get_mut(index)) - } - /// Removes the element at `index` from the sequence. /// /// # Panics /// /// Panics if `index` is out of bounds. - pub fn remove(&mut self, index: usize) -> T { + pub(crate) fn remove(&mut self, index: usize) -> T { if let Some(root) = self.root_node.as_mut() { #[cfg(debug_assertions)] let len = root.check(); @@ -135,15 +125,6 @@ where panic!("remove from empty tree") } } - - /// Update the `element` at `index` in the sequence, returning the old value. - /// - /// # Panics - /// - /// Panics if `index > len` - pub fn set(&mut self, index: usize, element: T) -> T { - self.root_node.as_mut().unwrap().set(index, element) - } } impl SequenceTreeNode @@ -432,30 +413,6 @@ where assert!(self.is_full()); } - pub(crate) fn set(&mut self, index: usize, element: T) -> T { - if self.is_leaf() { - let old_element = self.elements.get_mut(index).unwrap(); - mem::replace(old_element, element) - } else { - let mut cumulative_len = 0; - for (child_index, child) in self.children.iter_mut().enumerate() { - match (cumulative_len + child.len()).cmp(&index) { - Ordering::Less => { - cumulative_len += child.len() + 1; - } - Ordering::Equal => { - let old_element = self.elements.get_mut(child_index).unwrap(); - return mem::replace(old_element, element); - } - Ordering::Greater => { - return child.set(index - cumulative_len, element); - } - } - } - panic!("Invalid index to set: {} but len was {}", index, self.len()) - } - } - pub(crate) fn get(&self, index: usize) -> Option<&T> { if self.is_leaf() { return self.elements.get(index); @@ -475,26 +432,6 @@ where } None } - - pub(crate) fn get_mut(&mut self, index: usize) -> Option<&mut T> { - if self.is_leaf() { - return self.elements.get_mut(index); - } else { - let mut cumulative_len = 0; - for (child_index, child) in self.children.iter_mut().enumerate() { - match (cumulative_len + child.len()).cmp(&index) { - Ordering::Less => { - cumulative_len += child.len() + 1; - } - Ordering::Equal => return self.elements.get_mut(child_index), - Ordering::Greater => { - return child.get_mut(index - cumulative_len); - } - } - } - } - None - } } impl Default for SequenceTreeInternal diff --git a/rust/automerge/src/lib.rs b/rust/automerge/src/lib.rs index fb8a3793..cbb535af 100644 --- a/rust/automerge/src/lib.rs +++ b/rust/automerge/src/lib.rs @@ -264,7 +264,6 @@ mod op_tree; mod parents; mod query; mod read; -mod sequence_tree; mod storage; pub mod sync; pub mod transaction; @@ -294,8 +293,6 @@ pub use op_observer::Patch; pub use op_observer::VecOpObserver; pub use parents::{Parent, Parents}; pub use read::ReadDoc; -#[doc(hidden)] -pub use sequence_tree::SequenceTree; pub use types::{ActorId, ChangeHash, ObjType, OpType, ParseChangeHashError, Prop, TextEncoding}; pub use value::{ScalarValue, Value}; pub use values::Values; From 11f063cbfe71bb81d849baca89f5eba8d441d594 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Thu, 9 Feb 2023 11:06:08 +0000 Subject: [PATCH 59/72] Remove nightly from CI --- .github/workflows/ci.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c2d469d5..bfa31bd5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -137,8 +137,6 @@ jobs: 
matrix: toolchain: - 1.66.0 - - nightly - continue-on-error: ${{ matrix.toolchain == 'nightly' }} steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 From 2cd7427f35e3b9b4a6b4d22d21dd083872015b57 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Tue, 17 Jan 2023 14:51:02 -0700 Subject: [PATCH 60/72] Use our leb128 parser for values This ensures that values in automerge documents are encoded correctly, and that no extra data is smuggled in any LEB fields. --- .../src/columnar/column_range/value.rs | 62 +++++++++--------- rust/automerge/src/columnar/encoding.rs | 2 + ...counter_value_has_incorrect_meta.automerge | Bin 0 -> 63 bytes .../fixtures/counter_value_is_ok.automerge | Bin 0 -> 63 bytes .../counter_value_is_overlong.automerge | Bin 0 -> 63 bytes rust/automerge/tests/test.rs | 14 ++++ 6 files changed, 48 insertions(+), 30 deletions(-) create mode 100644 rust/automerge/tests/fixtures/counter_value_has_incorrect_meta.automerge create mode 100644 rust/automerge/tests/fixtures/counter_value_is_ok.automerge create mode 100644 rust/automerge/tests/fixtures/counter_value_is_overlong.automerge diff --git a/rust/automerge/src/columnar/column_range/value.rs b/rust/automerge/src/columnar/column_range/value.rs index 43f63437..03a5aa60 100644 --- a/rust/automerge/src/columnar/column_range/value.rs +++ b/rust/automerge/src/columnar/column_range/value.rs @@ -4,10 +4,15 @@ use crate::{ columnar::{ encoding::{ leb128::{lebsize, ulebsize}, - raw, DecodeColumnError, RawBytes, RawDecoder, RawEncoder, RleDecoder, RleEncoder, Sink, + raw, DecodeColumnError, DecodeError, RawBytes, RawDecoder, RawEncoder, RleDecoder, + RleEncoder, Sink, }, SpliceError, }, + storage::parse::{ + leb128::{leb128_i64, leb128_u64}, + Input, ParseResult, + }, ScalarValue, }; @@ -217,18 +222,8 @@ impl<'a> Iterator for ValueIter<'a> { ValueType::Null => Some(Ok(ScalarValue::Null)), ValueType::True => Some(Ok(ScalarValue::Boolean(true))), ValueType::False => Some(Ok(ScalarValue::Boolean(false))), - ValueType::Uleb => self.parse_raw(val_meta, |mut bytes| { - let val = leb128::read::unsigned(&mut bytes).map_err(|e| { - DecodeColumnError::invalid_value("value", e.to_string()) - })?; - Ok(ScalarValue::Uint(val)) - }), - ValueType::Leb => self.parse_raw(val_meta, |mut bytes| { - let val = leb128::read::signed(&mut bytes).map_err(|e| { - DecodeColumnError::invalid_value("value", e.to_string()) - })?; - Ok(ScalarValue::Int(val)) - }), + ValueType::Uleb => self.parse_input(val_meta, leb128_u64), + ValueType::Leb => self.parse_input(val_meta, leb128_i64), ValueType::String => self.parse_raw(val_meta, |bytes| { let val = std::str::from_utf8(bytes) .map_err(|e| DecodeColumnError::invalid_value("value", e.to_string()))? 
@@ -250,17 +245,11 @@ impl<'a> Iterator for ValueIter<'a> { let val = f64::from_le_bytes(raw); Ok(ScalarValue::F64(val)) }), - ValueType::Counter => self.parse_raw(val_meta, |mut bytes| { - let val = leb128::read::signed(&mut bytes).map_err(|e| { - DecodeColumnError::invalid_value("value", e.to_string()) - })?; - Ok(ScalarValue::Counter(val.into())) + ValueType::Counter => self.parse_input(val_meta, |input| { + leb128_i64(input).map(|(i, n)| (i, ScalarValue::Counter(n.into()))) }), - ValueType::Timestamp => self.parse_raw(val_meta, |mut bytes| { - let val = leb128::read::signed(&mut bytes).map_err(|e| { - DecodeColumnError::invalid_value("value", e.to_string()) - })?; - Ok(ScalarValue::Timestamp(val)) + ValueType::Timestamp => self.parse_input(val_meta, |input| { + leb128_i64(input).map(|(i, n)| (i, ScalarValue::Timestamp(n))) }), ValueType::Unknown(code) => self.parse_raw(val_meta, |bytes| { Ok(ScalarValue::Unknown { @@ -284,8 +273,8 @@ impl<'a> Iterator for ValueIter<'a> { } impl<'a> ValueIter<'a> { - fn parse_raw Result>( - &mut self, + fn parse_raw<'b, R, F: Fn(&'b [u8]) -> Result>( + &'b mut self, meta: ValueMeta, f: F, ) -> Option> { @@ -298,11 +287,24 @@ impl<'a> ValueIter<'a> { } Ok(bytes) => bytes, }; - let val = match f(raw) { - Ok(v) => v, - Err(e) => return Some(Err(e)), - }; - Some(Ok(val)) + Some(f(raw)) + } + + fn parse_input<'b, R, F: Fn(Input<'b>) -> ParseResult<'b, R, DecodeError>>( + &'b mut self, + meta: ValueMeta, + f: F, + ) -> Option> + where + R: Into, + { + self.parse_raw(meta, |raw| match f(Input::new(raw)) { + Err(e) => Err(DecodeColumnError::invalid_value("value", e.to_string())), + Ok((i, _)) if !i.is_empty() => { + Err(DecodeColumnError::invalid_value("value", "extra bytes")) + } + Ok((_, v)) => Ok(v.into()), + }) } pub(crate) fn done(&self) -> bool { diff --git a/rust/automerge/src/columnar/encoding.rs b/rust/automerge/src/columnar/encoding.rs index bbdb34a8..c9435448 100644 --- a/rust/automerge/src/columnar/encoding.rs +++ b/rust/automerge/src/columnar/encoding.rs @@ -46,6 +46,8 @@ pub(crate) enum DecodeError { FromInt(#[from] std::num::TryFromIntError), #[error("bad leb128")] BadLeb(#[from] ::leb128::read::Error), + #[error(transparent)] + BadLeb128(#[from] crate::storage::parse::leb128::Error), #[error("attempted to allocate {attempted} which is larger than the maximum of {maximum}")] OverlargeAllocation { attempted: usize, maximum: usize }, #[error("invalid string encoding")] diff --git a/rust/automerge/tests/fixtures/counter_value_has_incorrect_meta.automerge b/rust/automerge/tests/fixtures/counter_value_has_incorrect_meta.automerge new file mode 100644 index 0000000000000000000000000000000000000000..2290b446ca661f302f6591c522a6653ba0be54a6 GIT binary patch literal 63 zcmZq8_iDCFPJPB`${^6qmb+L*-z{NbN`A*m!H-iI8Mkb^bm5T!0|T2Vvk9XUQy5b? 
TQvp*wVH2@I&u}A*O5KaD{l&S)MXnSh`0lxRq(Bd!v00tEUGyy^a VRsvT7Z~}h;VF7;ue<;uoe*j$F7aafq literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fixtures/counter_value_is_overlong.automerge b/rust/automerge/tests/fixtures/counter_value_is_overlong.automerge new file mode 100644 index 0000000000000000000000000000000000000000..831346f7f4109e2f292e502e13b326ca2485b351 GIT binary patch literal 63 zcmZq8_iD~Rd#9GsltG}IEqAeszFWe=l>CmBf*+?aGH%&+>B1ue1_m}!W)nsyrZA>( TrUIsV#ze+?#(Iql_4Nz@=B*VY literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/test.rs b/rust/automerge/tests/test.rs index ca6c64c0..191ce2f9 100644 --- a/rust/automerge/tests/test.rs +++ b/rust/automerge/tests/test.rs @@ -1412,6 +1412,20 @@ fn fuzz_crashers() { } } +fn fixture(name: &str) -> Vec { + fs::read("./tests/fixtures/".to_owned() + name).unwrap() +} + +#[test] +fn overlong_leb() { + // the value metadata says "2", but the LEB is only 1-byte long and there's an extra 0 + assert!(Automerge::load(&fixture("counter_value_has_incorrect_meta.automerge")).is_err()); + // the LEB is overlong (using 2 bytes where one would have sufficed) + assert!(Automerge::load(&fixture("counter_value_is_overlong.automerge")).is_err()); + // the LEB is correct + assert!(Automerge::load(&fixture("counter_value_is_ok.automerge")).is_ok()); +} + #[test] fn negative_64() { let mut doc = Automerge::new(); From 5e82dbc3c83c2336ca675ba8f167db5dba9b17cb Mon Sep 17 00:00:00 2001 From: Orion Henry Date: Mon, 13 Feb 2023 21:17:27 -0600 Subject: [PATCH 61/72] rework how skip works to push the logic into node --- javascript/test/basic_test.ts | 16 +++++ rust/automerge/src/op_tree/node.rs | 68 +++++++++++-------- rust/automerge/src/query/prop.rs | 47 ++----------- rust/automerge/src/query/seek_op.rs | 39 ++--------- .../automerge/src/query/seek_op_with_patch.rs | 38 +---------- 5 files changed, 67 insertions(+), 141 deletions(-) diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index 5aa1ac34..0e30dc7c 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -58,6 +58,22 @@ describe("Automerge", () => { }) }) + it("should be able to insert and delete a large number of properties", () => { + let doc = Automerge.init() + + doc = Automerge.change(doc, doc => { + doc['k1'] = true; + }); + + for (let idx = 1; idx <= 200; idx++) { + doc = Automerge.change(doc, doc => { + delete doc['k' + idx]; + doc['k' + (idx + 1)] = true; + assert(Object.keys(doc).length == 1) + }); + } + }) + it("can detect an automerge doc with isAutomerge()", () => { const doc1 = Automerge.from({ sub: { object: true } }) assert(Automerge.isAutomerge(doc1)) diff --git a/rust/automerge/src/op_tree/node.rs b/rust/automerge/src/op_tree/node.rs index ea7fbf48..8f2de662 100644 --- a/rust/automerge/src/op_tree/node.rs +++ b/rust/automerge/src/op_tree/node.rs @@ -27,50 +27,67 @@ impl OpTreeNode { } } + fn search_element<'a, 'b: 'a, Q>( + &'b self, + query: &mut Q, + m: &OpSetMetadata, + ops: &'a [Op], + index: usize, + ) -> bool + where + Q: TreeQuery<'a>, + { + if let Some(e) = self.elements.get(index) { + if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish { + return true; + } + } + false + } + pub(crate) fn search<'a, 'b: 'a, Q>( &'b self, query: &mut Q, m: &OpSetMetadata, ops: &'a [Op], - skip: Option, + mut skip: Option, ) -> bool where Q: TreeQuery<'a>, { if self.is_leaf() { - let skip = skip.unwrap_or(0); - for e in self.elements.iter().skip(skip) { + for e in self.elements.iter().skip(skip.unwrap_or(0)) { if 
query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish { return true; } } false } else { - let mut skip = skip.unwrap_or(0); for (child_index, child) in self.children.iter().enumerate() { - match skip.cmp(&child.len()) { - Ordering::Greater => { - // not in this child at all - // take off the number of elements in the child as well as the next element - skip -= child.len() + 1; + match skip { + Some(n) if n > child.len() => { + skip = Some(n - child.len() - 1); } - Ordering::Equal => { - // just try the element - skip -= child.len(); - if let Some(e) = self.elements.get(child_index) { - if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish - { - return true; - } + Some(n) if n == child.len() => { + skip = None; + if self.search_element(query, m, ops, child_index) { + return true; } } - Ordering::Less => { + Some(n) => { + if child.search(query, m, ops, Some(n)) { + return true; + } + skip = Some(0); // important to not be None so we never call query_node again + if self.search_element(query, m, ops, child_index) { + return true; + } + } + None => { // descend and try find it match query.query_node_with_metadata(child, m, ops) { QueryResult::Descend => { - // search in the child node, passing in the number of items left to - // skip - if child.search(query, m, ops, Some(skip)) { + if child.search(query, m, ops, None) { return true; } } @@ -78,14 +95,9 @@ impl OpTreeNode { QueryResult::Next => (), QueryResult::Skip(_) => panic!("had skip from non-root node"), } - if let Some(e) = self.elements.get(child_index) { - if query.query_element_with_metadata(&ops[*e], m) == QueryResult::Finish - { - return true; - } + if self.search_element(query, m, ops, child_index) { + return true; } - // reset the skip to zero so we continue iterating normally - skip = 0; } } } diff --git a/rust/automerge/src/query/prop.rs b/rust/automerge/src/query/prop.rs index f6062ec6..d2a11361 100644 --- a/rust/automerge/src/query/prop.rs +++ b/rust/automerge/src/query/prop.rs @@ -1,6 +1,6 @@ use crate::op_tree::{OpSetMetadata, OpTreeNode}; use crate::query::{binary_search_by, QueryResult, TreeQuery}; -use crate::types::{Key, ListEncoding, Op}; +use crate::types::{Key, Op}; use std::fmt::Debug; #[derive(Debug, Clone, PartialEq)] @@ -9,15 +9,6 @@ pub(crate) struct Prop<'a> { pub(crate) ops: Vec<&'a Op>, pub(crate) ops_pos: Vec, pub(crate) pos: usize, - start: Option, -} - -#[derive(Debug, Clone, PartialEq)] -struct Start { - /// The index to start searching for in the optree - idx: usize, - /// The total length of the optree - optree_len: usize, } impl<'a> Prop<'a> { @@ -27,7 +18,6 @@ impl<'a> Prop<'a> { ops: vec![], ops_pos: vec![], pos: 0, - start: None, } } } @@ -39,38 +29,9 @@ impl<'a> TreeQuery<'a> for Prop<'a> { m: &OpSetMetadata, ops: &[Op], ) -> QueryResult { - if let Some(Start { - idx: start, - optree_len, - }) = self.start - { - if self.pos + child.len() >= start { - // skip empty nodes - if child.index.visible_len(ListEncoding::default()) == 0 { - if self.pos + child.len() >= optree_len { - self.pos = optree_len; - QueryResult::Finish - } else { - self.pos += child.len(); - QueryResult::Next - } - } else { - QueryResult::Descend - } - } else { - self.pos += child.len(); - QueryResult::Next - } - } else { - // in the root node find the first op position for the key - let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.key)); - self.start = Some(Start { - idx: start, - optree_len: child.len(), - }); - self.pos = start; - QueryResult::Skip(start) - } + let start = 
binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.key)); + self.pos = start; + QueryResult::Skip(start) } fn query_element(&mut self, op: &'a Op) -> QueryResult { diff --git a/rust/automerge/src/query/seek_op.rs b/rust/automerge/src/query/seek_op.rs index 22d1f58d..2ed875d2 100644 --- a/rust/automerge/src/query/seek_op.rs +++ b/rust/automerge/src/query/seek_op.rs @@ -1,6 +1,6 @@ use crate::op_tree::{OpSetMetadata, OpTreeNode}; use crate::query::{binary_search_by, QueryResult, TreeQuery}; -use crate::types::{Key, ListEncoding, Op, HEAD}; +use crate::types::{Key, Op, HEAD}; use std::cmp::Ordering; use std::fmt::Debug; @@ -14,8 +14,6 @@ pub(crate) struct SeekOp<'a> { pub(crate) succ: Vec, /// whether a position has been found found: bool, - /// The found start position of the key if there is one yet (for map objects). - start: Option, } impl<'a> SeekOp<'a> { @@ -25,7 +23,6 @@ impl<'a> SeekOp<'a> { succ: vec![], pos: 0, found: false, - start: None, } } @@ -72,37 +69,9 @@ impl<'a> TreeQuery<'a> for SeekOp<'a> { } } Key::Map(_) => { - if let Some(start) = self.start { - if self.pos + child.len() >= start { - // skip empty nodes - if child.index.visible_len(ListEncoding::List) == 0 { - let child_contains_key = - child.elements.iter().any(|e| ops[*e].key == self.op.key); - if !child_contains_key { - // If we are in a node which has no visible ops, but none of the - // elements of the node match the key of the op, then we must have - // finished processing and so we can just return. - // See https://github.com/automerge/automerge-rs/pull/480 - QueryResult::Finish - } else { - // Otherwise, we need to proceed to the next node - self.pos += child.len(); - QueryResult::Next - } - } else { - QueryResult::Descend - } - } else { - self.pos += child.len(); - QueryResult::Next - } - } else { - // in the root node find the first op position for the key - let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); - self.start = Some(start); - self.pos = start; - QueryResult::Skip(start) - } + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); + self.pos = start; + QueryResult::Skip(start) } } } diff --git a/rust/automerge/src/query/seek_op_with_patch.rs b/rust/automerge/src/query/seek_op_with_patch.rs index 7cacb032..cd30f5bb 100644 --- a/rust/automerge/src/query/seek_op_with_patch.rs +++ b/rust/automerge/src/query/seek_op_with_patch.rs @@ -16,8 +16,6 @@ pub(crate) struct SeekOpWithPatch<'a> { last_seen: Option, pub(crate) values: Vec<&'a Op>, pub(crate) had_value_before: bool, - /// The found start position of the key if there is one yet (for map objects). - start: Option, } impl<'a> SeekOpWithPatch<'a> { @@ -33,7 +31,6 @@ impl<'a> SeekOpWithPatch<'a> { last_seen: None, values: vec![], had_value_before: false, - start: None, } } @@ -132,38 +129,9 @@ impl<'a> TreeQuery<'a> for SeekOpWithPatch<'a> { // Updating a map: operations appear in sorted order by key Key::Map(_) => { - if let Some(start) = self.start { - if self.pos + child.len() >= start { - // skip empty nodes - if child.index.visible_len(self.encoding) == 0 { - let child_contains_key = - child.elements.iter().any(|e| ops[*e].key == self.op.key); - if !child_contains_key { - // If we are in a node which has no visible ops, but none of the - // elements of the node match the key of the op, then we must have - // finished processing and so we can just return. 
- // See https://github.com/automerge/automerge-rs/pull/480 - QueryResult::Finish - } else { - self.pos += child.len(); - QueryResult::Next - } - } else { - QueryResult::Descend - } - } else { - self.pos += child.len(); - QueryResult::Next - } - } else { - // in the root node find the first op position for the key - // Search for the place where we need to insert the new operation. First find the - // first op with a key >= the key we're updating - let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); - self.start = Some(start); - self.pos = start; - QueryResult::Skip(start) - } + let start = binary_search_by(child, ops, |op| m.key_cmp(&op.key, &self.op.key)); + self.pos = start; + QueryResult::Skip(start) } } } From 9271b20cf5442369f21dec43ebeed097e8092da8 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 14 Feb 2023 16:24:25 +0000 Subject: [PATCH 62/72] Correct logic when skip = B and fix formatting A few tests were failing which exposed the fact that if skip is `B` (the out factor of the OpTree) then we set `skip = None` and this causes us to attempt to return `Skip` in a non root node. I ported the failing test from JS to Rust and fixed the problem. I also fixed the formatting issues. --- javascript/test/basic_test.ts | 10 +++---- rust/automerge-wasm/test/test.ts | 2 +- rust/automerge/src/op_tree/node.rs | 4 +-- rust/automerge/src/sync.rs | 45 ++++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/javascript/test/basic_test.ts b/javascript/test/basic_test.ts index 0e30dc7c..e34484c4 100644 --- a/javascript/test/basic_test.ts +++ b/javascript/test/basic_test.ts @@ -62,15 +62,15 @@ describe("Automerge", () => { let doc = Automerge.init() doc = Automerge.change(doc, doc => { - doc['k1'] = true; - }); + doc["k1"] = true + }) for (let idx = 1; idx <= 200; idx++) { doc = Automerge.change(doc, doc => { - delete doc['k' + idx]; - doc['k' + (idx + 1)] = true; + delete doc["k" + idx] + doc["k" + (idx + 1)] = true assert(Object.keys(doc).length == 1) - }); + }) } }) diff --git a/rust/automerge-wasm/test/test.ts b/rust/automerge-wasm/test/test.ts index 56aaae74..bb4f71e3 100644 --- a/rust/automerge-wasm/test/test.ts +++ b/rust/automerge-wasm/test/test.ts @@ -1447,7 +1447,7 @@ describe('Automerge', () => { sync(n1, n2, s1, s2) // Having n3's last change concurrent to the last sync heads forces us into the slower code path - const change3 = n2.getLastLocalChange() + const change3 = n3.getLastLocalChange() if (change3 === null) throw new RangeError("no local change") n2.applyChanges([change3]) n1.put("_root", "n1", "final"); n1.commit("", 0) diff --git a/rust/automerge/src/op_tree/node.rs b/rust/automerge/src/op_tree/node.rs index 8f2de662..ed1b7646 100644 --- a/rust/automerge/src/op_tree/node.rs +++ b/rust/automerge/src/op_tree/node.rs @@ -69,7 +69,7 @@ impl OpTreeNode { skip = Some(n - child.len() - 1); } Some(n) if n == child.len() => { - skip = None; + skip = Some(0); // important to not be None so we never call query_node again if self.search_element(query, m, ops, child_index) { return true; } @@ -78,7 +78,7 @@ impl OpTreeNode { if child.search(query, m, ops, Some(n)) { return true; } - skip = Some(0); // important to not be None so we never call query_node again + skip = Some(0); // important to not be None so we never call query_node again if self.search_element(query, m, ops, child_index) { return true; } diff --git a/rust/automerge/src/sync.rs b/rust/automerge/src/sync.rs index d3b6b3fa..d6dc2580 100644 --- 
a/rust/automerge/src/sync.rs +++ b/rust/automerge/src/sync.rs @@ -887,6 +887,51 @@ mod tests { assert_eq!(doc2.get_heads(), all_heads); } + #[test] + fn should_handle_lots_of_branching_and_merging() { + let mut doc1 = crate::AutoCommit::new().with_actor(ActorId::try_from("01234567").unwrap()); + let mut doc2 = crate::AutoCommit::new().with_actor(ActorId::try_from("89abcdef").unwrap()); + let mut doc3 = crate::AutoCommit::new().with_actor(ActorId::try_from("fedcba98").unwrap()); + let mut s1 = State::new(); + let mut s2 = State::new(); + + doc1.put(crate::ROOT, "x", 0).unwrap(); + let change1 = doc1.get_last_local_change().unwrap().clone(); + + doc2.apply_changes([change1.clone()]).unwrap(); + doc3.apply_changes([change1]).unwrap(); + + doc3.put(crate::ROOT, "x", 1).unwrap(); + + //// - n1c1 <------ n1c2 <------ n1c3 <-- etc. <-- n1c20 <------ n1c21 + //// / \/ \/ \/ + //// / /\ /\ /\ + //// c0 <---- n2c1 <------ n2c2 <------ n2c3 <-- etc. <-- n2c20 <------ n2c21 + //// \ / + //// ---------------------------------------------- n3c1 <----- + for i in 1..20 { + doc1.put(crate::ROOT, "n1", i).unwrap(); + doc2.put(crate::ROOT, "n2", i).unwrap(); + let change1 = doc1.get_last_local_change().unwrap().clone(); + let change2 = doc2.get_last_local_change().unwrap().clone(); + doc1.apply_changes([change2.clone()]).unwrap(); + doc2.apply_changes([change1]).unwrap(); + } + + sync(&mut doc1, &mut doc2, &mut s1, &mut s2); + + //// Having n3's last change concurrent to the last sync heads forces us into the slower code path + let change3 = doc3.get_last_local_change().unwrap().clone(); + doc2.apply_changes([change3]).unwrap(); + + doc1.put(crate::ROOT, "n1", "final").unwrap(); + doc2.put(crate::ROOT, "n1", "final").unwrap(); + + sync(&mut doc1, &mut doc2, &mut s1, &mut s2); + + assert_eq!(doc1.get_heads(), doc2.get_heads()); + } + fn sync( a: &mut crate::AutoCommit, b: &mut crate::AutoCommit, From c92d042c87eb724e4878a4df0f8d31177c410c01 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 14 Feb 2023 17:25:25 +0000 Subject: [PATCH 63/72] @automerge/automerge-wasm@0.1.24 and @automerge/automerge@2.0.2-alpha.2 --- javascript/package.json | 4 ++-- rust/automerge-wasm/package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/package.json b/javascript/package.json index 8712920c..e39f398a 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.2-alpha.1", + "version": "2.0.2-alpha.2", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", @@ -47,7 +47,7 @@ "typescript": "^4.9.4" }, "dependencies": { - "@automerge/automerge-wasm": "0.1.24", + "@automerge/automerge-wasm": "0.1.25", "uuid": "^9.0.0" } } diff --git a/rust/automerge-wasm/package.json b/rust/automerge-wasm/package.json index 57354ce1..80b39fd4 100644 --- a/rust/automerge-wasm/package.json +++ b/rust/automerge-wasm/package.json @@ -8,7 +8,7 @@ "description": "wasm-bindgen bindings to the automerge rust implementation", "homepage": "https://github.com/automerge/automerge-rs/tree/main/automerge-wasm", "repository": "github:automerge/automerge-rs", - "version": "0.1.24", + "version": "0.1.25", "license": "MIT", "files": [ "README.md", From 1425af43cdcd61295e0e65bf47fbce0076353682 Mon Sep 17 00:00:00 2001 From: Alex Good Date: Tue, 14 Feb 2023 19:47:53 +0000 Subject: 
[PATCH 64/72] @automerge/automerge@2.0.2 --- javascript/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/package.json b/javascript/package.json index e39f398a..79309907 100644 --- a/javascript/package.json +++ b/javascript/package.json @@ -4,7 +4,7 @@ "Orion Henry ", "Martin Kleppmann" ], - "version": "2.0.2-alpha.2", + "version": "2.0.2", "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", "repository": "github:automerge/automerge-rs", From 407faefa6e838abe0bd8526716c98eab592aa123 Mon Sep 17 00:00:00 2001 From: Philip Schatz <253202+philschatz@users.noreply.github.com> Date: Wed, 15 Feb 2023 03:23:02 -0600 Subject: [PATCH 65/72] A few setup fixes (#529) * include deno in dependencies * install javascript dependencies * remove redundant operation --- README.md | 3 +++ flake.nix | 1 + rust/automerge/src/automerge.rs | 1 - 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 94e1bbb8..76d48ddd 100644 --- a/README.md +++ b/README.md @@ -113,6 +113,9 @@ brew install cmake node cmocka # install yarn npm install --global yarn +# install javascript dependencies +yarn --cwd ./javascript + # install rust dependencies cargo install wasm-bindgen-cli wasm-opt cargo-deny diff --git a/flake.nix b/flake.nix index 4f9ba1fe..37835738 100644 --- a/flake.nix +++ b/flake.nix @@ -54,6 +54,7 @@ nodejs yarn + deno # c deps cmake diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 128d4418..09c3cc9d 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -897,7 +897,6 @@ impl Automerge { .add_change(&change, actor_index) .expect("Change's deps should already be in the document"); - self.history_index.insert(change.hash(), history_index); self.history.push(change); history_index From 8de2fa9bd49e1bf04f2a864b3a57f911419a86ba Mon Sep 17 00:00:00 2001 From: Jason Kankiewicz Date: Sat, 25 Feb 2023 10:47:00 -0800 Subject: [PATCH 66/72] C API 2 (#530) The AMvalue union, AMlistItem struct, AMmapItem struct, and AMobjItem struct are gone, replaced by the AMitem struct. The AMchangeHashes, AMchanges, AMlistItems, AMmapItems, AMobjItems, AMstrs, and AMsyncHaves iterators are gone, replaced by the AMitems iterator. The AMitem struct is opaque, getting and setting values is now achieved exclusively through function calls. The AMitemsNext(), AMitemsPrev(), and AMresultItem() functions return a pointer to an AMitem struct so you ultimately get the same thing whether you're iterating over a sequence or calling AMmapGet() or AMlistGet(). Calling AMitemResult() on an AMitem struct will produce a new AMresult struct referencing its storage so now the AMresult struct for an iterator can be subsequently freed without affecting the AMitem structs that were filtered out of it. The storage for a set of AMitem structs can be recombined into a single AMresult struct by passing pointers to their corresponding AMresult structs to AMresultCat(). For C/C++ programmers, I've added AMstrCmp(), AMstrdup(), AM{idxType,objType,status,valType}ToString() and AM{idxType,objType,status,valType}FromString(). It's also now possible to pass arbitrary parameters through AMstack{Item,Items,Result}() to a callback function. 
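To make the item/result model described above concrete, here is a minimal sketch pieced together from the README and quickstart changes in this commit. The include paths assume the installed header layout described in the README changes, the exact signature of AMitemResult() is inferred from the prose above, and error handling is mostly elided; treat it as an illustration rather than code from the patch.

```
#include <stdio.h>
#include <stdlib.h>

#include <automerge-c/automerge.h>
#include <automerge-c/utils/string.h>

int main(void) {
    AMresult* doc_result = AMcreate(NULL);
    if (AMresultStatus(doc_result) != AM_STATUS_OK) {
        char* const err = AMstrdup(AMresultError(doc_result), NULL);
        fprintf(stderr, "create failed: %s\n", err);
        free(err);
        AMresultFree(doc_result);
        return 1;
    }
    AMdoc* doc;
    AMitemToDoc(AMresultItem(doc_result), &doc);

    /* Every fallible call returns an AMresult that must eventually be freed
       (status checking skipped here for brevity). */
    AMresultFree(AMmapPutStr(doc, AM_ROOT, AMstr("key"), AMstr("value")));

    AMresult* get_result = AMmapGet(doc, AM_ROOT, AMstr("key"), NULL);
    AMitem* item = AMresultItem(get_result);

    /* Share the item's storage with a new result so the originating result
       can be freed without invalidating the item, as described above. */
    AMresult* kept = AMitemResult(item);
    AMresultFree(get_result);

    AMbyteSpan str;
    if (AMitemToStr(item, &str)) {
        char* const c_str = AMstrdup(str, NULL);
        printf("key == \"%s\"\n", c_str);
        free(c_str);
    }

    AMresultFree(kept);
    AMresultFree(doc_result);
    return 0;
}
```
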
--- rust/automerge-c/.clang-format | 250 +++ rust/automerge-c/.gitignore | 8 +- rust/automerge-c/CMakeLists.txt | 344 ++- rust/automerge-c/Cargo.toml | 4 +- rust/automerge-c/README.md | 197 +- rust/automerge-c/cbindgen.toml | 20 +- rust/automerge-c/cmake/Cargo.toml.in | 22 + rust/automerge-c/cmake/cbindgen.toml.in | 48 + rust/automerge-c/cmake/config.h.in | 31 +- .../cmake/enum-string-functions-gen.cmake | 183 ++ ...replace.cmake => file-regex-replace.cmake} | 4 +- .../{file_touch.cmake => file-touch.cmake} | 4 +- rust/automerge-c/docs/CMakeLists.txt | 35 + rust/automerge-c/{ => docs}/img/brandmark.png | Bin rust/automerge-c/examples/CMakeLists.txt | 20 +- rust/automerge-c/examples/README.md | 2 +- rust/automerge-c/examples/quickstart.c | 195 +- .../include/automerge-c/utils/result.h | 30 + .../include/automerge-c/utils/stack.h | 130 ++ .../automerge-c/utils/stack_callback_data.h | 53 + .../include/automerge-c/utils/string.h | 29 + rust/automerge-c/src/CMakeLists.txt | 250 --- rust/automerge-c/src/actor_id.rs | 84 +- rust/automerge-c/src/byte_span.rs | 146 +- rust/automerge-c/src/change.rs | 148 +- rust/automerge-c/src/change_hashes.rs | 400 ---- rust/automerge-c/src/changes.rs | 399 ---- rust/automerge-c/src/doc.rs | 607 +++-- rust/automerge-c/src/doc/list.rs | 555 ++--- rust/automerge-c/src/doc/list/item.rs | 97 - rust/automerge-c/src/doc/list/items.rs | 348 --- rust/automerge-c/src/doc/map.rs | 324 +-- rust/automerge-c/src/doc/map/item.rs | 98 - rust/automerge-c/src/doc/map/items.rs | 340 --- rust/automerge-c/src/doc/utils.rs | 27 +- rust/automerge-c/src/index.rs | 84 + rust/automerge-c/src/item.rs | 1963 ++++++++++++++++ rust/automerge-c/src/items.rs | 401 ++++ rust/automerge-c/src/lib.rs | 9 +- rust/automerge-c/src/obj.rs | 86 +- rust/automerge-c/src/obj/item.rs | 73 - rust/automerge-c/src/obj/items.rs | 341 --- rust/automerge-c/src/result.rs | 1039 ++++----- rust/automerge-c/src/result_stack.rs | 156 -- rust/automerge-c/src/strs.rs | 359 --- rust/automerge-c/src/sync.rs | 2 +- rust/automerge-c/src/sync/have.rs | 25 +- rust/automerge-c/src/sync/haves.rs | 378 ---- rust/automerge-c/src/sync/message.rs | 114 +- rust/automerge-c/src/sync/state.rs | 149 +- rust/automerge-c/src/utils/result.c | 33 + rust/automerge-c/src/utils/stack.c | 106 + .../src/utils/stack_callback_data.c | 9 + rust/automerge-c/src/utils/string.c | 46 + rust/automerge-c/test/CMakeLists.txt | 44 +- rust/automerge-c/test/actor_id_tests.c | 145 +- rust/automerge-c/test/base_state.c | 17 + rust/automerge-c/test/base_state.h | 39 + rust/automerge-c/test/byte_span_tests.c | 118 + rust/automerge-c/test/cmocka_utils.c | 88 + rust/automerge-c/test/cmocka_utils.h | 42 +- rust/automerge-c/test/doc_state.c | 27 + rust/automerge-c/test/doc_state.h | 17 + rust/automerge-c/test/doc_tests.c | 351 ++- rust/automerge-c/test/enum_string_tests.c | 148 ++ rust/automerge-c/test/group_state.c | 27 - rust/automerge-c/test/group_state.h | 16 - rust/automerge-c/test/item_tests.c | 94 + rust/automerge-c/test/list_tests.c | 720 +++--- rust/automerge-c/test/macro_utils.c | 47 +- rust/automerge-c/test/macro_utils.h | 29 +- rust/automerge-c/test/main.c | 17 +- rust/automerge-c/test/map_tests.c | 1754 ++++++++------- .../test/ported_wasm/basic_tests.c | 1986 ++++++++--------- rust/automerge-c/test/ported_wasm/suite.c | 7 +- .../automerge-c/test/ported_wasm/sync_tests.c | 1276 +++++------ rust/automerge-c/test/stack_utils.c | 31 - rust/automerge-c/test/stack_utils.h | 38 - rust/automerge-c/test/str_utils.c | 2 +- rust/automerge-c/test/str_utils.h | 
19 +- rust/automerge/src/error.rs | 5 + scripts/ci/cmake-build | 2 +- 82 files changed, 9304 insertions(+), 8607 deletions(-) create mode 100644 rust/automerge-c/.clang-format create mode 100644 rust/automerge-c/cmake/Cargo.toml.in create mode 100644 rust/automerge-c/cmake/cbindgen.toml.in create mode 100644 rust/automerge-c/cmake/enum-string-functions-gen.cmake rename rust/automerge-c/cmake/{file_regex_replace.cmake => file-regex-replace.cmake} (87%) rename rust/automerge-c/cmake/{file_touch.cmake => file-touch.cmake} (82%) create mode 100644 rust/automerge-c/docs/CMakeLists.txt rename rust/automerge-c/{ => docs}/img/brandmark.png (100%) create mode 100644 rust/automerge-c/include/automerge-c/utils/result.h create mode 100644 rust/automerge-c/include/automerge-c/utils/stack.h create mode 100644 rust/automerge-c/include/automerge-c/utils/stack_callback_data.h create mode 100644 rust/automerge-c/include/automerge-c/utils/string.h delete mode 100644 rust/automerge-c/src/CMakeLists.txt delete mode 100644 rust/automerge-c/src/change_hashes.rs delete mode 100644 rust/automerge-c/src/changes.rs delete mode 100644 rust/automerge-c/src/doc/list/item.rs delete mode 100644 rust/automerge-c/src/doc/list/items.rs delete mode 100644 rust/automerge-c/src/doc/map/item.rs delete mode 100644 rust/automerge-c/src/doc/map/items.rs create mode 100644 rust/automerge-c/src/index.rs create mode 100644 rust/automerge-c/src/item.rs create mode 100644 rust/automerge-c/src/items.rs delete mode 100644 rust/automerge-c/src/obj/item.rs delete mode 100644 rust/automerge-c/src/obj/items.rs delete mode 100644 rust/automerge-c/src/result_stack.rs delete mode 100644 rust/automerge-c/src/strs.rs delete mode 100644 rust/automerge-c/src/sync/haves.rs create mode 100644 rust/automerge-c/src/utils/result.c create mode 100644 rust/automerge-c/src/utils/stack.c create mode 100644 rust/automerge-c/src/utils/stack_callback_data.c create mode 100644 rust/automerge-c/src/utils/string.c create mode 100644 rust/automerge-c/test/base_state.c create mode 100644 rust/automerge-c/test/base_state.h create mode 100644 rust/automerge-c/test/byte_span_tests.c create mode 100644 rust/automerge-c/test/cmocka_utils.c create mode 100644 rust/automerge-c/test/doc_state.c create mode 100644 rust/automerge-c/test/doc_state.h create mode 100644 rust/automerge-c/test/enum_string_tests.c delete mode 100644 rust/automerge-c/test/group_state.c delete mode 100644 rust/automerge-c/test/group_state.h create mode 100644 rust/automerge-c/test/item_tests.c delete mode 100644 rust/automerge-c/test/stack_utils.c delete mode 100644 rust/automerge-c/test/stack_utils.h diff --git a/rust/automerge-c/.clang-format b/rust/automerge-c/.clang-format new file mode 100644 index 00000000..dbf16c21 --- /dev/null +++ b/rust/automerge-c/.clang-format @@ -0,0 +1,250 @@ +--- +Language: Cpp +# BasedOnStyle: Chromium +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignArrayOfStructures: None +AlignConsecutiveAssignments: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: true +AlignConsecutiveBitFields: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: false +AlignConsecutiveDeclarations: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: false +AlignConsecutiveMacros: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + PadOperators: false +AlignEscapedNewlines: Left 
+AlignOperands: Align +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortEnumsOnASingleLine: true +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Inline +AllowShortLambdasOnASingleLine: All +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +AttributeMacros: + - __capability +BinPackArguments: true +BinPackParameters: false +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeConceptDeclarations: Always +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +QualifierAlignment: Leave +CompactNamespaces: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +PackConstructorInitializers: NextLine +BasedOnStyle: '' +ConstructorInitializerAllOnOneLineOrOnePerLine: false +AllowAllConstructorInitializersOnNextLine: true +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IfMacros: + - KJ_IF_MAYBE +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*\.h>' + Priority: 1 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '.*' + Priority: 3 + SortPriority: 0 + CaseSensitive: false +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentAccessModifiers: false +IndentCaseLabels: true +IndentCaseBlocks: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentExternBlock: AfterExternBlock +IndentRequiresClause: true +IndentWidth: 4 +IndentWrappedFunctionNames: false +InsertBraces: false +InsertTrailingCommas: None +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +LambdaBodyIndentation: Signature +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakOpenParenthesis: 0 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PenaltyIndentedWhitespace: 0 
+PointerAlignment: Left +PPIndentWidth: -1 +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' + BasedOnStyle: google + - Language: TextProto + Delimiters: + - pb + - PB + - proto + - PROTO + EnclosingFunctions: + - EqualsProto + - EquivToProto + - PARSE_PARTIAL_TEXT_PROTO + - PARSE_TEST_PROTO + - PARSE_TEXT_PROTO + - ParseTextOrDie + - ParseTextProtoOrDie + - ParseTestProto + - ParsePartialTestProto + CanonicalDelimiter: pb + BasedOnStyle: google +ReferenceAlignment: Pointer +ReflowComments: true +RemoveBracesLLVM: false +RequiresClausePosition: OwnLine +SeparateDefinitionBlocks: Leave +ShortNamespaceLines: 1 +SortIncludes: CaseSensitive +SortJavaStaticImport: Before +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeParensOptions: + AfterControlStatements: true + AfterForeachMacros: true + AfterFunctionDefinitionName: false + AfterFunctionDeclarationName: false + AfterIfMacros: true + AfterOverloadedOperator: false + AfterRequiresInClause: false + AfterRequiresInExpression: false + BeforeNonEmptyParentheses: false +SpaceAroundPointerQualifiers: Default +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: Never +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInLineCommentPrefix: + Minimum: 1 + Maximum: -1 +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +BitFieldColonSpacing: Both +Standard: Auto +StatementAttributeLikeMacros: + - Q_EMIT +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 8 +UseCRLF: false +UseTab: Never +WhitespaceSensitiveMacros: + - STRINGIZE + - PP_STRINGIZE + - BOOST_PP_STRINGIZE + - NS_SWIFT_NAME + - CF_SWIFT_NAME +... + diff --git a/rust/automerge-c/.gitignore b/rust/automerge-c/.gitignore index f04de582..14d74973 100644 --- a/rust/automerge-c/.gitignore +++ b/rust/automerge-c/.gitignore @@ -1,10 +1,10 @@ automerge automerge.h automerge.o -*.cmake +build/ +CMakeCache.txt CMakeFiles +CMakePresets.json Makefile DartConfiguration.tcl -config.h -CMakeCache.txt -Cargo +out/ diff --git a/rust/automerge-c/CMakeLists.txt b/rust/automerge-c/CMakeLists.txt index 1b68669a..056d111b 100644 --- a/rust/automerge-c/CMakeLists.txt +++ b/rust/automerge-c/CMakeLists.txt @@ -1,97 +1,279 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.23 FATAL_ERROR) -set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") +project(automerge-c VERSION 0.1.0 + LANGUAGES C + DESCRIPTION "C bindings for the Automerge Rust library.") -# Parse the library name, project name and project version out of Cargo's TOML file. 
-set(CARGO_LIB_SECTION OFF) +set(LIBRARY_NAME "automerge") -set(LIBRARY_NAME "") - -set(CARGO_PKG_SECTION OFF) - -set(CARGO_PKG_NAME "") - -set(CARGO_PKG_VERSION "") - -file(READ Cargo.toml TOML_STRING) - -string(REPLACE ";" "\\\\;" TOML_STRING "${TOML_STRING}") - -string(REPLACE "\n" ";" TOML_LINES "${TOML_STRING}") - -foreach(TOML_LINE IN ITEMS ${TOML_LINES}) - string(REGEX MATCH "^\\[(lib|package)\\]$" _ ${TOML_LINE}) - - if(CMAKE_MATCH_1 STREQUAL "lib") - set(CARGO_LIB_SECTION ON) - - set(CARGO_PKG_SECTION OFF) - elseif(CMAKE_MATCH_1 STREQUAL "package") - set(CARGO_LIB_SECTION OFF) - - set(CARGO_PKG_SECTION ON) - endif() - - string(REGEX MATCH "^name += +\"([^\"]+)\"$" _ ${TOML_LINE}) - - if(CMAKE_MATCH_1 AND (CARGO_LIB_SECTION AND NOT CARGO_PKG_SECTION)) - set(LIBRARY_NAME "${CMAKE_MATCH_1}") - elseif(CMAKE_MATCH_1 AND (NOT CARGO_LIB_SECTION AND CARGO_PKG_SECTION)) - set(CARGO_PKG_NAME "${CMAKE_MATCH_1}") - endif() - - string(REGEX MATCH "^version += +\"([^\"]+)\"$" _ ${TOML_LINE}) - - if(CMAKE_MATCH_1 AND CARGO_PKG_SECTION) - set(CARGO_PKG_VERSION "${CMAKE_MATCH_1}") - endif() - - if(LIBRARY_NAME AND (CARGO_PKG_NAME AND CARGO_PKG_VERSION)) - break() - endif() -endforeach() - -project(${CARGO_PKG_NAME} VERSION 0.0.1 LANGUAGES C DESCRIPTION "C bindings for the Automerge Rust backend.") - -include(CTest) +set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) option(BUILD_SHARED_LIBS "Enable the choice of a shared or static library.") +include(CTest) + include(CMakePackageConfigHelpers) include(GNUInstallDirs) +set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") + string(MAKE_C_IDENTIFIER ${PROJECT_NAME} SYMBOL_PREFIX) string(TOUPPER ${SYMBOL_PREFIX} SYMBOL_PREFIX) -set(CARGO_TARGET_DIR "${CMAKE_CURRENT_BINARY_DIR}/Cargo/target") +set(CARGO_TARGET_DIR "${CMAKE_BINARY_DIR}/Cargo/target") -set(CBINDGEN_INCLUDEDIR "${CARGO_TARGET_DIR}/${CMAKE_INSTALL_INCLUDEDIR}") +set(CBINDGEN_INCLUDEDIR "${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_INCLUDEDIR}") set(CBINDGEN_TARGET_DIR "${CBINDGEN_INCLUDEDIR}/${PROJECT_NAME}") -add_subdirectory(src) +find_program ( + CARGO_CMD + "cargo" + PATHS "$ENV{CARGO_HOME}/bin" + DOC "The Cargo command" +) -# Generate and install the configuration header. +if(NOT CARGO_CMD) + message(FATAL_ERROR "Cargo (Rust package manager) not found! " + "Please install it and/or set the CARGO_HOME " + "environment variable to its path.") +endif() + +string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) + +if(BUILD_TYPE_LOWER STREQUAL debug) + set(CARGO_BUILD_TYPE "debug") + + set(CARGO_FLAG "") +else() + set(CARGO_BUILD_TYPE "release") + + set(CARGO_FLAG "--release") +endif() + +set(CARGO_FEATURES "") + +set(CARGO_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_BUILD_TYPE}") + +set(BINDINGS_NAME "${LIBRARY_NAME}_core") + +configure_file( + ${CMAKE_MODULE_PATH}/Cargo.toml.in + ${CMAKE_SOURCE_DIR}/Cargo.toml + @ONLY + NEWLINE_STYLE LF +) + +set(INCLUDE_GUARD_PREFIX "${SYMBOL_PREFIX}") + +configure_file( + ${CMAKE_MODULE_PATH}/cbindgen.toml.in + ${CMAKE_SOURCE_DIR}/cbindgen.toml + @ONLY + NEWLINE_STYLE LF +) + +set(CARGO_OUTPUT + ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h + ${CARGO_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${BINDINGS_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX} +) + +# \note cbindgen's naming behavior isn't fully configurable and it ignores +# `const fn` calls (https://github.com/eqrion/cbindgen/issues/252). 
+add_custom_command( + OUTPUT + ${CARGO_OUTPUT} + COMMAND + # \note cbindgen won't regenerate its output header file after it's been removed but it will after its + # configuration file has been updated. + ${CMAKE_COMMAND} -DCONDITION=NOT_EXISTS -P ${CMAKE_SOURCE_DIR}/cmake/file-touch.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CMAKE_SOURCE_DIR}/cbindgen.toml + COMMAND + ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} + COMMAND + # Compensate for cbindgen's translation of consecutive uppercase letters to "ScreamingSnakeCase". + ${CMAKE_COMMAND} -DMATCH_REGEX=A_M\([^_]+\)_ -DREPLACE_EXPR=AM_\\1_ -P ${CMAKE_SOURCE_DIR}/cmake/file-regex-replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h + COMMAND + # Compensate for cbindgen ignoring `std:mem::size_of()` calls. + ${CMAKE_COMMAND} -DMATCH_REGEX=USIZE_ -DREPLACE_EXPR=\+${CMAKE_SIZEOF_VOID_P} -P ${CMAKE_SOURCE_DIR}/cmake/file-regex-replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h + MAIN_DEPENDENCY + src/lib.rs + DEPENDS + src/actor_id.rs + src/byte_span.rs + src/change.rs + src/doc.rs + src/doc/list.rs + src/doc/map.rs + src/doc/utils.rs + src/index.rs + src/item.rs + src/items.rs + src/obj.rs + src/result.rs + src/sync.rs + src/sync/have.rs + src/sync/message.rs + src/sync/state.rs + ${CMAKE_SOURCE_DIR}/build.rs + ${CMAKE_MODULE_PATH}/Cargo.toml.in + ${CMAKE_MODULE_PATH}/cbindgen.toml.in + WORKING_DIRECTORY + ${CMAKE_SOURCE_DIR} + COMMENT + "Producing the bindings' artifacts with Cargo..." + VERBATIM +) + +add_custom_target(${BINDINGS_NAME}_artifacts ALL + DEPENDS ${CARGO_OUTPUT} +) + +add_library(${BINDINGS_NAME} STATIC IMPORTED GLOBAL) + +target_include_directories(${BINDINGS_NAME} INTERFACE "${CBINDGEN_INCLUDEDIR}") + +set_target_properties( + ${BINDINGS_NAME} + PROPERTIES + # \note Cargo writes a debug build into a nested directory instead of + # decorating its name. + DEBUG_POSTFIX "" + DEFINE_SYMBOL "" + IMPORTED_IMPLIB "" + IMPORTED_LOCATION "${CARGO_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${BINDINGS_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" + IMPORTED_NO_SONAME "TRUE" + IMPORTED_SONAME "" + LINKER_LANGUAGE C + PUBLIC_HEADER "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" + SOVERSION "${PROJECT_VERSION_MAJOR}" + VERSION "${PROJECT_VERSION}" + # \note Cargo exports all of the symbols automatically. + WINDOWS_EXPORT_ALL_SYMBOLS "TRUE" +) + +target_compile_definitions(${BINDINGS_NAME} INTERFACE $) + +set(UTILS_SUBDIR "utils") + +add_custom_command( + OUTPUT + ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h + ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c + COMMAND + ${CMAKE_COMMAND} -DPROJECT_NAME=${PROJECT_NAME} -DLIBRARY_NAME=${LIBRARY_NAME} -DSUBDIR=${UTILS_SUBDIR} -P ${CMAKE_SOURCE_DIR}/cmake/enum-string-functions-gen.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c + MAIN_DEPENDENCY + ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h + DEPENDS + ${CMAKE_SOURCE_DIR}/cmake/enum-string-functions-gen.cmake + WORKING_DIRECTORY + ${CMAKE_SOURCE_DIR} + COMMENT + "Generating the enum string functions with CMake..." 
+ VERBATIM +) + +add_custom_target(${LIBRARY_NAME}_utilities + DEPENDS ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h + ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c +) + +add_library(${LIBRARY_NAME}) + +target_compile_features(${LIBRARY_NAME} PRIVATE c_std_99) + +set(CMAKE_THREAD_PREFER_PTHREAD TRUE) + +set(THREADS_PREFER_PTHREAD_FLAG TRUE) + +find_package(Threads REQUIRED) + +set(LIBRARY_DEPENDENCIES Threads::Threads ${CMAKE_DL_LIBS}) + +if(WIN32) + list(APPEND LIBRARY_DEPENDENCIES Bcrypt userenv ws2_32) +else() + list(APPEND LIBRARY_DEPENDENCIES m) +endif() + +target_link_libraries(${LIBRARY_NAME} + PUBLIC ${BINDINGS_NAME} + ${LIBRARY_DEPENDENCIES} +) + +# \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't +# contain a non-existent path so its build-time include directory +# must be specified for all of its dependent targets instead. +target_include_directories(${LIBRARY_NAME} + PUBLIC "$" + "$" +) + +add_dependencies(${LIBRARY_NAME} ${BINDINGS_NAME}_artifacts) + +# Generate the configuration header. math(EXPR INTEGER_PROJECT_VERSION_MAJOR "${PROJECT_VERSION_MAJOR} * 100000") math(EXPR INTEGER_PROJECT_VERSION_MINOR "${PROJECT_VERSION_MINOR} * 100") math(EXPR INTEGER_PROJECT_VERSION_PATCH "${PROJECT_VERSION_PATCH}") -math(EXPR INTEGER_PROJECT_VERSION "${INTEGER_PROJECT_VERSION_MAJOR} + ${INTEGER_PROJECT_VERSION_MINOR} + ${INTEGER_PROJECT_VERSION_PATCH}") +math(EXPR INTEGER_PROJECT_VERSION "${INTEGER_PROJECT_VERSION_MAJOR} + \ + ${INTEGER_PROJECT_VERSION_MINOR} + \ + ${INTEGER_PROJECT_VERSION_PATCH}") configure_file( ${CMAKE_MODULE_PATH}/config.h.in - config.h + ${CBINDGEN_TARGET_DIR}/config.h @ONLY NEWLINE_STYLE LF ) +target_sources(${LIBRARY_NAME} + PRIVATE + src/${UTILS_SUBDIR}/result.c + src/${UTILS_SUBDIR}/stack_callback_data.c + src/${UTILS_SUBDIR}/stack.c + src/${UTILS_SUBDIR}/string.c + ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c + PUBLIC + FILE_SET api TYPE HEADERS + BASE_DIRS + ${CBINDGEN_INCLUDEDIR} + ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR} + FILES + ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h + ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h + ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/result.h + ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack_callback_data.h + ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack.h + ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/string.h + INTERFACE + FILE_SET config TYPE HEADERS + BASE_DIRS + ${CBINDGEN_INCLUDEDIR} + FILES + ${CBINDGEN_TARGET_DIR}/config.h +) + install( - FILES ${CMAKE_BINARY_DIR}/config.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME} + TARGETS ${LIBRARY_NAME} + EXPORT ${PROJECT_NAME}-config + FILE_SET api + FILE_SET config +) + +# \note Install the Cargo-built core bindings to enable direct linkage. 
+install( + FILES $ + DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + +install(EXPORT ${PROJECT_NAME}-config + FILE ${PROJECT_NAME}-config.cmake + NAMESPACE "${PROJECT_NAME}::" + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${LIB} ) if(BUILD_TESTING) @@ -100,42 +282,6 @@ if(BUILD_TESTING) enable_testing() endif() +add_subdirectory(docs) + add_subdirectory(examples EXCLUDE_FROM_ALL) - -# Generate and install .cmake files -set(PROJECT_CONFIG_NAME "${PROJECT_NAME}-config") - -set(PROJECT_CONFIG_VERSION_NAME "${PROJECT_CONFIG_NAME}-version") - -write_basic_package_version_file( - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_VERSION_NAME}.cmake - VERSION ${PROJECT_VERSION} - COMPATIBILITY ExactVersion -) - -# The namespace label starts with the title-cased library name. -string(SUBSTRING ${LIBRARY_NAME} 0 1 NS_FIRST) - -string(SUBSTRING ${LIBRARY_NAME} 1 -1 NS_REST) - -string(TOUPPER ${NS_FIRST} NS_FIRST) - -string(TOLOWER ${NS_REST} NS_REST) - -string(CONCAT NAMESPACE ${NS_FIRST} ${NS_REST} "::") - -# \note CMake doesn't automate the exporting of an imported library's targets -# so the package configuration script must do it. -configure_package_config_file( - ${CMAKE_MODULE_PATH}/${PROJECT_CONFIG_NAME}.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_NAME}.cmake - INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} -) - -install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_NAME}.cmake - ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_VERSION_NAME}.cmake - DESTINATION - ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} -) diff --git a/rust/automerge-c/Cargo.toml b/rust/automerge-c/Cargo.toml index d039e460..95a3a29c 100644 --- a/rust/automerge-c/Cargo.toml +++ b/rust/automerge-c/Cargo.toml @@ -7,8 +7,8 @@ license = "MIT" rust-version = "1.57.0" [lib] -name = "automerge" -crate-type = ["cdylib", "staticlib"] +name = "automerge_core" +crate-type = ["staticlib"] bench = false doc = false diff --git a/rust/automerge-c/README.md b/rust/automerge-c/README.md index a9f097e2..1fbca3df 100644 --- a/rust/automerge-c/README.md +++ b/rust/automerge-c/README.md @@ -1,22 +1,29 @@ -automerge-c exposes an API to C that can either be used directly or as a basis -for other language bindings that have good support for calling into C functions. +# Overview -# Building +automerge-c exposes a C API that can either be used directly or as the basis +for other language bindings that have good support for calling C functions. -See the main README for instructions on getting your environment set up, then -you can use `./scripts/ci/cmake-build Release static` to build automerge-c. +# Installing -It will output two files: +See the main README for instructions on getting your environment set up and then +you can build the automerge-c library and install its constituent files within +a root directory of your choosing (e.g. "/usr/local") like so: +```shell +cmake -E make_directory automerge-c/build +cmake -S automerge-c -B automerge-c/build +cmake --build automerge-c/build +cmake --install automerge-c/build --prefix "/usr/local" +``` +Installation is important because the name, location and structure of CMake's +out-of-source build subdirectory is subject to change based on the platform and +the release version; generated headers like `automerge-c/config.h` and +`automerge-c/utils/enum_string.h` are only sure to be found within their +installed locations. 
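Once installed, a consumer can include the generated headers by their installed paths. The following is a sketch, not part of this patch: the `AUTOMERGE_C_` macro prefix is derived from the project name in CMakeLists.txt, and `AMstatusToString()` comes from the generated enum string helpers; the surrounding program is hypothetical.

```
#include <stdio.h>

#include <automerge-c/automerge.h>
#include <automerge-c/config.h>
#include <automerge-c/utils/enum_string.h>

int main(void) {
    /* Version macros generated by CMake from the project version. */
    printf("automerge-c %d.%d.%d\n", AUTOMERGE_C_MAJOR_VERSION,
           AUTOMERGE_C_MINOR_VERSION, AUTOMERGE_C_PATCH_VERSION);
    /* enum_string.h declares the generated AM*ToString()/AM*FromString() helpers. */
    printf("%s\n", AMstatusToString(AM_STATUS_OK));
    return 0;
}
```
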
-- ./build/Cargo/target/include/automerge-c/automerge.h -- ./build/Cargo/target/release/libautomerge.a - -To use these in your application you must arrange for your C compiler to find -these files, either by moving them to the right location on your computer, or -by configuring the compiler to reference these directories. - -- `export LDFLAGS=-L./build/Cargo/target/release -lautomerge` -- `export CFLAGS=-I./build/Cargo/target/include` +It's not obvious because they are versioned but the `Cargo.toml` and +`cbindgen.toml` configuration files are also generated in order to ensure that +the project name, project version and library name that they contain match those +specified within the top-level `CMakeLists.txt` file. If you'd like to cross compile the library for different platforms you can do so using [cross](https://github.com/cross-rs/cross). For example: @@ -25,134 +32,176 @@ using [cross](https://github.com/cross-rs/cross). For example: This will output a shared library in the directory `rust/target/aarch64-unknown-linux-gnu/release/`. -You can replace `aarch64-unknown-linux-gnu` with any [cross supported targets](https://github.com/cross-rs/cross#supported-targets). The targets below are known to work, though other targets are expected to work too: +You can replace `aarch64-unknown-linux-gnu` with any +[cross supported targets](https://github.com/cross-rs/cross#supported-targets). +The targets below are known to work, though other targets are expected to work +too: - `x86_64-apple-darwin` - `aarch64-apple-darwin` - `x86_64-unknown-linux-gnu` - `aarch64-unknown-linux-gnu` -As a caveat, the header file is currently 32/64-bit dependant. You can re-use it -for all 64-bit architectures, but you must generate a specific header for 32-bit -targets. +As a caveat, CMake generates the `automerge.h` header file in terms of the +processor architecture of the computer on which it was built so, for example, +don't use a header generated for a 64-bit processor if your target is a 32-bit +processor. # Usage -For full reference, read through `automerge.h`, or to get started quickly look -at the +You can build and view the C API's HTML reference documentation like so: +```shell +cmake -E make_directory automerge-c/build +cmake -S automerge-c -B automerge-c/build +cmake --build automerge-c/build --target automerge_docs +firefox automerge-c/build/src/html/index.html +``` + +To get started quickly, look at the [examples](https://github.com/automerge/automerge-rs/tree/main/rust/automerge-c/examples). -Almost all operations in automerge-c act on an AMdoc struct which you can get -from `AMcreate()` or `AMload()`. Operations on a given doc are not thread safe -so you must use a mutex or similar to avoid calling more than one function with -the same AMdoc pointer concurrently. +Almost all operations in automerge-c act on an Automerge document +(`AMdoc` struct) which is structurally similar to a JSON document. -As with all functions that either allocate memory, or could fail if given -invalid input, `AMcreate()` returns an `AMresult`. The `AMresult` contains the -returned doc (or error message), and must be freed with `AMfree()` after you are -done to avoid leaking memory. +You can get a document by calling either `AMcreate()` or `AMload()`. Operations +on a given document are not thread-safe so you must use a mutex or similar to +avoid calling more than one function on the same one concurrently. 
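For example, one minimal way to serialize access from several threads is shown below. This sketch is not part of the patch: it assumes POSIX threads, and `guarded_put` is a hypothetical helper name.

```
#include <pthread.h>

#include <automerge-c/automerge.h>

static pthread_mutex_t doc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every thread funnels its AMdoc operations through the same lock. */
void guarded_put(AMdoc* doc, char const* key, char const* value) {
    pthread_mutex_lock(&doc_lock);
    AMresultFree(AMmapPutStr(doc, AM_ROOT, AMstr(key), AMstr(value)));
    pthread_mutex_unlock(&doc_lock);
}
```
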
+A C API function that could succeed or fail returns a result (`AMresult` struct) +containing a status code (`AMstatus` enum) and either a sequence of at least one +item (`AMitem` struct) or a read-only view onto a UTF-8 error message string +(`AMbyteSpan` struct). +An item contains up to three components: an index within its parent object +(`AMbyteSpan` struct or `size_t`), a unique identifier (`AMobjId` struct) and a +value. +The result of a successful function call that doesn't produce any values will +contain a single item that is void (`AM_VAL_TYPE_VOID`). +A returned result **must** be passed to `AMresultFree()` once the item(s) or +error message it contains is no longer needed in order to avoid a memory leak. ``` -#include #include +#include +#include +#include int main(int argc, char** argv) { AMresult *docResult = AMcreate(NULL); if (AMresultStatus(docResult) != AM_STATUS_OK) { - printf("failed to create doc: %s", AMerrorMessage(docResult).src); + char* const err_msg = AMstrdup(AMresultError(docResult), NULL); + printf("failed to create doc: %s", err_msg); + free(err_msg); goto cleanup; } - AMdoc *doc = AMresultValue(docResult).doc; + AMdoc *doc; + AMitemToDoc(AMresultItem(docResult), &doc); // useful code goes here! cleanup: - AMfree(docResult); + AMresultFree(docResult); } ``` -If you are writing code in C directly, you can use the `AMpush()` helper -function to reduce the boilerplate of error handling and freeing for you (see -examples/quickstart.c). +If you are writing an application in C, the `AMstackItem()`, `AMstackItems()` +and `AMstackResult()` functions enable the lifetimes of anonymous results to be +centrally managed and allow the same validation logic to be reused without +relying upon the `goto` statement (see examples/quickstart.c). If you are wrapping automerge-c in another language, particularly one that has a -garbage collector, you can call `AMfree` within a finalizer to ensure that memory -is reclaimed when it is no longer needed. +garbage collector, you can call the `AMresultFree()` function within a finalizer +to ensure that memory is reclaimed when it is no longer needed. -An AMdoc wraps an automerge document which are very similar to JSON documents. -Automerge documents consist of a mutable root, which is always a map from string -keys to values. Values can have the following types: +Automerge documents consist of a mutable root which is always a map from string +keys to values. A value can be one of the following types: - A number of type double / int64_t / uint64_t -- An explicit true / false / nul -- An immutable utf-8 string (AMbyteSpan) -- An immutable array of arbitrary bytes (AMbyteSpan) -- A mutable map from string keys to values (AMmap) -- A mutable list of values (AMlist) -- A mutable string (AMtext) +- An explicit true / false / null +- An immutable UTF-8 string (`AMbyteSpan`). +- An immutable array of arbitrary bytes (`AMbyteSpan`). +- A mutable map from string keys to values. +- A mutable list of values. +- A mutable UTF-8 string. -If you read from a location in the document with no value a value with -`.tag == AM_VALUE_VOID` will be returned, but you cannot write such a value explicitly. +If you read from a location in the document with no value, an item with type +`AM_VAL_TYPE_VOID` will be returned, but you cannot write such a value +explicitly. -Under the hood, automerge references mutable objects by the internal object id, -and `AM_ROOT` is always the object id of the root value. 
+Under the hood, automerge references a mutable object by its object identifier +where `AM_ROOT` signifies a document's root map object. -There is a function to put each type of value into either a map or a list, and a -function to read the current value from a list. As (in general) collaborators +There are functions to put each type of value into either a map or a list, and +functions to read the current or a historical value from a map or a list. As (in general) collaborators may edit the document at any time, you cannot guarantee that the type of the -value at a given part of the document will stay the same. As a result reading -from the document will return an `AMvalue` union that you can inspect to -determine its type. +value at a given part of the document will stay the same. As a result, reading +from the document will return an `AMitem` struct that you can inspect to +determine the type of value that it contains. Strings in automerge-c are represented using an `AMbyteSpan` which contains a -pointer and a length. Strings must be valid utf-8 and may contain null bytes. -As a convenience you can use `AMstr()` to get the representation of a -null-terminated C string as an `AMbyteSpan`. +pointer and a length. Strings must be valid UTF-8 and may contain NUL (`0`) +characters. +For your convenience, you can call `AMstr()` to get the `AMbyteSpan` struct +equivalent of a null-terminated byte string or `AMstrdup()` to get the +representation of an `AMbyteSpan` struct as a null-terminated byte string +wherein its NUL characters have been removed/replaced as you choose. Putting all of that together, to read and write from the root of the document you can do this: ``` -#include #include +#include +#include +#include int main(int argc, char** argv) { // ...previous example... - AMdoc *doc = AMresultValue(docResult).doc; + AMdoc *doc; + AMitemToDoc(AMresultItem(docResult), &doc); AMresult *putResult = AMmapPutStr(doc, AM_ROOT, AMstr("key"), AMstr("value")); if (AMresultStatus(putResult) != AM_STATUS_OK) { - printf("failed to put: %s", AMerrorMessage(putResult).src); + char* const err_msg = AMstrdup(AMresultError(putResult), NULL); + printf("failed to put: %s", err_msg); + free(err_msg); goto cleanup; } AMresult *getResult = AMmapGet(doc, AM_ROOT, AMstr("key"), NULL); if (AMresultStatus(getResult) != AM_STATUS_OK) { - printf("failed to get: %s", AMerrorMessage(getResult).src); + char* const err_msg = AMstrdup(AMresultError(putResult), NULL); + printf("failed to get: %s", err_msg); + free(err_msg); goto cleanup; } - AMvalue got = AMresultValue(getResult); - if (got.tag != AM_VALUE_STR) { + AMbyteSpan got; + if (AMitemToStr(AMresultItem(getResult), &got)) { + char* const c_str = AMstrdup(got, NULL); + printf("Got %zu-character string \"%s\"", got.count, c_str); + free(c_str); + } else { printf("expected to read a string!"); goto cleanup; } - printf("Got %zu-character string `%s`", got.str.count, got.str.src); cleanup: - AMfree(getResult); - AMfree(putResult); - AMfree(docResult); + AMresultFree(getResult); + AMresultFree(putResult); + AMresultFree(docResult); } ``` -Functions that do not return an `AMresult` (for example `AMmapItemValue()`) do -not allocate memory, but continue to reference memory that was previously -allocated. It's thus important to keep the original `AMresult` alive (in this -case the one returned by `AMmapRange()`) until after you are done with the return -values of these functions. 
+Functions that do not return an `AMresult` (for example `AMitemKey()`) do +not allocate memory but rather reference memory that was previously +allocated. It's therefore important to keep the original `AMresult` alive (in +this case the one returned by `AMmapRange()`) until after you are finished with +the items that it contains. However, the memory for an individual `AMitem` can +be shared with a new `AMresult` by calling `AMitemResult()` on it. In other +words, a select group of items can be filtered out of a collection and only each +one's corresponding `AMresult` must be kept alive from that point forward; the +originating collection's `AMresult` can be safely freed. Beyond that, good luck! diff --git a/rust/automerge-c/cbindgen.toml b/rust/automerge-c/cbindgen.toml index ada7f48d..21eaaadd 100644 --- a/rust/automerge-c/cbindgen.toml +++ b/rust/automerge-c/cbindgen.toml @@ -1,7 +1,7 @@ after_includes = """\n /** * \\defgroup enumerations Public Enumerations - Symbolic names for integer constants. + * Symbolic names for integer constants. */ /** @@ -12,21 +12,23 @@ after_includes = """\n #define AM_ROOT NULL /** - * \\memberof AMchangeHash + * \\memberof AMdoc * \\def AM_CHANGE_HASH_SIZE * \\brief The count of bytes in a change hash. */ #define AM_CHANGE_HASH_SIZE 32 """ -autogen_warning = "/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. */" +autogen_warning = """ +/** + * \\file + * \\brief All constants, functions and types in the core Automerge C API. + * + * \\warning This file is auto-generated by cbindgen. + */ +""" documentation = true documentation_style = "doxy" -header = """ -/** \\file - * All constants, functions and types in the Automerge library's C API. - */ - """ -include_guard = "AUTOMERGE_H" +include_guard = "AUTOMERGE_C_H" includes = [] language = "C" line_length = 140 diff --git a/rust/automerge-c/cmake/Cargo.toml.in b/rust/automerge-c/cmake/Cargo.toml.in new file mode 100644 index 00000000..781e2fef --- /dev/null +++ b/rust/automerge-c/cmake/Cargo.toml.in @@ -0,0 +1,22 @@ +[package] +name = "@PROJECT_NAME@" +version = "@PROJECT_VERSION@" +authors = ["Orion Henry ", "Jason Kankiewicz "] +edition = "2021" +license = "MIT" +rust-version = "1.57.0" + +[lib] +name = "@BINDINGS_NAME@" +crate-type = ["staticlib"] +bench = false +doc = false + +[dependencies] +@LIBRARY_NAME@ = { path = "../@LIBRARY_NAME@" } +hex = "^0.4.3" +libc = "^0.2" +smol_str = "^0.1.21" + +[build-dependencies] +cbindgen = "^0.24" diff --git a/rust/automerge-c/cmake/cbindgen.toml.in b/rust/automerge-c/cmake/cbindgen.toml.in new file mode 100644 index 00000000..5122b75c --- /dev/null +++ b/rust/automerge-c/cmake/cbindgen.toml.in @@ -0,0 +1,48 @@ +after_includes = """\n +/** + * \\defgroup enumerations Public Enumerations + * Symbolic names for integer constants. + */ + +/** + * \\memberof AMdoc + * \\def AM_ROOT + * \\brief The root object of a document. + */ +#define AM_ROOT NULL + +/** + * \\memberof AMdoc + * \\def AM_CHANGE_HASH_SIZE + * \\brief The count of bytes in a change hash. + */ +#define AM_CHANGE_HASH_SIZE 32 +""" +autogen_warning = """ +/** + * \\file + * \\brief All constants, functions and types in the core Automerge C API. + * + * \\warning This file is auto-generated by cbindgen. 
+ */ +""" +documentation = true +documentation_style = "doxy" +include_guard = "@INCLUDE_GUARD_PREFIX@_H" +includes = [] +language = "C" +line_length = 140 +no_includes = true +style = "both" +sys_includes = ["stdbool.h", "stddef.h", "stdint.h", "time.h"] +usize_is_size_t = true + +[enum] +derive_const_casts = true +enum_class = true +must_use = "MUST_USE_ENUM" +prefix_with_name = true +rename_variants = "ScreamingSnakeCase" + +[export] +item_types = ["constants", "enums", "functions", "opaque", "structs", "typedefs"] diff --git a/rust/automerge-c/cmake/config.h.in b/rust/automerge-c/cmake/config.h.in index 44ba5213..40482cb9 100644 --- a/rust/automerge-c/cmake/config.h.in +++ b/rust/automerge-c/cmake/config.h.in @@ -1,14 +1,35 @@ -#ifndef @SYMBOL_PREFIX@_CONFIG_H -#define @SYMBOL_PREFIX@_CONFIG_H - -/* This header is auto-generated by CMake. */ +#ifndef @INCLUDE_GUARD_PREFIX@_CONFIG_H +#define @INCLUDE_GUARD_PREFIX@_CONFIG_H +/** + * \file + * \brief Configuration pararameters defined by the build system. + * + * \warning This file is auto-generated by CMake. + */ +/** + * \def @SYMBOL_PREFIX@_VERSION + * \brief Denotes a semantic version of the form {MAJOR}{MINOR}{PATCH} as three, + * two-digit decimal numbers without leading zeros (e.g. 100 is 0.1.0). + */ #define @SYMBOL_PREFIX@_VERSION @INTEGER_PROJECT_VERSION@ +/** + * \def @SYMBOL_PREFIX@_MAJOR_VERSION + * \brief Denotes a semantic major version as a decimal number. + */ #define @SYMBOL_PREFIX@_MAJOR_VERSION (@SYMBOL_PREFIX@_VERSION / 100000) +/** + * \def @SYMBOL_PREFIX@_MINOR_VERSION + * \brief Denotes a semantic minor version as a decimal number. + */ #define @SYMBOL_PREFIX@_MINOR_VERSION ((@SYMBOL_PREFIX@_VERSION / 100) % 1000) +/** + * \def @SYMBOL_PREFIX@_PATCH_VERSION + * \brief Denotes a semantic patch version as a decimal number. + */ #define @SYMBOL_PREFIX@_PATCH_VERSION (@SYMBOL_PREFIX@_VERSION % 100) -#endif /* @SYMBOL_PREFIX@_CONFIG_H */ +#endif /* @INCLUDE_GUARD_PREFIX@_CONFIG_H */ diff --git a/rust/automerge-c/cmake/enum-string-functions-gen.cmake b/rust/automerge-c/cmake/enum-string-functions-gen.cmake new file mode 100644 index 00000000..77080e8d --- /dev/null +++ b/rust/automerge-c/cmake/enum-string-functions-gen.cmake @@ -0,0 +1,183 @@ +# This CMake script is used to generate a header and a source file for utility +# functions that convert the tags of generated enum types into strings and +# strings into the tags of generated enum types. +cmake_minimum_required(VERSION 3.23 FATAL_ERROR) + +# Seeks the starting line of the source enum's declaration. +macro(seek_enum_mode) + if (line MATCHES "^(typedef[ \t]+)?enum ") + string(REGEX REPLACE "^enum ([0-9a-zA-Z_]+).*$" "\\1" enum_name "${line}") + set(mode "read_tags") + endif() +endmacro() + +# Scans the input for the current enum's tags. +macro(read_tags_mode) + if(line MATCHES "^}") + set(mode "generate") + elseif(line MATCHES "^[A-Z0-9_]+.*$") + string(REGEX REPLACE "^([A-Za-z0-9_]+).*$" "\\1" tmp "${line}") + list(APPEND enum_tags "${tmp}") + endif() +endmacro() + +macro(write_header_file) + # Generate a to-string function declaration. + list(APPEND header_body + "/**\n" + " * \\ingroup enumerations\n" + " * \\brief Gets the string representation of an `${enum_name}` enum tag.\n" + " *\n" + " * \\param[in] tag An `${enum_name}` enum tag.\n" + " * \\return A null-terminated byte string.\n" + " */\n" + "char const* ${enum_name}ToString(${enum_name} const tag)\;\n" + "\n") + # Generate a from-string function declaration. 
+ list(APPEND header_body + "/**\n" + " * \\ingroup enumerations\n" + " * \\brief Gets an `${enum_name}` enum tag from its string representation.\n" + " *\n" + " * \\param[out] dest An `${enum_name}` enum tag pointer.\n" + " * \\param[in] src A null-terminated byte string.\n" + " * \\return `true` if \\p src matches the string representation of an\n" + " * `${enum_name}` enum tag, `false` otherwise.\n" + " */\n" + "bool ${enum_name}FromString(${enum_name}* dest, char const* const src)\;\n" + "\n") +endmacro() + +macro(write_source_file) + # Generate a to-string function implementation. + list(APPEND source_body + "char const* ${enum_name}ToString(${enum_name} const tag) {\n" + " switch (tag) {\n" + " default:\n" + " return \"???\"\;\n") + foreach(label IN LISTS enum_tags) + list(APPEND source_body + " case ${label}:\n" + " return \"${label}\"\;\n") + endforeach() + list(APPEND source_body + " }\n" + "}\n" + "\n") + # Generate a from-string function implementation. + list(APPEND source_body + "bool ${enum_name}FromString(${enum_name}* dest, char const* const src) {\n") + foreach(label IN LISTS enum_tags) + list(APPEND source_body + " if (!strcmp(src, \"${label}\")) {\n" + " *dest = ${label}\;\n" + " return true\;\n" + " }\n") + endforeach() + list(APPEND source_body + " return false\;\n" + "}\n" + "\n") +endmacro() + +function(main) + set(header_body "") + # File header and includes. + list(APPEND header_body + "#ifndef ${include_guard}\n" + "#define ${include_guard}\n" + "/**\n" + " * \\file\n" + " * \\brief Utility functions for converting enum tags into null-terminated\n" + " * byte strings and vice versa.\n" + " *\n" + " * \\warning This file is auto-generated by CMake.\n" + " */\n" + "\n" + "#include \n" + "\n" + "#include <${library_include}>\n" + "\n") + set(source_body "") + # File includes. + list(APPEND source_body + "/** \\warning This file is auto-generated by CMake. */\n" + "\n" + "#include \"stdio.h\"\n" + "#include \"string.h\"\n" + "\n" + "#include <${header_include}>\n" + "\n") + set(enum_name "") + set(enum_tags "") + set(mode "seek_enum") + file(STRINGS "${input_path}" lines) + foreach(line IN LISTS lines) + string(REGEX REPLACE "^(.+)(//.*)?" "\\1" line "${line}") + string(STRIP "${line}" line) + if(mode STREQUAL "seek_enum") + seek_enum_mode() + elseif(mode STREQUAL "read_tags") + read_tags_mode() + else() + # The end of the enum declaration was reached. + if(NOT enum_name) + # The end of the file was reached. + return() + endif() + if(NOT enum_tags) + message(FATAL_ERROR "No tags found for `${enum_name}`.") + endif() + string(TOLOWER "${enum_name}" output_stem_prefix) + string(CONCAT output_stem "${output_stem_prefix}" "_string") + cmake_path(REPLACE_EXTENSION output_stem "h" OUTPUT_VARIABLE output_header_basename) + write_header_file() + write_source_file() + set(enum_name "") + set(enum_tags "") + set(mode "seek_enum") + endif() + endforeach() + # File footer. 
+ list(APPEND header_body + "#endif /* ${include_guard} */\n") + message(STATUS "Generating header file \"${output_header_path}\"...") + file(WRITE "${output_header_path}" ${header_body}) + message(STATUS "Generating source file \"${output_source_path}\"...") + file(WRITE "${output_source_path}" ${source_body}) +endfunction() + +if(NOT DEFINED PROJECT_NAME) + message(FATAL_ERROR "Variable PROJECT_NAME is not defined.") +elseif(NOT DEFINED LIBRARY_NAME) + message(FATAL_ERROR "Variable LIBRARY_NAME is not defined.") +elseif(NOT DEFINED SUBDIR) + message(FATAL_ERROR "Variable SUBDIR is not defined.") +elseif(${CMAKE_ARGC} LESS 9) + message(FATAL_ERROR "Too few arguments.") +elseif(${CMAKE_ARGC} GREATER 10) + message(FATAL_ERROR "Too many arguments.") +elseif(NOT EXISTS ${CMAKE_ARGV5}) + message(FATAL_ERROR "Input header \"${CMAKE_ARGV7}\" not found.") +endif() +cmake_path(CONVERT "${CMAKE_ARGV7}" TO_CMAKE_PATH_LIST input_path NORMALIZE) +cmake_path(CONVERT "${CMAKE_ARGV8}" TO_CMAKE_PATH_LIST output_header_path NORMALIZE) +cmake_path(CONVERT "${CMAKE_ARGV9}" TO_CMAKE_PATH_LIST output_source_path NORMALIZE) +string(TOLOWER "${PROJECT_NAME}" project_root) +cmake_path(CONVERT "${SUBDIR}" TO_CMAKE_PATH_LIST project_subdir NORMALIZE) +string(TOLOWER "${project_subdir}" project_subdir) +string(TOLOWER "${LIBRARY_NAME}" library_stem) +cmake_path(REPLACE_EXTENSION library_stem "h" OUTPUT_VARIABLE library_basename) +string(JOIN "/" library_include "${project_root}" "${library_basename}") +string(TOUPPER "${PROJECT_NAME}" project_name_upper) +string(TOUPPER "${project_subdir}" include_guard_infix) +string(REGEX REPLACE "/" "_" include_guard_infix "${include_guard_infix}") +string(REGEX REPLACE "-" "_" include_guard_prefix "${project_name_upper}") +string(JOIN "_" include_guard_prefix "${include_guard_prefix}" "${include_guard_infix}") +string(JOIN "/" output_header_prefix "${project_root}" "${project_subdir}") +cmake_path(GET output_header_path STEM output_header_stem) +string(TOUPPER "${output_header_stem}" include_guard_stem) +string(JOIN "_" include_guard "${include_guard_prefix}" "${include_guard_stem}" "H") +cmake_path(GET output_header_path FILENAME output_header_basename) +string(JOIN "/" header_include "${output_header_prefix}" "${output_header_basename}") +main() diff --git a/rust/automerge-c/cmake/file_regex_replace.cmake b/rust/automerge-c/cmake/file-regex-replace.cmake similarity index 87% rename from rust/automerge-c/cmake/file_regex_replace.cmake rename to rust/automerge-c/cmake/file-regex-replace.cmake index 27306458..09005bc2 100644 --- a/rust/automerge-c/cmake/file_regex_replace.cmake +++ b/rust/automerge-c/cmake/file-regex-replace.cmake @@ -1,4 +1,6 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +# This CMake script is used to perform string substitutions within a generated +# file. +cmake_minimum_required(VERSION 3.23 FATAL_ERROR) if(NOT DEFINED MATCH_REGEX) message(FATAL_ERROR "Variable \"MATCH_REGEX\" is not defined.") diff --git a/rust/automerge-c/cmake/file_touch.cmake b/rust/automerge-c/cmake/file-touch.cmake similarity index 82% rename from rust/automerge-c/cmake/file_touch.cmake rename to rust/automerge-c/cmake/file-touch.cmake index 087d59b6..2c196755 100644 --- a/rust/automerge-c/cmake/file_touch.cmake +++ b/rust/automerge-c/cmake/file-touch.cmake @@ -1,4 +1,6 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +# This CMake script is used to force Cargo to regenerate the header file for the +# core bindings after the out-of-source build directory has been cleaned. 
+cmake_minimum_required(VERSION 3.23 FATAL_ERROR) if(NOT DEFINED CONDITION) message(FATAL_ERROR "Variable \"CONDITION\" is not defined.") diff --git a/rust/automerge-c/docs/CMakeLists.txt b/rust/automerge-c/docs/CMakeLists.txt new file mode 100644 index 00000000..1d94c872 --- /dev/null +++ b/rust/automerge-c/docs/CMakeLists.txt @@ -0,0 +1,35 @@ +find_package(Doxygen OPTIONAL_COMPONENTS dot) + +if(DOXYGEN_FOUND) + set(DOXYGEN_ALIASES "installed_headerfile=\\headerfile ${LIBRARY_NAME}.h <${PROJECT_NAME}/${LIBRARY_NAME}.h>") + + set(DOXYGEN_GENERATE_LATEX YES) + + set(DOXYGEN_PDF_HYPERLINKS YES) + + set(DOXYGEN_PROJECT_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/img/brandmark.png") + + set(DOXYGEN_SORT_BRIEF_DOCS YES) + + set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "${CMAKE_SOURCE_DIR}/README.md") + + doxygen_add_docs( + ${LIBRARY_NAME}_docs + "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" + "${CBINDGEN_TARGET_DIR}/config.h" + "${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h" + "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/result.h" + "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack_callback_data.h" + "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack.h" + "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/string.h" + "${CMAKE_SOURCE_DIR}/README.md" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + COMMENT "Producing documentation with Doxygen..." + ) + + # \note A Doxygen input file isn't a file-level dependency so the Doxygen + # command must instead depend upon a target that either outputs the + # file or depends upon it also or it will just output an error message + # when it can't be found. + add_dependencies(${LIBRARY_NAME}_docs ${BINDINGS_NAME}_artifacts ${LIBRARY_NAME}_utilities) +endif() diff --git a/rust/automerge-c/img/brandmark.png b/rust/automerge-c/docs/img/brandmark.png similarity index 100% rename from rust/automerge-c/img/brandmark.png rename to rust/automerge-c/docs/img/brandmark.png diff --git a/rust/automerge-c/examples/CMakeLists.txt b/rust/automerge-c/examples/CMakeLists.txt index 3395124c..f080237b 100644 --- a/rust/automerge-c/examples/CMakeLists.txt +++ b/rust/automerge-c/examples/CMakeLists.txt @@ -1,41 +1,39 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) - add_executable( - example_quickstart + ${LIBRARY_NAME}_quickstart quickstart.c ) -set_target_properties(example_quickstart PROPERTIES LINKER_LANGUAGE C) +set_target_properties(${LIBRARY_NAME}_quickstart PROPERTIES LINKER_LANGUAGE C) # \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't # contain a non-existent path so its build-time include directory # must be specified for all of its dependent targets instead. 
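To make the generated `enum_string.h` listed among the documentation inputs above more concrete, here is a hedged usage sketch. The function names follow the `<Enum>ToString()`/`<Enum>FromString()` pattern that the generator script emits (`AMvalTypeToString()` also appears in the updated quickstart later in this patch); the include paths are assumptions based on the install layout.

```c
#include <stdio.h>

#include <automerge-c/automerge.h>           /* include paths assumed */
#include <automerge-c/utils/enum_string.h>

int main(void) {
    /* Tag -> string: an unknown tag comes back as "???" per the generated switch. */
    printf("%s\n", AMvalTypeToString(AM_VAL_TYPE_DOC));

    /* String -> tag: returns false when the name matches no tag. */
    AMvalType tag;
    if (AMvalTypeFromString(&tag, "AM_VAL_TYPE_BYTES")) {
        printf("parsed tag %d\n", (int)tag);
    }
    return 0;
}
```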
target_include_directories( - example_quickstart + ${LIBRARY_NAME}_quickstart PRIVATE "$" ) -target_link_libraries(example_quickstart PRIVATE ${LIBRARY_NAME}) +target_link_libraries(${LIBRARY_NAME}_quickstart PRIVATE ${LIBRARY_NAME}) -add_dependencies(example_quickstart ${LIBRARY_NAME}_artifacts) +add_dependencies(${LIBRARY_NAME}_quickstart ${BINDINGS_NAME}_artifacts) if(BUILD_SHARED_LIBS AND WIN32) add_custom_command( - TARGET example_quickstart + TARGET ${LIBRARY_NAME}_quickstart POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX} - ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_BINARY_DIR} COMMENT "Copying the DLL built by Cargo into the examples directory..." VERBATIM ) endif() add_custom_command( - TARGET example_quickstart + TARGET ${LIBRARY_NAME}_quickstart POST_BUILD COMMAND - example_quickstart + ${LIBRARY_NAME}_quickstart COMMENT "Running the example quickstart..." VERBATIM diff --git a/rust/automerge-c/examples/README.md b/rust/automerge-c/examples/README.md index 17aa2227..17e69412 100644 --- a/rust/automerge-c/examples/README.md +++ b/rust/automerge-c/examples/README.md @@ -5,5 +5,5 @@ ```shell cmake -E make_directory automerge-c/build cmake -S automerge-c -B automerge-c/build -cmake --build automerge-c/build --target example_quickstart +cmake --build automerge-c/build --target automerge_quickstart ``` diff --git a/rust/automerge-c/examples/quickstart.c b/rust/automerge-c/examples/quickstart.c index bc418511..ab6769ef 100644 --- a/rust/automerge-c/examples/quickstart.c +++ b/rust/automerge-c/examples/quickstart.c @@ -3,152 +3,127 @@ #include #include +#include +#include +#include +#include -static void abort_cb(AMresultStack**, uint8_t); +static bool abort_cb(AMstack**, void*); /** * \brief Based on https://automerge.github.io/docs/quickstart */ int main(int argc, char** argv) { - AMresultStack* stack = NULL; - AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, abort_cb).doc; - AMobjId const* const cards = AMpush(&stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("cards"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - abort_cb).obj_id; - AMobjId const* const card1 = AMpush(&stack, - AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - abort_cb).obj_id; - AMfree(AMmapPutStr(doc1, card1, AMstr("title"), AMstr("Rewrite everything in Clojure"))); - AMfree(AMmapPutBool(doc1, card1, AMstr("done"), false)); - AMobjId const* const card2 = AMpush(&stack, - AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - abort_cb).obj_id; - AMfree(AMmapPutStr(doc1, card2, AMstr("title"), AMstr("Rewrite everything in Haskell"))); - AMfree(AMmapPutBool(doc1, card2, AMstr("done"), false)); - AMfree(AMcommit(doc1, AMstr("Add card"), NULL)); + AMstack* stack = NULL; + AMdoc* doc1; + AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1); + AMobjId const* const cards = + AMitemObjId(AMstackItem(&stack, AMmapPutObject(doc1, AM_ROOT, AMstr("cards"), AM_OBJ_TYPE_LIST), abort_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMobjId const* const card1 = + AMitemObjId(AMstackItem(&stack, AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), abort_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMmapPutStr(doc1, card1, AMstr("title"), AMstr("Rewrite everything in Clojure")), abort_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutBool(doc1, card1, 
AMstr("done"), false), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMobjId const* const card2 = + AMitemObjId(AMstackItem(&stack, AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), abort_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMmapPutStr(doc1, card2, AMstr("title"), AMstr("Rewrite everything in Haskell")), abort_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutBool(doc1, card2, AMstr("done"), false), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr("Add card"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMdoc* doc2 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, abort_cb).doc; - AMfree(AMmerge(doc2, doc1)); + AMdoc* doc2; + AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2); + AMstackItem(NULL, AMmerge(doc2, doc1), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMbyteSpan const binary = AMpush(&stack, AMsave(doc1), AM_VALUE_BYTES, abort_cb).bytes; - doc2 = AMpush(&stack, AMload(binary.src, binary.count), AM_VALUE_DOC, abort_cb).doc; + AMbyteSpan binary; + AMitemToBytes(AMstackItem(&stack, AMsave(doc1), abort_cb, AMexpect(AM_VAL_TYPE_BYTES)), &binary); + AMitemToDoc(AMstackItem(&stack, AMload(binary.src, binary.count), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2); - AMfree(AMmapPutBool(doc1, card1, AMstr("done"), true)); - AMfree(AMcommit(doc1, AMstr("Mark card as done"), NULL)); + AMstackItem(NULL, AMmapPutBool(doc1, card1, AMstr("done"), true), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr("Mark card as done"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMlistDelete(doc2, cards, 0)); - AMfree(AMcommit(doc2, AMstr("Delete card"), NULL)); + AMstackItem(NULL, AMlistDelete(doc2, cards, 0), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc2, AMstr("Delete card"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMmerge(doc1, doc2)); + AMstackItem(NULL, AMmerge(doc1, doc2), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMchanges changes = AMpush(&stack, AMgetChanges(doc1, NULL), AM_VALUE_CHANGES, abort_cb).changes; - AMchange const* change = NULL; - while ((change = AMchangesNext(&changes, 1)) != NULL) { - AMbyteSpan const change_hash = AMchangeHash(change); - AMchangeHashes const heads = AMpush(&stack, - AMchangeHashesInit(&change_hash, 1), - AM_VALUE_CHANGE_HASHES, - abort_cb).change_hashes; - AMbyteSpan const msg = AMchangeMessage(change); - char* const c_msg = calloc(1, msg.count + 1); - strncpy(c_msg, msg.src, msg.count); - printf("%s %ld\n", c_msg, AMobjSize(doc1, cards, &heads)); + AMitems changes = AMstackItems(&stack, AMgetChanges(doc1, NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMitem* item = NULL; + while ((item = AMitemsNext(&changes, 1)) != NULL) { + AMchange const* change; + AMitemToChange(item, &change); + AMitems const heads = AMstackItems(&stack, AMitemFromChangeHash(AMchangeHash(change)), abort_cb, + AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + char* const c_msg = AMstrdup(AMchangeMessage(change), NULL); + printf("%s %zu\n", c_msg, AMobjSize(doc1, cards, &heads)); free(c_msg); } - AMfreeStack(&stack); + AMstackFree(&stack); } -static char const* discriminant_suffix(AMvalueVariant const); - /** - * \brief Prints an error message to `stderr`, deallocates all results in the - * given stack and exits. 
+ * \brief Examines the result at the top of the given stack and, if it's + * invalid, prints an error message to `stderr`, deallocates all results + * in the stack and exits. * - * \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. - * \param[in] discriminant An `AMvalueVariant` enum tag. - * \pre \p stack` != NULL`. - * \post `*stack == NULL`. + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] data A pointer to an owned `AMstackCallbackData` struct or `NULL`. + * \return `true` if the top `AMresult` in \p stack is valid, `false` otherwise. + * \pre \p stack `!= NULL`. */ -static void abort_cb(AMresultStack** stack, uint8_t discriminant) { +static bool abort_cb(AMstack** stack, void* data) { static char buffer[512] = {0}; char const* suffix = NULL; if (!stack) { suffix = "Stack*"; - } - else if (!*stack) { + } else if (!*stack) { suffix = "Stack"; - } - else if (!(*stack)->result) { + } else if (!(*stack)->result) { suffix = ""; } if (suffix) { - fprintf(stderr, "Null `AMresult%s*`.", suffix); - AMfreeStack(stack); + fprintf(stderr, "Null `AMresult%s*`.\n", suffix); + AMstackFree(stack); exit(EXIT_FAILURE); - return; + return false; } AMstatus const status = AMresultStatus((*stack)->result); switch (status) { - case AM_STATUS_ERROR: strcpy(buffer, "Error"); break; - case AM_STATUS_INVALID_RESULT: strcpy(buffer, "Invalid result"); break; - case AM_STATUS_OK: break; - default: sprintf(buffer, "Unknown `AMstatus` tag %d", status); + case AM_STATUS_ERROR: + strcpy(buffer, "Error"); + break; + case AM_STATUS_INVALID_RESULT: + strcpy(buffer, "Invalid result"); + break; + case AM_STATUS_OK: + break; + default: + sprintf(buffer, "Unknown `AMstatus` tag %d", status); } if (buffer[0]) { - AMbyteSpan const msg = AMerrorMessage((*stack)->result); - char* const c_msg = calloc(1, msg.count + 1); - strncpy(c_msg, msg.src, msg.count); - fprintf(stderr, "%s; %s.", buffer, c_msg); + char* const c_msg = AMstrdup(AMresultError((*stack)->result), NULL); + fprintf(stderr, "%s; %s.\n", buffer, c_msg); free(c_msg); - AMfreeStack(stack); + AMstackFree(stack); exit(EXIT_FAILURE); - return; + return false; } - AMvalue const value = AMresultValue((*stack)->result); - fprintf(stderr, "Unexpected tag `AM_VALUE_%s` (%d); expected `AM_VALUE_%s`.", - discriminant_suffix(value.tag), - value.tag, - discriminant_suffix(discriminant)); - AMfreeStack(stack); - exit(EXIT_FAILURE); -} - -/** - * \brief Gets the suffix for a discriminant's corresponding string - * representation. - * - * \param[in] discriminant An `AMvalueVariant` enum tag. - * \return A UTF-8 string. 
- */ -static char const* discriminant_suffix(AMvalueVariant const discriminant) { - char const* suffix = NULL; - switch (discriminant) { - case AM_VALUE_ACTOR_ID: suffix = "ACTOR_ID"; break; - case AM_VALUE_BOOLEAN: suffix = "BOOLEAN"; break; - case AM_VALUE_BYTES: suffix = "BYTES"; break; - case AM_VALUE_CHANGE_HASHES: suffix = "CHANGE_HASHES"; break; - case AM_VALUE_CHANGES: suffix = "CHANGES"; break; - case AM_VALUE_COUNTER: suffix = "COUNTER"; break; - case AM_VALUE_DOC: suffix = "DOC"; break; - case AM_VALUE_F64: suffix = "F64"; break; - case AM_VALUE_INT: suffix = "INT"; break; - case AM_VALUE_LIST_ITEMS: suffix = "LIST_ITEMS"; break; - case AM_VALUE_MAP_ITEMS: suffix = "MAP_ITEMS"; break; - case AM_VALUE_NULL: suffix = "NULL"; break; - case AM_VALUE_OBJ_ID: suffix = "OBJ_ID"; break; - case AM_VALUE_OBJ_ITEMS: suffix = "OBJ_ITEMS"; break; - case AM_VALUE_STR: suffix = "STR"; break; - case AM_VALUE_STRS: suffix = "STRINGS"; break; - case AM_VALUE_SYNC_MESSAGE: suffix = "SYNC_MESSAGE"; break; - case AM_VALUE_SYNC_STATE: suffix = "SYNC_STATE"; break; - case AM_VALUE_TIMESTAMP: suffix = "TIMESTAMP"; break; - case AM_VALUE_UINT: suffix = "UINT"; break; - case AM_VALUE_VOID: suffix = "VOID"; break; - default: suffix = "..."; + if (data) { + AMstackCallbackData* sc_data = (AMstackCallbackData*)data; + AMvalType const tag = AMitemValType(AMresultItem((*stack)->result)); + if (tag != sc_data->bitmask) { + fprintf(stderr, "Unexpected tag `%s` (%d) instead of `%s` at %s:%d.\n", AMvalTypeToString(tag), tag, + AMvalTypeToString(sc_data->bitmask), sc_data->file, sc_data->line); + free(sc_data); + AMstackFree(stack); + exit(EXIT_FAILURE); + return false; + } } - return suffix; + free(data); + return true; } diff --git a/rust/automerge-c/include/automerge-c/utils/result.h b/rust/automerge-c/include/automerge-c/utils/result.h new file mode 100644 index 00000000..ab8a2f93 --- /dev/null +++ b/rust/automerge-c/include/automerge-c/utils/result.h @@ -0,0 +1,30 @@ +#ifndef AUTOMERGE_C_UTILS_RESULT_H +#define AUTOMERGE_C_UTILS_RESULT_H +/** + * \file + * \brief Utility functions for use with `AMresult` structs. + */ + +#include + +#include + +/** + * \brief Transfers the items within an arbitrary list of results into a + * new result in their order of specification. + * \param[in] count The count of subsequent arguments. + * \param[in] ... A \p count list of arguments, each of which is a pointer to + * an `AMresult` struct whose items will be transferred out of it + * and which is subsequently freed. + * \return A pointer to an `AMresult` struct or `NULL`. + * \pre `∀𝑥 ∈` \p ... `, AMresultStatus(𝑥) == AM_STATUS_OK` + * \post `(∃𝑥 ∈` \p ... `, AMresultStatus(𝑥) != AM_STATUS_OK) -> NULL` + * \attention All `AMresult` struct pointer arguments are passed to + * `AMresultFree()` regardless of success; use `AMresultCat()` + * instead if you wish to pass them to `AMresultFree()` yourself. + * \warning The returned `AMresult` struct pointer must be passed to + * `AMresultFree()` in order to avoid a memory leak. 
+ */ +AMresult* AMresultFrom(int count, ...); + +#endif /* AUTOMERGE_C_UTILS_RESULT_H */ diff --git a/rust/automerge-c/include/automerge-c/utils/stack.h b/rust/automerge-c/include/automerge-c/utils/stack.h new file mode 100644 index 00000000..a8e9fd08 --- /dev/null +++ b/rust/automerge-c/include/automerge-c/utils/stack.h @@ -0,0 +1,130 @@ +#ifndef AUTOMERGE_C_UTILS_STACK_H +#define AUTOMERGE_C_UTILS_STACK_H +/** + * \file + * \brief Utility data structures and functions for hiding `AMresult` structs, + * managing their lifetimes, and automatically applying custom + * validation logic to the `AMitem` structs that they contain. + * + * \note The `AMstack` struct and its related functions drastically reduce the + * need for boilerplate code and/or `goto` statement usage within a C + * application but a higher-level programming language offers even better + * ways to do the same things. + */ + +#include + +/** + * \struct AMstack + * \brief A node in a singly-linked list of result pointers. + */ +typedef struct AMstack { + /** A result to be deallocated. */ + AMresult* result; + /** The previous node in the singly-linked list or `NULL`. */ + struct AMstack* prev; +} AMstack; + +/** + * \memberof AMstack + * \brief The prototype of a function that examines the result at the top of + * the given stack in terms of some arbitrary data. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] data A pointer to arbitrary data or `NULL`. + * \return `true` if the top `AMresult` struct in \p stack is valid, `false` + * otherwise. + * \pre \p stack `!= NULL`. + */ +typedef bool (*AMstackCallback)(AMstack** stack, void* data); + +/** + * \memberof AMstack + * \brief Deallocates the storage for a stack of results. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \pre \p stack `!= NULL` + * \post `*stack == NULL` + */ +void AMstackFree(AMstack** stack); + +/** + * \memberof AMstack + * \brief Gets a result from the stack after removing it. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] result A pointer to the `AMresult` to be popped or `NULL` to + * select the top result in \p stack. + * \return A pointer to an `AMresult` struct or `NULL`. + * \pre \p stack `!= NULL` + * \warning The returned `AMresult` struct pointer must be passed to + * `AMresultFree()` in order to avoid a memory leak. + */ +AMresult* AMstackPop(AMstack** stack, AMresult const* result); + +/** + * \memberof AMstack + * \brief Pushes the given result onto the given stack, calls the given + * callback with the given data to validate it and then either gets the + * result if it's valid or gets `NULL` instead. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] result A pointer to an `AMresult` struct. + * \param[in] callback A pointer to a function with the same signature as + * `AMstackCallback()` or `NULL`. + * \param[in] data A pointer to arbitrary data or `NULL` which is passed to + * \p callback. + * \return \p result or `NULL`. + * \warning If \p stack `== NULL` then \p result is deallocated in order to + * avoid a memory leak. 
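As a usage note for `AMresultFrom()` above, a hedged sketch: two results are merged into one and, per the attention note, only the combined result still needs to be freed afterwards. The include paths are assumptions based on the install layout.

```c
#include <automerge-c/automerge.h>        /* include paths assumed */
#include <automerge-c/utils/result.h>

int main(void) {
    /* Both AMcreate() results are consumed by AMresultFrom(), pass or fail. */
    AMresult* const combined = AMresultFrom(2, AMcreate(NULL), AMcreate(NULL));
    if (combined != NULL) {
        if (AMresultStatus(combined) == AM_STATUS_OK) {
            /* `combined` now holds both AM_VAL_TYPE_DOC items. */
        }
        AMresultFree(combined);
    }
    return 0;
}
```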
+ */ +AMresult* AMstackResult(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); + +/** + * \memberof AMstack + * \brief Pushes the given result onto the given stack, calls the given + * callback with the given data to validate it and then either gets the + * first item in the sequence of items within that result if it's valid + * or gets `NULL` instead. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] result A pointer to an `AMresult` struct. + * \param[in] callback A pointer to a function with the same signature as + * `AMstackCallback()` or `NULL`. + * \param[in] data A pointer to arbitrary data or `NULL` which is passed to + * \p callback. + * \return A pointer to an `AMitem` struct or `NULL`. + * \warning If \p stack `== NULL` then \p result is deallocated in order to + * avoid a memory leak. + */ +AMitem* AMstackItem(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); + +/** + * \memberof AMstack + * \brief Pushes the given result onto the given stack, calls the given + * callback with the given data to validate it and then either gets an + * `AMitems` struct over the sequence of items within that result if it's + * valid or gets an empty `AMitems` instead. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] result A pointer to an `AMresult` struct. + * \param[in] callback A pointer to a function with the same signature as + * `AMstackCallback()` or `NULL`. + * \param[in] data A pointer to arbitrary data or `NULL` which is passed to + * \p callback. + * \return An `AMitems` struct. + * \warning If \p stack `== NULL` then \p result is deallocated immediately + * in order to avoid a memory leak. + */ +AMitems AMstackItems(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); + +/** + * \memberof AMstack + * \brief Gets the count of results that have been pushed onto the stack. + * + * \param[in,out] stack A pointer to an `AMstack` struct. + * \return A 64-bit unsigned integer. + */ +size_t AMstackSize(AMstack const* const stack); + +#endif /* AUTOMERGE_C_UTILS_STACK_H */ diff --git a/rust/automerge-c/include/automerge-c/utils/stack_callback_data.h b/rust/automerge-c/include/automerge-c/utils/stack_callback_data.h new file mode 100644 index 00000000..6f9f1edb --- /dev/null +++ b/rust/automerge-c/include/automerge-c/utils/stack_callback_data.h @@ -0,0 +1,53 @@ +#ifndef AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H +#define AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H +/** + * \file + * \brief Utility data structures, functions and macros for supplying + * parameters to the custom validation logic applied to `AMitem` + * structs. + */ + +#include + +/** + * \struct AMstackCallbackData + * \brief A data structure for passing the parameters of an item value test + * to an implementation of the `AMstackCallback` function prototype. + */ +typedef struct { + /** A bitmask of `AMvalType` tags. */ + AMvalType bitmask; + /** A null-terminated file path string. */ + char const* file; + /** The ordinal number of a line within a file. */ + int line; +} AMstackCallbackData; + +/** + * \memberof AMstackCallbackData + * \brief Allocates a new `AMstackCallbackData` struct and initializes its + * members from their corresponding arguments. + * + * \param[in] bitmask A bitmask of `AMvalType` tags. + * \param[in] file A null-terminated file path string. + * \param[in] line The ordinal number of a line within a file. 
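A hedged sketch of the smallest useful `AMstackCallback`: unlike the quickstart's `abort_cb()`, it only reports validity, so `AMstackItem()` returns `NULL` for an invalid result instead of terminating the process. The include paths are assumptions based on the install layout.

```c
#include <stdbool.h>
#include <stdio.h>

#include <automerge-c/automerge.h>        /* include paths assumed */
#include <automerge-c/utils/stack.h>

/* Minimal validation callback: report failure instead of exiting. */
static bool warn_cb(AMstack** stack, void* data) {
    (void)data;  /* no per-call expectation data in this sketch */
    if (stack && *stack && (*stack)->result) {
        return AMresultStatus((*stack)->result) == AM_STATUS_OK;
    }
    return false;
}

static void demo(void) {
    AMstack* stack = NULL;
    AMdoc* doc = NULL;
    /* An invalid result makes AMstackItem() return NULL instead of an item. */
    AMitem* const item = AMstackItem(&stack, AMcreate(NULL), warn_cb, NULL);
    if (item != NULL) {
        AMitemToDoc(item, &doc);
    }
    printf("%zu result(s) parked on the stack\n", AMstackSize(stack));
    AMstackFree(&stack);  /* frees every parked result in one call */
}
```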
+ * \return A pointer to a disowned `AMstackCallbackData` struct. + * \warning The returned pointer must be passed to `free()` to avoid a memory + * leak. + */ +AMstackCallbackData* AMstackCallbackDataInit(AMvalType const bitmask, char const* const file, int const line); + +/** + * \memberof AMstackCallbackData + * \def AMexpect + * \brief Allocates a new `AMstackCallbackData` struct and initializes it from + * an `AMvalueType` bitmask. + * + * \param[in] bitmask A bitmask of `AMvalType` tags. + * \return A pointer to a disowned `AMstackCallbackData` struct. + * \warning The returned pointer must be passed to `free()` to avoid a memory + * leak. + */ +#define AMexpect(bitmask) AMstackCallbackDataInit(bitmask, __FILE__, __LINE__) + +#endif /* AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H */ diff --git a/rust/automerge-c/include/automerge-c/utils/string.h b/rust/automerge-c/include/automerge-c/utils/string.h new file mode 100644 index 00000000..4d61c2e9 --- /dev/null +++ b/rust/automerge-c/include/automerge-c/utils/string.h @@ -0,0 +1,29 @@ +#ifndef AUTOMERGE_C_UTILS_STRING_H +#define AUTOMERGE_C_UTILS_STRING_H +/** + * \file + * \brief Utility functions for use with `AMbyteSpan` structs that provide + * UTF-8 string views. + */ + +#include + +/** + * \memberof AMbyteSpan + * \brief Returns a pointer to a null-terminated byte string which is a + * duplicate of the given UTF-8 string view except for the substitution + * of its NUL (0) characters with the specified null-terminated byte + * string. + * + * \param[in] str A UTF-8 string view as an `AMbyteSpan` struct. + * \param[in] nul A null-terminated byte string to substitute for NUL characters + * or `NULL` to substitute `"\\0"` for NUL characters. + * \return A disowned null-terminated byte string. + * \pre \p str.src `!= NULL` + * \pre \p str.count `<= sizeof(`\p str.src `)` + * \warning The returned pointer must be passed to `free()` to avoid a memory + * leak. + */ +char* AMstrdup(AMbyteSpan const str, char const* nul); + +#endif /* AUTOMERGE_C_UTILS_STRING_H */ diff --git a/rust/automerge-c/src/CMakeLists.txt b/rust/automerge-c/src/CMakeLists.txt deleted file mode 100644 index e02c0a96..00000000 --- a/rust/automerge-c/src/CMakeLists.txt +++ /dev/null @@ -1,250 +0,0 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) - -find_program ( - CARGO_CMD - "cargo" - PATHS "$ENV{CARGO_HOME}/bin" - DOC "The Cargo command" -) - -if(NOT CARGO_CMD) - message(FATAL_ERROR "Cargo (Rust package manager) not found! Install it and/or set the CARGO_HOME environment variable.") -endif() - -string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) - -if(BUILD_TYPE_LOWER STREQUAL debug) - set(CARGO_BUILD_TYPE "debug") - - set(CARGO_FLAG "") -else() - set(CARGO_BUILD_TYPE "release") - - set(CARGO_FLAG "--release") -endif() - -set(CARGO_FEATURES "") - -set(CARGO_CURRENT_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_BUILD_TYPE}") - -set( - CARGO_OUTPUT - ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h - ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX} - ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX} -) - -if(WIN32) - # \note The basename of an import library output by Cargo is the filename - # of its corresponding shared library. 
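A short hedged sketch of `AMstrdup()` from the header above: Automerge's UTF-8 views are not null-terminated, so they are copied before being handed to ordinary C string functions. The include paths are assumptions based on the install layout.

```c
#include <stdio.h>
#include <stdlib.h>

#include <automerge-c/automerge.h>        /* include paths assumed */
#include <automerge-c/utils/string.h>

int main(void) {
    /* An AMbyteSpan view is not NUL-terminated; copy it before printing. */
    AMbyteSpan const view = AMstr("hello automerge");
    char* const c_str = AMstrdup(view, NULL);  /* NULL: embedded NULs become "\0" */
    printf("%s (%zu bytes)\n", c_str, view.count);
    free(c_str);
    return 0;
}
```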
- list(APPEND CARGO_OUTPUT ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}) -endif() - -add_custom_command( - OUTPUT - ${CARGO_OUTPUT} - COMMAND - # \note cbindgen won't regenerate its output header file after it's - # been removed but it will after its configuration file has been - # updated. - ${CMAKE_COMMAND} -DCONDITION=NOT_EXISTS -P ${CMAKE_SOURCE_DIR}/cmake/file_touch.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CMAKE_SOURCE_DIR}/cbindgen.toml - COMMAND - ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} - MAIN_DEPENDENCY - lib.rs - DEPENDS - actor_id.rs - byte_span.rs - change_hashes.rs - change.rs - changes.rs - doc.rs - doc/list.rs - doc/list/item.rs - doc/list/items.rs - doc/map.rs - doc/map/item.rs - doc/map/items.rs - doc/utils.rs - obj.rs - obj/item.rs - obj/items.rs - result.rs - result_stack.rs - strs.rs - sync.rs - sync/have.rs - sync/haves.rs - sync/message.rs - sync/state.rs - ${CMAKE_SOURCE_DIR}/build.rs - ${CMAKE_SOURCE_DIR}/Cargo.toml - ${CMAKE_SOURCE_DIR}/cbindgen.toml - WORKING_DIRECTORY - ${CMAKE_SOURCE_DIR} - COMMENT - "Producing the library artifacts with Cargo..." - VERBATIM -) - -add_custom_target( - ${LIBRARY_NAME}_artifacts ALL - DEPENDS ${CARGO_OUTPUT} -) - -# \note cbindgen's naming behavior isn't fully configurable and it ignores -# `const fn` calls (https://github.com/eqrion/cbindgen/issues/252). -add_custom_command( - TARGET ${LIBRARY_NAME}_artifacts - POST_BUILD - COMMAND - # Compensate for cbindgen's variant struct naming. - ${CMAKE_COMMAND} -DMATCH_REGEX=AM\([^_]+_[^_]+\)_Body -DREPLACE_EXPR=AM\\1 -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h - COMMAND - # Compensate for cbindgen's union tag enum type naming. - ${CMAKE_COMMAND} -DMATCH_REGEX=AM\([^_]+\)_Tag -DREPLACE_EXPR=AM\\1Variant -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h - COMMAND - # Compensate for cbindgen's translation of consecutive uppercase letters to "ScreamingSnakeCase". - ${CMAKE_COMMAND} -DMATCH_REGEX=A_M\([^_]+\)_ -DREPLACE_EXPR=AM_\\1_ -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h - COMMAND - # Compensate for cbindgen ignoring `std:mem::size_of()` calls. - ${CMAKE_COMMAND} -DMATCH_REGEX=USIZE_ -DREPLACE_EXPR=\+${CMAKE_SIZEOF_VOID_P} -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h - WORKING_DIRECTORY - ${CMAKE_SOURCE_DIR} - COMMENT - "Compensating for cbindgen deficits..." - VERBATIM -) - -if(BUILD_SHARED_LIBS) - if(WIN32) - set(LIBRARY_DESTINATION "${CMAKE_INSTALL_BINDIR}") - else() - set(LIBRARY_DESTINATION "${CMAKE_INSTALL_LIBDIR}") - endif() - - set(LIBRARY_DEFINE_SYMBOL "${SYMBOL_PREFIX}_EXPORTS") - - # \note The basename of an import library output by Cargo is the filename - # of its corresponding shared library. 
- set(LIBRARY_IMPLIB "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}") - - set(LIBRARY_LOCATION "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}") - - set(LIBRARY_NO_SONAME "${WIN32}") - - set(LIBRARY_SONAME "${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") - - set(LIBRARY_TYPE "SHARED") -else() - set(LIBRARY_DEFINE_SYMBOL "") - - set(LIBRARY_DESTINATION "${CMAKE_INSTALL_LIBDIR}") - - set(LIBRARY_IMPLIB "") - - set(LIBRARY_LOCATION "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}") - - set(LIBRARY_NO_SONAME "TRUE") - - set(LIBRARY_SONAME "") - - set(LIBRARY_TYPE "STATIC") -endif() - -add_library(${LIBRARY_NAME} ${LIBRARY_TYPE} IMPORTED GLOBAL) - -set_target_properties( - ${LIBRARY_NAME} - PROPERTIES - # \note Cargo writes a debug build into a nested directory instead of - # decorating its name. - DEBUG_POSTFIX "" - DEFINE_SYMBOL "${LIBRARY_DEFINE_SYMBOL}" - IMPORTED_IMPLIB "${LIBRARY_IMPLIB}" - IMPORTED_LOCATION "${LIBRARY_LOCATION}" - IMPORTED_NO_SONAME "${LIBRARY_NO_SONAME}" - IMPORTED_SONAME "${LIBRARY_SONAME}" - LINKER_LANGUAGE C - PUBLIC_HEADER "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" - SOVERSION "${PROJECT_VERSION_MAJOR}" - VERSION "${PROJECT_VERSION}" - # \note Cargo exports all of the symbols automatically. - WINDOWS_EXPORT_ALL_SYMBOLS "TRUE" -) - -target_compile_definitions(${LIBRARY_NAME} INTERFACE $) - -target_include_directories( - ${LIBRARY_NAME} - INTERFACE - "$" -) - -set(CMAKE_THREAD_PREFER_PTHREAD TRUE) - -set(THREADS_PREFER_PTHREAD_FLAG TRUE) - -find_package(Threads REQUIRED) - -set(LIBRARY_DEPENDENCIES Threads::Threads ${CMAKE_DL_LIBS}) - -if(WIN32) - list(APPEND LIBRARY_DEPENDENCIES Bcrypt userenv ws2_32) -else() - list(APPEND LIBRARY_DEPENDENCIES m) -endif() - -target_link_libraries(${LIBRARY_NAME} INTERFACE ${LIBRARY_DEPENDENCIES}) - -install( - FILES $ - TYPE LIB - # \note The basename of an import library output by Cargo is the filename - # of its corresponding shared library. - RENAME "${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}" - OPTIONAL -) - -set(LIBRARY_FILE_NAME "${CMAKE_${LIBRARY_TYPE}_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_${LIBRARY_TYPE}_LIBRARY_SUFFIX}") - -install( - FILES $ - RENAME "${LIBRARY_FILE_NAME}" - DESTINATION ${LIBRARY_DESTINATION} -) - -install( - FILES $ - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME} -) - -find_package(Doxygen OPTIONAL_COMPONENTS dot) - -if(DOXYGEN_FOUND) - set(DOXYGEN_ALIASES "installed_headerfile=\\headerfile ${LIBRARY_NAME}.h <${PROJECT_NAME}/${LIBRARY_NAME}.h>") - - set(DOXYGEN_GENERATE_LATEX YES) - - set(DOXYGEN_PDF_HYPERLINKS YES) - - set(DOXYGEN_PROJECT_LOGO "${CMAKE_SOURCE_DIR}/img/brandmark.png") - - set(DOXYGEN_SORT_BRIEF_DOCS YES) - - set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "${CMAKE_SOURCE_DIR}/README.md") - - doxygen_add_docs( - ${LIBRARY_NAME}_docs - "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" - "${CMAKE_SOURCE_DIR}/README.md" - USE_STAMP_FILE - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - COMMENT "Producing documentation with Doxygen..." 
- ) - - # \note A Doxygen input file isn't a file-level dependency so the Doxygen - # command must instead depend upon a target that outputs the file or - # it will just output an error message when it can't be found. - add_dependencies(${LIBRARY_NAME}_docs ${LIBRARY_NAME}_artifacts) -endif() diff --git a/rust/automerge-c/src/actor_id.rs b/rust/automerge-c/src/actor_id.rs index bc86d5ef..5a28959e 100644 --- a/rust/automerge-c/src/actor_id.rs +++ b/rust/automerge-c/src/actor_id.rs @@ -1,4 +1,5 @@ use automerge as am; +use libc::c_int; use std::cell::RefCell; use std::cmp::Ordering; use std::str::FromStr; @@ -11,7 +12,7 @@ macro_rules! to_actor_id { let handle = $handle.as_ref(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMactorId pointer").into(), + None => return AMresult::error("Invalid `AMactorId*`").into(), } }}; } @@ -57,11 +58,11 @@ impl AsRef for AMactorId { } /// \memberof AMactorId -/// \brief Gets the value of an actor identifier as a sequence of bytes. +/// \brief Gets the value of an actor identifier as an array of bytes. /// /// \param[in] actor_id A pointer to an `AMactorId` struct. -/// \pre \p actor_id `!= NULL`. -/// \return An `AMbyteSpan` struct. +/// \return An `AMbyteSpan` struct for an array of bytes. +/// \pre \p actor_id `!= NULL` /// \internal /// /// # Safety @@ -82,8 +83,8 @@ pub unsafe extern "C" fn AMactorIdBytes(actor_id: *const AMactorId) -> AMbyteSpa /// \return `-1` if \p actor_id1 `<` \p actor_id2, `0` if /// \p actor_id1 `==` \p actor_id2 and `1` if /// \p actor_id1 `>` \p actor_id2. -/// \pre \p actor_id1 `!= NULL`. -/// \pre \p actor_id2 `!= NULL`. +/// \pre \p actor_id1 `!= NULL` +/// \pre \p actor_id2 `!= NULL` /// \internal /// /// #Safety @@ -93,7 +94,7 @@ pub unsafe extern "C" fn AMactorIdBytes(actor_id: *const AMactorId) -> AMbyteSpa pub unsafe extern "C" fn AMactorIdCmp( actor_id1: *const AMactorId, actor_id2: *const AMactorId, -) -> isize { +) -> c_int { match (actor_id1.as_ref(), actor_id2.as_ref()) { (Some(actor_id1), Some(actor_id2)) => match actor_id1.as_ref().cmp(actor_id2.as_ref()) { Ordering::Less => -1, @@ -101,65 +102,69 @@ pub unsafe extern "C" fn AMactorIdCmp( Ordering::Greater => 1, }, (None, Some(_)) => -1, - (Some(_), None) => 1, (None, None) => 0, + (Some(_), None) => 1, } } /// \memberof AMactorId -/// \brief Allocates a new actor identifier and initializes it with a random -/// UUID. +/// \brief Allocates a new actor identifier and initializes it from a random +/// UUID value. /// -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMactorId` struct. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. #[no_mangle] pub unsafe extern "C" fn AMactorIdInit() -> *mut AMresult { to_result(Ok::(am::ActorId::random())) } /// \memberof AMactorId -/// \brief Allocates a new actor identifier and initializes it from a sequence -/// of bytes. +/// \brief Allocates a new actor identifier and initializes it from an array of +/// bytes value. /// -/// \param[in] src A pointer to a contiguous sequence of bytes. -/// \param[in] count The number of bytes to copy from \p src. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMactorId` struct. 
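A hedged sketch of comparing two actor identifiers with the functions above. The `AMitemToActorId()` extractor is an assumption made by analogy with `AMitemToDoc()` and `AMitemToBytes()` from the quickstart; it does not appear in this patch. The include path is likewise an assumption.

```c
#include <stdio.h>

#include <automerge-c/automerge.h>   /* include path assumed */

int main(void) {
    AMresult* const r1 = AMactorIdInit();  /* random UUID-backed identifier */
    AMresult* const r2 = AMactorIdInit();
    AMactorId const* id1 = NULL;
    AMactorId const* id2 = NULL;
    /* AMitemToActorId() is assumed by analogy with AMitemToDoc(). */
    AMitemToActorId(AMresultItem(r1), &id1);
    AMitemToActorId(AMresultItem(r2), &id2);
    /* Two random identifiers are expected to compare unequal. */
    printf("cmp=%d, %zu bytes each\n", AMactorIdCmp(id1, id2), AMactorIdBytes(id1).count);
    AMresultFree(r1);
    AMresultFree(r2);
    return 0;
}
```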
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] src A pointer to an array of bytes. +/// \param[in] count The count of bytes to copy from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] -pub unsafe extern "C" fn AMactorIdInitBytes(src: *const u8, count: usize) -> *mut AMresult { - let slice = std::slice::from_raw_parts(src, count); - to_result(Ok::(am::ActorId::from( - slice, - ))) +pub unsafe extern "C" fn AMactorIdFromBytes(src: *const u8, count: usize) -> *mut AMresult { + if !src.is_null() { + let value = std::slice::from_raw_parts(src, count); + to_result(Ok::(am::ActorId::from( + value, + ))) + } else { + AMresult::error("Invalid uint8_t*").into() + } } /// \memberof AMactorId /// \brief Allocates a new actor identifier and initializes it from a -/// hexadecimal string. +/// hexadecimal UTF-8 string view value. /// -/// \param[in] hex_str A UTF-8 string view as an `AMbyteSpan` struct. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMactorId` struct. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// hex_str must be a valid pointer to an AMbyteSpan #[no_mangle] -pub unsafe extern "C" fn AMactorIdInitStr(hex_str: AMbyteSpan) -> *mut AMresult { +pub unsafe extern "C" fn AMactorIdFromStr(value: AMbyteSpan) -> *mut AMresult { use am::AutomergeError::InvalidActorId; - to_result(match (&hex_str).try_into() { + to_result(match (&value).try_into() { Ok(s) => match am::ActorId::from_str(s) { Ok(actor_id) => Ok(actor_id), Err(_) => Err(InvalidActorId(String::from(s))), @@ -169,11 +174,12 @@ pub unsafe extern "C" fn AMactorIdInitStr(hex_str: AMbyteSpan) -> *mut AMresult } /// \memberof AMactorId -/// \brief Gets the value of an actor identifier as a hexadecimal string. +/// \brief Gets the value of an actor identifier as a UTF-8 hexadecimal string +/// view. /// /// \param[in] actor_id A pointer to an `AMactorId` struct. -/// \pre \p actor_id `!= NULL`. /// \return A UTF-8 string view as an `AMbyteSpan` struct. +/// \pre \p actor_id `!= NULL` /// \internal /// /// # Safety diff --git a/rust/automerge-c/src/byte_span.rs b/rust/automerge-c/src/byte_span.rs index fd4c3ca0..5855cfc7 100644 --- a/rust/automerge-c/src/byte_span.rs +++ b/rust/automerge-c/src/byte_span.rs @@ -1,14 +1,17 @@ use automerge as am; -use libc::strlen; +use std::cmp::Ordering; use std::convert::TryFrom; use std::os::raw::c_char; +use libc::{c_int, strlen}; +use smol_str::SmolStr; + macro_rules! 
to_str { - ($span:expr) => {{ - let result: Result<&str, am::AutomergeError> = (&$span).try_into(); + ($byte_span:expr) => {{ + let result: Result<&str, am::AutomergeError> = (&$byte_span).try_into(); match result { Ok(s) => s, - Err(e) => return AMresult::err(&e.to_string()).into(), + Err(e) => return AMresult::error(&e.to_string()).into(), } }}; } @@ -17,16 +20,17 @@ pub(crate) use to_str; /// \struct AMbyteSpan /// \installed_headerfile -/// \brief A view onto a contiguous sequence of bytes. +/// \brief A view onto an array of bytes. #[repr(C)] pub struct AMbyteSpan { - /// A pointer to an array of bytes. - /// \attention NEVER CALL `free()` ON \p src! - /// \warning \p src is only valid until the `AMfree()` function is called - /// on the `AMresult` struct that stores the array of bytes to - /// which it points. + /// A pointer to the first byte of an array of bytes. + /// \warning \p src is only valid until the array of bytes to which it + /// points is freed. + /// \note If the `AMbyteSpan` came from within an `AMitem` struct then + /// \p src will be freed when the pointer to the `AMresult` struct + /// containing the `AMitem` struct is passed to `AMresultFree()`. pub src: *const u8, - /// The number of bytes in the array. + /// The count of bytes in the array. pub count: usize, } @@ -52,9 +56,7 @@ impl PartialEq for AMbyteSpan { } else if self.src == other.src { return true; } - let slice = unsafe { std::slice::from_raw_parts(self.src, self.count) }; - let other_slice = unsafe { std::slice::from_raw_parts(other.src, other.count) }; - slice == other_slice + <&[u8]>::from(self) == <&[u8]>::from(other) } } @@ -72,10 +74,15 @@ impl From<&am::ActorId> for AMbyteSpan { impl From<&mut am::ActorId> for AMbyteSpan { fn from(actor: &mut am::ActorId) -> Self { - let slice = actor.to_bytes(); + actor.as_ref().into() + } +} + +impl From<&am::ChangeHash> for AMbyteSpan { + fn from(change_hash: &am::ChangeHash) -> Self { Self { - src: slice.as_ptr(), - count: slice.len(), + src: change_hash.0.as_ptr(), + count: change_hash.0.len(), } } } @@ -93,12 +100,9 @@ impl From<*const c_char> for AMbyteSpan { } } -impl From<&am::ChangeHash> for AMbyteSpan { - fn from(change_hash: &am::ChangeHash) -> Self { - Self { - src: change_hash.0.as_ptr(), - count: change_hash.0.len(), - } +impl From<&SmolStr> for AMbyteSpan { + fn from(smol_str: &SmolStr) -> Self { + smol_str.as_bytes().into() } } @@ -111,13 +115,39 @@ impl From<&[u8]> for AMbyteSpan { } } +impl From<&AMbyteSpan> for &[u8] { + fn from(byte_span: &AMbyteSpan) -> Self { + unsafe { std::slice::from_raw_parts(byte_span.src, byte_span.count) } + } +} + +impl From<&AMbyteSpan> for Vec { + fn from(byte_span: &AMbyteSpan) -> Self { + <&[u8]>::from(byte_span).to_vec() + } +} + +impl TryFrom<&AMbyteSpan> for am::ChangeHash { + type Error = am::AutomergeError; + + fn try_from(byte_span: &AMbyteSpan) -> Result { + use am::AutomergeError::InvalidChangeHashBytes; + + let slice: &[u8] = byte_span.into(); + match slice.try_into() { + Ok(change_hash) => Ok(change_hash), + Err(e) => Err(InvalidChangeHashBytes(e)), + } + } +} + impl TryFrom<&AMbyteSpan> for &str { type Error = am::AutomergeError; - fn try_from(span: &AMbyteSpan) -> Result { + fn try_from(byte_span: &AMbyteSpan) -> Result { use am::AutomergeError::InvalidCharacter; - let slice = unsafe { std::slice::from_raw_parts(span.src, span.count) }; + let slice = byte_span.into(); match std::str::from_utf8(slice) { Ok(str_) => Ok(str_), Err(e) => Err(InvalidCharacter(e.valid_up_to())), @@ -125,17 +155,69 @@ impl 
TryFrom<&AMbyteSpan> for &str { } } -/// \brief Creates an AMbyteSpan from a pointer + length +/// \memberof AMbyteSpan +/// \brief Creates a view onto an array of bytes. /// -/// \param[in] src A pointer to a span of bytes -/// \param[in] count The number of bytes in the span -/// \return An `AMbyteSpan` struct +/// \param[in] src A pointer to an array of bytes or `NULL`. +/// \param[in] count The count of bytes to view from the array pointed to by +/// \p src. +/// \return An `AMbyteSpan` struct. +/// \pre \p count `<= sizeof(`\p src `)` +/// \post `(`\p src `== NULL) -> (AMbyteSpan){NULL, 0}` /// \internal /// /// #Safety -/// AMbytes does not retain the underlying storage, so you must discard the -/// return value before freeing the bytes. +/// src must be a byte array of length `>= count` or `std::ptr::null()` #[no_mangle] pub unsafe extern "C" fn AMbytes(src: *const u8, count: usize) -> AMbyteSpan { - AMbyteSpan { src, count } + AMbyteSpan { + src, + count: if src.is_null() { 0 } else { count }, + } +} + +/// \memberof AMbyteSpan +/// \brief Creates a view onto a C string. +/// +/// \param[in] c_str A null-terminated byte string or `NULL`. +/// \return An `AMbyteSpan` struct. +/// \pre Each byte in \p c_str encodes one UTF-8 character. +/// \internal +/// +/// #Safety +/// c_str must be a null-terminated array of `std::os::raw::c_char` or `std::ptr::null()`. +#[no_mangle] +pub unsafe extern "C" fn AMstr(c_str: *const c_char) -> AMbyteSpan { + c_str.into() +} + +/// \memberof AMbyteSpan +/// \brief Compares two UTF-8 string views lexicographically. +/// +/// \param[in] lhs A UTF-8 string view as an `AMbyteSpan` struct. +/// \param[in] rhs A UTF-8 string view as an `AMbyteSpan` struct. +/// \return Negative value if \p lhs appears before \p rhs in lexicographical order. +/// Zero if \p lhs and \p rhs compare equal. +/// Positive value if \p lhs appears after \p rhs in lexicographical order. +/// \pre \p lhs.src `!= NULL` +/// \pre \p lhs.count `<= sizeof(`\p lhs.src `)` +/// \pre \p rhs.src `!= NULL` +/// \pre \p rhs.count `<= sizeof(`\p rhs.src `)` +/// \internal +/// +/// #Safety +/// lhs.src must be a byte array of length >= lhs.count +/// rhs.src must be a a byte array of length >= rhs.count +#[no_mangle] +pub unsafe extern "C" fn AMstrCmp(lhs: AMbyteSpan, rhs: AMbyteSpan) -> c_int { + match (<&str>::try_from(&lhs), <&str>::try_from(&rhs)) { + (Ok(lhs), Ok(rhs)) => match lhs.cmp(rhs) { + Ordering::Less => -1, + Ordering::Equal => 0, + Ordering::Greater => 1, + }, + (Err(_), Ok(_)) => -1, + (Err(_), Err(_)) => 0, + (Ok(_), Err(_)) => 1, + } } diff --git a/rust/automerge-c/src/change.rs b/rust/automerge-c/src/change.rs index d64a2635..8529ed94 100644 --- a/rust/automerge-c/src/change.rs +++ b/rust/automerge-c/src/change.rs @@ -2,7 +2,6 @@ use automerge as am; use std::cell::RefCell; use crate::byte_span::AMbyteSpan; -use crate::change_hashes::AMchangeHashes; use crate::result::{to_result, AMresult}; macro_rules! to_change { @@ -10,7 +9,7 @@ macro_rules! to_change { let handle = $handle.as_ref(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMchange pointer").into(), + None => return AMresult::error("Invalid `AMchange*`").into(), } }}; } @@ -21,14 +20,14 @@ macro_rules! 
to_change { #[derive(Eq, PartialEq)] pub struct AMchange { body: *mut am::Change, - changehash: RefCell>, + change_hash: RefCell>, } impl AMchange { pub fn new(change: &mut am::Change) -> Self { Self { body: change, - changehash: Default::default(), + change_hash: Default::default(), } } @@ -40,12 +39,12 @@ impl AMchange { } pub fn hash(&self) -> AMbyteSpan { - let mut changehash = self.changehash.borrow_mut(); - if let Some(changehash) = changehash.as_ref() { - changehash.into() + let mut change_hash = self.change_hash.borrow_mut(); + if let Some(change_hash) = change_hash.as_ref() { + change_hash.into() } else { let hash = unsafe { (*self.body).hash() }; - let ptr = changehash.insert(hash); + let ptr = change_hash.insert(hash); AMbyteSpan { src: ptr.0.as_ptr(), count: hash.as_ref().len(), @@ -70,11 +69,10 @@ impl AsRef for AMchange { /// \brief Gets the first referenced actor identifier in a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \pre \p change `!= NULL`. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMactorId` struct. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item. +/// \pre \p change `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -90,8 +88,8 @@ pub unsafe extern "C" fn AMchangeActorId(change: *const AMchange) -> *mut AMresu /// \memberof AMchange /// \brief Compresses the raw bytes of a change. /// -/// \param[in,out] change A pointer to an `AMchange` struct. -/// \pre \p change `!= NULL`. +/// \param[in] change A pointer to an `AMchange` struct. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -107,18 +105,20 @@ pub unsafe extern "C" fn AMchangeCompress(change: *mut AMchange) { /// \brief Gets the dependencies of a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \return A pointer to an `AMchangeHashes` struct or `NULL`. -/// \pre \p change `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p change `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// change must be a valid pointer to an AMchange #[no_mangle] -pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> AMchangeHashes { - match change.as_ref() { - Some(change) => AMchangeHashes::new(change.as_ref().deps()), +pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> *mut AMresult { + to_result(match change.as_ref() { + Some(change) => change.as_ref().deps(), None => Default::default(), - } + }) } /// \memberof AMchange @@ -126,7 +126,7 @@ pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> AMchangeHashes /// /// \param[in] change A pointer to an `AMchange` struct. /// \return An `AMbyteSpan` struct. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -141,32 +141,33 @@ pub unsafe extern "C" fn AMchangeExtraBytes(change: *const AMchange) -> AMbyteSp } /// \memberof AMchange -/// \brief Loads a sequence of bytes into a change. +/// \brief Allocates a new change and initializes it from an array of bytes value. /// /// \param[in] src A pointer to an array of bytes. 
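To illustrate the byte-span helpers introduced just before this (`AMstr()`, `AMstrCmp()` and `AMbytes()`), a small hedged sketch; the include path is an assumption based on the install layout.

```c
#include <stdint.h>
#include <stdio.h>

#include <automerge-c/automerge.h>   /* include path assumed */

int main(void) {
    /* AMstr() views a C string; AMstrCmp() orders two views lexicographically. */
    if (AMstrCmp(AMstr("alpha"), AMstr("beta")) < 0) {
        printf("\"alpha\" sorts before \"beta\"\n");
    }
    /* AMbytes() views a raw array; a NULL src yields the empty span {NULL, 0}. */
    uint8_t const raw[] = {0xde, 0xad, 0xbe, 0xef};
    AMbyteSpan const bytes = AMbytes(raw, sizeof(raw));
    printf("%zu byte(s) viewed\n", bytes.count);
    return 0;
}
```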
-/// \param[in] count The number of bytes in \p src to load. -/// \return A pointer to an `AMresult` struct containing an `AMchange` struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to load from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_CHANGE` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] pub unsafe extern "C" fn AMchangeFromBytes(src: *const u8, count: usize) -> *mut AMresult { - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result(am::Change::from_bytes(data)) + let data = std::slice::from_raw_parts(src, count); + to_result(am::Change::from_bytes(data.to_vec())) } /// \memberof AMchange /// \brief Gets the hash of a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \return A change hash as an `AMbyteSpan` struct. -/// \pre \p change `!= NULL`. +/// \return An `AMbyteSpan` struct for a change hash. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -183,8 +184,8 @@ pub unsafe extern "C" fn AMchangeHash(change: *const AMchange) -> AMbyteSpan { /// \brief Tests the emptiness of a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \return A boolean. -/// \pre \p change `!= NULL`. +/// \return `true` if \p change is empty, `false` otherwise. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -198,12 +199,37 @@ pub unsafe extern "C" fn AMchangeIsEmpty(change: *const AMchange) -> bool { } } +/// \memberof AMchange +/// \brief Loads a document into a sequence of changes. +/// +/// \param[in] src A pointer to an array of bytes. +/// \param[in] count The count of bytes to load from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE` items. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +/// \internal +/// +/// # Safety +/// src must be a byte array of length `>= count` +#[no_mangle] +pub unsafe extern "C" fn AMchangeLoadDocument(src: *const u8, count: usize) -> *mut AMresult { + let data = std::slice::from_raw_parts(src, count); + to_result::, _>>( + am::Automerge::load(data) + .and_then(|d| d.get_changes(&[]).map(|c| c.into_iter().cloned().collect())), + ) +} + /// \memberof AMchange /// \brief Gets the maximum operation index of a change. /// /// \param[in] change A pointer to an `AMchange` struct. /// \return A 64-bit unsigned integer. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -221,8 +247,8 @@ pub unsafe extern "C" fn AMchangeMaxOp(change: *const AMchange) -> u64 { /// \brief Gets the message of a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \return A UTF-8 string view as an `AMbyteSpan` struct. -/// \pre \p change `!= NULL`. 
+/// \return An `AMbyteSpan` struct for a UTF-8 string. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -240,7 +266,7 @@ pub unsafe extern "C" fn AMchangeMessage(change: *const AMchange) -> AMbyteSpan /// /// \param[in] change A pointer to an `AMchange` struct. /// \return A 64-bit unsigned integer. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -259,7 +285,7 @@ pub unsafe extern "C" fn AMchangeSeq(change: *const AMchange) -> u64 { /// /// \param[in] change A pointer to an `AMchange` struct. /// \return A 64-bit unsigned integer. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -267,10 +293,9 @@ pub unsafe extern "C" fn AMchangeSeq(change: *const AMchange) -> u64 { #[no_mangle] pub unsafe extern "C" fn AMchangeSize(change: *const AMchange) -> usize { if let Some(change) = change.as_ref() { - change.as_ref().len() - } else { - 0 + return change.as_ref().len(); } + 0 } /// \memberof AMchange @@ -278,7 +303,7 @@ pub unsafe extern "C" fn AMchangeSize(change: *const AMchange) -> usize { /// /// \param[in] change A pointer to an `AMchange` struct. /// \return A 64-bit unsigned integer. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -297,7 +322,7 @@ pub unsafe extern "C" fn AMchangeStartOp(change: *const AMchange) -> u64 { /// /// \param[in] change A pointer to an `AMchange` struct. /// \return A 64-bit signed integer. -/// \pre \p change `!= NULL`. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -315,8 +340,8 @@ pub unsafe extern "C" fn AMchangeTime(change: *const AMchange) -> i64 { /// \brief Gets the raw bytes of a change. /// /// \param[in] change A pointer to an `AMchange` struct. -/// \return An `AMbyteSpan` struct. -/// \pre \p change `!= NULL`. +/// \return An `AMbyteSpan` struct for an array of bytes. +/// \pre \p change `!= NULL` /// \internal /// /// # Safety @@ -329,28 +354,3 @@ pub unsafe extern "C" fn AMchangeRawBytes(change: *const AMchange) -> AMbyteSpan Default::default() } } - -/// \memberof AMchange -/// \brief Loads a document into a sequence of changes. -/// -/// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes in \p src to load. -/// \return A pointer to an `AMresult` struct containing a sequence of -/// `AMchange` structs. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. 
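As a usage sketch for the new `AMchangeLoadDocument()` above (illustrative only; `AMresultStatus()`, `AM_STATUS_OK`, `AMresultItems()`, `AMitemsNext()` and `AMitemToChange()` are assumed helpers from the surrounding items API and are not defined in these hunks):

    #include <automerge-c/automerge.h>  /* assumed umbrella header */
    #include <stddef.h>
    #include <stdint.h>

    /* Counts the non-empty changes in a previously saved document and then
     * releases the result that owns every item it yielded. */
    static size_t count_saved_changes(uint8_t const* saved, size_t saved_count) {
        size_t n = 0;
        AMresult* result = AMchangeLoadDocument(saved, saved_count);
        if (AMresultStatus(result) == AM_STATUS_OK) {         /* assumed helper */
            AMitems items = AMresultItems(result);            /* assumed helper */
            AMitem* item = NULL;
            while ((item = AMitemsNext(&items, 1)) != NULL) { /* assumed helper */
                AMchange* change = NULL;
                if (AMitemToChange(item, &change)) {          /* assumed helper */
                    n += !AMchangeIsEmpty(change);
                }
            }
        }
        AMresultFree(result); /* required, per the \warning notes above */
        return n;
    }
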
-/// \internal -/// -/// # Safety -/// src must be a byte array of size `>= count` -#[no_mangle] -pub unsafe extern "C" fn AMchangeLoadDocument(src: *const u8, count: usize) -> *mut AMresult { - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result::, _>>( - am::Automerge::load(&data) - .and_then(|d| d.get_changes(&[]).map(|c| c.into_iter().cloned().collect())), - ) -} diff --git a/rust/automerge-c/src/change_hashes.rs b/rust/automerge-c/src/change_hashes.rs deleted file mode 100644 index 029612e9..00000000 --- a/rust/automerge-c/src/change_hashes.rs +++ /dev/null @@ -1,400 +0,0 @@ -use automerge as am; -use std::cmp::Ordering; -use std::ffi::c_void; -use std::mem::size_of; - -use crate::byte_span::AMbyteSpan; -use crate::result::{to_result, AMresult}; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. -pub const USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(change_hashes: &[am::ChangeHash], offset: isize) -> Self { - Self { - len: change_hashes.len(), - offset, - ptr: change_hashes.as_ptr() as *const c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. - len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<&am::ChangeHash> { - if self.is_stopped() { - return None; - } - let slice: &[am::ChangeHash] = - unsafe { std::slice::from_raw_parts(self.ptr as *const am::ChangeHash, self.len) }; - let value = &slice[self.get_index()]; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<&am::ChangeHash> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[am::ChangeHash] = - unsafe { std::slice::from_raw_parts(self.ptr as *const am::ChangeHash, self.len) }; - Some(&slice[self.get_index()]) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) - .try_into() - .unwrap() - } - } -} - -/// \struct AMchangeHashes -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of change hashes. 
-#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMchangeHashes { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. - detail: [u8; USIZE_USIZE_USIZE_], -} - -impl AMchangeHashes { - pub fn new(change_hashes: &[am::ChangeHash]) -> Self { - Self { - detail: Detail::new(change_hashes, 0).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<&am::ChangeHash> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<&am::ChangeHash> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[am::ChangeHash]> for AMchangeHashes { - fn as_ref(&self) -> &[am::ChangeHash] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const am::ChangeHash, detail.len) } - } -} - -impl Default for AMchangeHashes { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMchangeHashes -/// \brief Advances an iterator over a sequence of change hashes by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p change_hashes `!= NULL`. -/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesAdvance(change_hashes: *mut AMchangeHashes, n: isize) { - if let Some(change_hashes) = change_hashes.as_mut() { - change_hashes.advance(n); - }; -} - -/// \memberof AMchangeHashes -/// \brief Compares the sequences of change hashes underlying a pair of -/// iterators. -/// -/// \param[in] change_hashes1 A pointer to an `AMchangeHashes` struct. -/// \param[in] change_hashes2 A pointer to an `AMchangeHashes` struct. -/// \return `-1` if \p change_hashes1 `<` \p change_hashes2, `0` if -/// \p change_hashes1 `==` \p change_hashes2 and `1` if -/// \p change_hashes1 `>` \p change_hashes2. -/// \pre \p change_hashes1 `!= NULL`. -/// \pre \p change_hashes2 `!= NULL`. 
-/// \internal -/// -/// #Safety -/// change_hashes1 must be a valid pointer to an AMchangeHashes -/// change_hashes2 must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesCmp( - change_hashes1: *const AMchangeHashes, - change_hashes2: *const AMchangeHashes, -) -> isize { - match (change_hashes1.as_ref(), change_hashes2.as_ref()) { - (Some(change_hashes1), Some(change_hashes2)) => { - match change_hashes1.as_ref().cmp(change_hashes2.as_ref()) { - Ordering::Less => -1, - Ordering::Equal => 0, - Ordering::Greater => 1, - } - } - (None, Some(_)) => -1, - (Some(_), None) => 1, - (None, None) => 0, - } -} - -/// \memberof AMchangeHashes -/// \brief Allocates an iterator over a sequence of change hashes and -/// initializes it from a sequence of byte spans. -/// -/// \param[in] src A pointer to an array of `AMbyteSpan` structs. -/// \param[in] count The number of `AMbyteSpan` structs to copy from \p src. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`) / sizeof(AMbyteSpan)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. -/// \internal -/// -/// # Safety -/// src must be an AMbyteSpan array of size `>= count` -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesInit(src: *const AMbyteSpan, count: usize) -> *mut AMresult { - let mut change_hashes = Vec::::new(); - for n in 0..count { - let byte_span = &*src.add(n); - let slice = std::slice::from_raw_parts(byte_span.src, byte_span.count); - match slice.try_into() { - Ok(change_hash) => { - change_hashes.push(change_hash); - } - Err(e) => { - return to_result(Err(e)); - } - } - } - to_result(Ok::, am::InvalidChangeHashSlice>( - change_hashes, - )) -} - -/// \memberof AMchangeHashes -/// \brief Gets the change hash at the current position of an iterator over a -/// sequence of change hashes and then advances it by at most \p |n| -/// positions where the sign of \p n is relative to the iterator's -/// direction. -/// -/// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return An `AMbyteSpan` struct with `.src == NULL` when \p change_hashes -/// was previously advanced past its forward/reverse limit. -/// \pre \p change_hashes `!= NULL`. -/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesNext( - change_hashes: *mut AMchangeHashes, - n: isize, -) -> AMbyteSpan { - if let Some(change_hashes) = change_hashes.as_mut() { - if let Some(change_hash) = change_hashes.next(n) { - return change_hash.into(); - } - } - Default::default() -} - -/// \memberof AMchangeHashes -/// \brief Advances an iterator over a sequence of change hashes by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the change hash at its new -/// position. -/// -/// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return An `AMbyteSpan` struct with `.src == NULL` when \p change_hashes is -/// presently advanced past its forward/reverse limit. -/// \pre \p change_hashes `!= NULL`. 
-/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesPrev( - change_hashes: *mut AMchangeHashes, - n: isize, -) -> AMbyteSpan { - if let Some(change_hashes) = change_hashes.as_mut() { - if let Some(change_hash) = change_hashes.prev(n) { - return change_hash.into(); - } - } - Default::default() -} - -/// \memberof AMchangeHashes -/// \brief Gets the size of the sequence of change hashes underlying an -/// iterator. -/// -/// \param[in] change_hashes A pointer to an `AMchangeHashes` struct. -/// \return The count of values in \p change_hashes. -/// \pre \p change_hashes `!= NULL`. -/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesSize(change_hashes: *const AMchangeHashes) -> usize { - if let Some(change_hashes) = change_hashes.as_ref() { - change_hashes.len() - } else { - 0 - } -} - -/// \memberof AMchangeHashes -/// \brief Creates an iterator over the same sequence of change hashes as the -/// given one but with the opposite position and direction. -/// -/// \param[in] change_hashes A pointer to an `AMchangeHashes` struct. -/// \return An `AMchangeHashes` struct -/// \pre \p change_hashes `!= NULL`. -/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesReversed( - change_hashes: *const AMchangeHashes, -) -> AMchangeHashes { - if let Some(change_hashes) = change_hashes.as_ref() { - change_hashes.reversed() - } else { - Default::default() - } -} - -/// \memberof AMchangeHashes -/// \brief Creates an iterator at the starting position over the same sequence -/// of change hashes as the given one. -/// -/// \param[in] change_hashes A pointer to an `AMchangeHashes` struct. -/// \return An `AMchangeHashes` struct -/// \pre \p change_hashes `!= NULL`. -/// \internal -/// -/// #Safety -/// change_hashes must be a valid pointer to an AMchangeHashes -#[no_mangle] -pub unsafe extern "C" fn AMchangeHashesRewound( - change_hashes: *const AMchangeHashes, -) -> AMchangeHashes { - if let Some(change_hashes) = change_hashes.as_ref() { - change_hashes.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/changes.rs b/rust/automerge-c/src/changes.rs deleted file mode 100644 index 1bff35c8..00000000 --- a/rust/automerge-c/src/changes.rs +++ /dev/null @@ -1,399 +0,0 @@ -use automerge as am; -use std::collections::BTreeMap; -use std::ffi::c_void; -use std::mem::size_of; - -use crate::byte_span::AMbyteSpan; -use crate::change::AMchange; -use crate::result::{to_result, AMresult}; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, - storage: *mut c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. 
-pub const USIZE_USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(changes: &[am::Change], offset: isize, storage: &mut BTreeMap) -> Self { - let storage: *mut BTreeMap = storage; - Self { - len: changes.len(), - offset, - ptr: changes.as_ptr() as *const c_void, - storage: storage as *mut c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. - len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<*const AMchange> { - if self.is_stopped() { - return None; - } - let slice: &mut [am::Change] = - unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut am::Change, self.len) }; - let storage = unsafe { &mut *(self.storage as *mut BTreeMap) }; - let index = self.get_index(); - let value = match storage.get_mut(&index) { - Some(value) => value, - None => { - storage.insert(index, AMchange::new(&mut slice[index])); - storage.get_mut(&index).unwrap() - } - }; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<*const AMchange> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &mut [am::Change] = - unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut am::Change, self.len) }; - let storage = unsafe { &mut *(self.storage as *mut BTreeMap) }; - let index = self.get_index(); - Some(match storage.get_mut(&index) { - Some(value) => value, - None => { - storage.insert(index, AMchange::new(&mut slice[index])); - storage.get_mut(&index).unwrap() - } - }) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - storage: self.storage, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - storage: self.storage, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts( - (&detail as *const Detail) as *const u8, - USIZE_USIZE_USIZE_USIZE_, - ) - .try_into() - .unwrap() - } - } -} - -/// \struct AMchanges -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of changes. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMchanges { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. 
- detail: [u8; USIZE_USIZE_USIZE_USIZE_], -} - -impl AMchanges { - pub fn new(changes: &[am::Change], storage: &mut BTreeMap) -> Self { - Self { - detail: Detail::new(changes, 0, &mut *storage).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<*const AMchange> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<*const AMchange> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[am::Change]> for AMchanges { - fn as_ref(&self) -> &[am::Change] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const am::Change, detail.len) } - } -} - -impl Default for AMchanges { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMchanges -/// \brief Advances an iterator over a sequence of changes by at most \p |n| -/// positions where the sign of \p n is relative to the iterator's -/// direction. -/// -/// \param[in,out] changes A pointer to an `AMchanges` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p changes `!= NULL`. -/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesAdvance(changes: *mut AMchanges, n: isize) { - if let Some(changes) = changes.as_mut() { - changes.advance(n); - }; -} - -/// \memberof AMchanges -/// \brief Tests the equality of two sequences of changes underlying a pair of -/// iterators. -/// -/// \param[in] changes1 A pointer to an `AMchanges` struct. -/// \param[in] changes2 A pointer to an `AMchanges` struct. -/// \return `true` if \p changes1 `==` \p changes2 and `false` otherwise. -/// \pre \p changes1 `!= NULL`. -/// \pre \p changes2 `!= NULL`. -/// \internal -/// -/// #Safety -/// changes1 must be a valid pointer to an AMchanges -/// changes2 must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesEqual( - changes1: *const AMchanges, - changes2: *const AMchanges, -) -> bool { - match (changes1.as_ref(), changes2.as_ref()) { - (Some(changes1), Some(changes2)) => changes1.as_ref() == changes2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} - -/// \memberof AMchanges -/// \brief Allocates an iterator over a sequence of changes and initializes it -/// from a sequence of byte spans. -/// -/// \param[in] src A pointer to an array of `AMbyteSpan` structs. -/// \param[in] count The number of `AMbyteSpan` structs to copy from \p src. -/// \return A pointer to an `AMresult` struct containing an `AMchanges` struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`) / sizeof(AMbyteSpan)`. 
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. -/// \internal -/// -/// # Safety -/// src must be an AMbyteSpan array of size `>= count` -#[no_mangle] -pub unsafe extern "C" fn AMchangesInit(src: *const AMbyteSpan, count: usize) -> *mut AMresult { - let mut changes = Vec::::new(); - for n in 0..count { - let byte_span = &*src.add(n); - let slice = std::slice::from_raw_parts(byte_span.src, byte_span.count); - match slice.try_into() { - Ok(change) => { - changes.push(change); - } - Err(e) => { - return to_result(Err::, am::LoadChangeError>(e)); - } - } - } - to_result(Ok::, am::LoadChangeError>(changes)) -} - -/// \memberof AMchanges -/// \brief Gets the change at the current position of an iterator over a -/// sequence of changes and then advances it by at most \p |n| positions -/// where the sign of \p n is relative to the iterator's direction. -/// -/// \param[in,out] changes A pointer to an `AMchanges` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMchange` struct that's `NULL` when \p changes was -/// previously advanced past its forward/reverse limit. -/// \pre \p changes `!= NULL`. -/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesNext(changes: *mut AMchanges, n: isize) -> *const AMchange { - if let Some(changes) = changes.as_mut() { - if let Some(change) = changes.next(n) { - return change; - } - } - std::ptr::null() -} - -/// \memberof AMchanges -/// \brief Advances an iterator over a sequence of changes by at most \p |n| -/// positions where the sign of \p n is relative to the iterator's -/// direction and then gets the change at its new position. -/// -/// \param[in,out] changes A pointer to an `AMchanges` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMchange` struct that's `NULL` when \p changes is -/// presently advanced past its forward/reverse limit. -/// \pre \p changes `!= NULL`. -/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesPrev(changes: *mut AMchanges, n: isize) -> *const AMchange { - if let Some(changes) = changes.as_mut() { - if let Some(change) = changes.prev(n) { - return change; - } - } - std::ptr::null() -} - -/// \memberof AMchanges -/// \brief Gets the size of the sequence of changes underlying an iterator. -/// -/// \param[in] changes A pointer to an `AMchanges` struct. -/// \return The count of values in \p changes. -/// \pre \p changes `!= NULL`. -/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesSize(changes: *const AMchanges) -> usize { - if let Some(changes) = changes.as_ref() { - changes.len() - } else { - 0 - } -} - -/// \memberof AMchanges -/// \brief Creates an iterator over the same sequence of changes as the given -/// one but with the opposite position and direction. -/// -/// \param[in] changes A pointer to an `AMchanges` struct. -/// \return An `AMchanges` struct. -/// \pre \p changes `!= NULL`. 
-/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesReversed(changes: *const AMchanges) -> AMchanges { - if let Some(changes) = changes.as_ref() { - changes.reversed() - } else { - Default::default() - } -} - -/// \memberof AMchanges -/// \brief Creates an iterator at the starting position over the same sequence -/// of changes as the given one. -/// -/// \param[in] changes A pointer to an `AMchanges` struct. -/// \return An `AMchanges` struct -/// \pre \p changes `!= NULL`. -/// \internal -/// -/// #Safety -/// changes must be a valid pointer to an AMchanges -#[no_mangle] -pub unsafe extern "C" fn AMchangesRewound(changes: *const AMchanges) -> AMchanges { - if let Some(changes) = changes.as_ref() { - changes.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/doc.rs b/rust/automerge-c/src/doc.rs index f02c01bf..82f52bf7 100644 --- a/rust/automerge-c/src/doc.rs +++ b/rust/automerge-c/src/doc.rs @@ -6,43 +6,23 @@ use std::ops::{Deref, DerefMut}; use crate::actor_id::{to_actor_id, AMactorId}; use crate::byte_span::{to_str, AMbyteSpan}; -use crate::change_hashes::AMchangeHashes; +use crate::items::AMitems; use crate::obj::{to_obj_id, AMobjId, AMobjType}; -use crate::result::{to_result, AMresult, AMvalue}; +use crate::result::{to_result, AMresult}; use crate::sync::{to_sync_message, AMsyncMessage, AMsyncState}; pub mod list; pub mod map; pub mod utils; -use crate::changes::AMchanges; -use crate::doc::utils::{to_doc, to_doc_mut}; - -macro_rules! to_changes { - ($handle:expr) => {{ - let handle = $handle.as_ref(); - match handle { - Some(b) => b, - None => return AMresult::err("Invalid AMchanges pointer").into(), - } - }}; -} - -macro_rules! to_index { - ($index:expr, $len:expr, $param_name:expr) => {{ - if $index > $len && $index != usize::MAX { - return AMresult::err(&format!("Invalid {} {}", $param_name, $index)).into(); - } - std::cmp::min($index, $len) - }}; -} +use crate::doc::utils::{clamp, to_doc, to_doc_mut, to_items}; macro_rules! to_sync_state_mut { ($handle:expr) => {{ let handle = $handle.as_mut(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMsyncState pointer").into(), + None => return AMresult::error("Invalid `AMsyncState*`").into(), } }}; } @@ -57,6 +37,10 @@ impl AMdoc { pub fn new(auto_commit: am::AutoCommit) -> Self { Self(auto_commit) } + + pub fn is_equal_to(&mut self, other: &mut Self) -> bool { + self.document().get_heads() == other.document().get_heads() + } } impl AsRef for AMdoc { @@ -82,38 +66,38 @@ impl DerefMut for AMdoc { /// \memberof AMdoc /// \brief Applies a sequence of changes to a document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in] changes A pointer to an `AMchanges` struct. -/// \pre \p doc `!= NULL`. -/// \pre \p changes `!= NULL`. -/// \return A pointer to an `AMresult` struct containing a void. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \param[in] items A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE` +/// items. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p items `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc -/// changes must be a valid pointer to an AMchanges. +/// items must be a valid pointer to an AMitems. #[no_mangle] -pub unsafe extern "C" fn AMapplyChanges( - doc: *mut AMdoc, - changes: *const AMchanges, -) -> *mut AMresult { +pub unsafe extern "C" fn AMapplyChanges(doc: *mut AMdoc, items: *const AMitems) -> *mut AMresult { let doc = to_doc_mut!(doc); - let changes = to_changes!(changes); - to_result(doc.apply_changes(changes.as_ref().to_vec())) + let items = to_items!(items); + match Vec::::try_from(items) { + Ok(changes) => to_result(doc.apply_changes(changes)), + Err(e) => AMresult::error(&e.to_string()).into(), + } } /// \memberof AMdoc /// \brief Allocates storage for a document and initializes it by duplicating /// the given document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMdoc` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_DOC` item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -129,10 +113,9 @@ pub unsafe extern "C" fn AMclone(doc: *const AMdoc) -> *mut AMresult { /// /// \param[in] actor_id A pointer to an `AMactorId` struct or `NULL` for a /// random one. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMdoc` struct. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_DOC` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -149,15 +132,15 @@ pub unsafe extern "C" fn AMcreate(actor_id: *const AMactorId) -> *mut AMresult { /// \brief Commits the current operations on a document with an optional /// message and/or *nix timestamp (milliseconds). /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] message A UTF-8 string view as an `AMbyteSpan` struct. /// \param[in] timestamp A pointer to a 64-bit integer or `NULL`. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// with one element if there were operations to commit, or void if -/// there were no operations to commit. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with one `AM_VAL_TYPE_CHANGE_HASH` +/// item if there were operations to commit or an `AM_VAL_TYPE_VOID` item +/// if there were no operations to commit. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -183,24 +166,24 @@ pub unsafe extern "C" fn AMcommit( /// \brief Creates an empty change with an optional message and/or *nix /// timestamp (milliseconds). 
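As a usage sketch for the `AMitems`-based `AMapplyChanges()` above (illustrative only; `AMresultStatus()`, `AM_STATUS_OK` and `AMresultItems()` are assumed helpers from the items API and are not defined in these hunks):

    /* Replays every change from src into dest. Passing NULL for have_deps to
     * AMgetChanges() requests all of src's changes. */
    static void copy_changes(AMdoc* dest, AMdoc* src) {
        AMresult* changes_result = AMgetChanges(src, NULL);
        if (AMresultStatus(changes_result) == AM_STATUS_OK) { /* assumed helper */
            AMitems changes = AMresultItems(changes_result);  /* assumed helper */
            /* AM_VAL_TYPE_VOID on success; the nested result is freed at once. */
            AMresultFree(AMapplyChanges(dest, &changes));
        }
        AMresultFree(changes_result);
    }
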
/// -/// This is useful if you wish to create a "merge commit" which has as its -/// dependents the current heads of the document but you don't have any -/// operations to add to the document. +/// \details This is useful if you wish to create a "merge commit" which has as +/// its dependents the current heads of the document but you don't have +/// any operations to add to the document. /// /// \note If there are outstanding uncommitted changes to the document -/// then two changes will be created: one for creating the outstanding changes -/// and one for the empty change. The empty change will always be the -/// latest change in the document after this call and the returned hash will be -/// the hash of that empty change. +/// then two changes will be created: one for creating the outstanding +/// changes and one for the empty change. The empty change will always be +/// the latest change in the document after this call and the returned +/// hash will be the hash of that empty change. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] message A UTF-8 string view as an `AMbyteSpan` struct. /// \param[in] timestamp A pointer to a 64-bit integer or `NULL`. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// with one element. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with one `AM_VAL_TYPE_CHANGE_HASH` +/// item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -226,11 +209,11 @@ pub unsafe extern "C" fn AMemptyChange( /// \brief Tests the equality of two documents after closing their respective /// transactions. /// -/// \param[in,out] doc1 An `AMdoc` struct. -/// \param[in,out] doc2 An `AMdoc` struct. +/// \param[in] doc1 A pointer to an `AMdoc` struct. +/// \param[in] doc2 A pointer to an `AMdoc` struct. /// \return `true` if \p doc1 `==` \p doc2 and `false` otherwise. -/// \pre \p doc1 `!= NULL`. -/// \pre \p doc2 `!= NULL`. +/// \pre \p doc1 `!= NULL` +/// \pre \p doc2 `!= NULL` /// \internal /// /// #Safety @@ -239,33 +222,36 @@ pub unsafe extern "C" fn AMemptyChange( #[no_mangle] pub unsafe extern "C" fn AMequal(doc1: *mut AMdoc, doc2: *mut AMdoc) -> bool { match (doc1.as_mut(), doc2.as_mut()) { - (Some(doc1), Some(doc2)) => doc1.document().get_heads() == doc2.document().get_heads(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, + (Some(doc1), Some(doc2)) => doc1.is_equal_to(doc2), + (None, None) | (None, Some(_)) | (Some(_), None) => false, } } /// \memberof AMdoc -/// \brief Forks this document at the current or a historical point for use by +/// \brief Forks this document at its current or a historical point for use by /// a different actor. -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical -/// point or `NULL` for the current point. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMdoc` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. 
+/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select a historical point or `NULL` to select its +/// current point. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] -pub unsafe extern "C" fn AMfork(doc: *mut AMdoc, heads: *const AMchangeHashes) -> *mut AMresult { +pub unsafe extern "C" fn AMfork(doc: *mut AMdoc, heads: *const AMitems) -> *mut AMresult { let doc = to_doc_mut!(doc); match heads.as_ref() { None => to_result(doc.fork()), - Some(heads) => to_result(doc.fork_at(heads.as_ref())), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.fork_at(&heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } @@ -273,14 +259,14 @@ pub unsafe extern "C" fn AMfork(doc: *mut AMdoc, heads: *const AMchangeHashes) - /// \brief Generates a synchronization message for a peer based upon the given /// synchronization state. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in,out] sync_state A pointer to an `AMsyncState` struct. -/// \return A pointer to an `AMresult` struct containing either a pointer to an -/// `AMsyncMessage` struct or a void. -/// \pre \p doc `!= NULL`. -/// \pre \p sync_state `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \param[in] sync_state A pointer to an `AMsyncState` struct. +/// \return A pointer to an `AMresult` struct with either an +/// `AM_VAL_TYPE_SYNC_MESSAGE` or `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p sync_state `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -300,11 +286,10 @@ pub unsafe extern "C" fn AMgenerateSyncMessage( /// \brief Gets a document's actor identifier. /// /// \param[in] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMactorId` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -320,20 +305,22 @@ pub unsafe extern "C" fn AMgetActorId(doc: *const AMdoc) -> *mut AMresult { /// \memberof AMdoc /// \brief Gets the change added to a document by its respective hash. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes in \p src. -/// \return A pointer to an `AMresult` struct containing an `AMchanges` struct. -/// \pre \p doc `!= NULL`. -/// \pre \p src `!= NULL`. -/// \pre \p count `>= AM_CHANGE_HASH_SIZE`. 
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to copy from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_CHANGE` item. +/// \pre \p doc `!= NULL` +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src') >= AM_CHANGE_HASH_SIZE` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc -/// src must be a byte array of size `>= automerge::types::HASH_SIZE` +/// src must be a byte array of length `>= automerge::types::HASH_SIZE` #[no_mangle] pub unsafe extern "C" fn AMgetChangeByHash( doc: *mut AMdoc, @@ -344,48 +331,48 @@ pub unsafe extern "C" fn AMgetChangeByHash( let slice = std::slice::from_raw_parts(src, count); match slice.try_into() { Ok(change_hash) => to_result(doc.get_change_by_hash(&change_hash)), - Err(e) => AMresult::err(&e.to_string()).into(), + Err(e) => AMresult::error(&e.to_string()).into(), } } /// \memberof AMdoc /// \brief Gets the changes added to a document by their respective hashes. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in] have_deps A pointer to an `AMchangeHashes` struct or `NULL`. -/// \return A pointer to an `AMresult` struct containing an `AMchanges` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \param[in] have_deps A pointer to an `AMitems` struct with +/// `AM_VAL_TYPE_CHANGE_HASH` items or `NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE` items. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc #[no_mangle] -pub unsafe extern "C" fn AMgetChanges( - doc: *mut AMdoc, - have_deps: *const AMchangeHashes, -) -> *mut AMresult { +pub unsafe extern "C" fn AMgetChanges(doc: *mut AMdoc, have_deps: *const AMitems) -> *mut AMresult { let doc = to_doc_mut!(doc); - let empty_deps = Vec::::new(); let have_deps = match have_deps.as_ref() { - Some(have_deps) => have_deps.as_ref(), - None => &empty_deps, + Some(have_deps) => match Vec::::try_from(have_deps) { + Ok(change_hashes) => change_hashes, + Err(e) => return AMresult::error(&e.to_string()).into(), + }, + None => Vec::::new(), }; - to_result(doc.get_changes(have_deps)) + to_result(doc.get_changes(&have_deps)) } /// \memberof AMdoc /// \brief Gets the changes added to a second document that weren't added to /// a first document. /// -/// \param[in,out] doc1 An `AMdoc` struct. -/// \param[in,out] doc2 An `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing an `AMchanges` struct. -/// \pre \p doc1 `!= NULL`. -/// \pre \p doc2 `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc1 A pointer to an `AMdoc` struct. +/// \param[in] doc2 A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE` items. 
+/// \pre \p doc1 `!= NULL` +/// \pre \p doc2 `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -401,12 +388,11 @@ pub unsafe extern "C" fn AMgetChangesAdded(doc1: *mut AMdoc, doc2: *mut AMdoc) - /// \memberof AMdoc /// \brief Gets the current heads of a document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -423,41 +409,42 @@ pub unsafe extern "C" fn AMgetHeads(doc: *mut AMdoc) -> *mut AMresult { /// \brief Gets the hashes of the changes in a document that aren't transitive /// dependencies of the given hashes of changes. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in] heads A pointer to an `AMchangeHashes` struct or `NULL`. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items or `NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] -pub unsafe extern "C" fn AMgetMissingDeps( - doc: *mut AMdoc, - heads: *const AMchangeHashes, -) -> *mut AMresult { +pub unsafe extern "C" fn AMgetMissingDeps(doc: *mut AMdoc, heads: *const AMitems) -> *mut AMresult { let doc = to_doc_mut!(doc); - let empty_heads = Vec::::new(); let heads = match heads.as_ref() { - Some(heads) => heads.as_ref(), - None => &empty_heads, + None => Vec::::new(), + Some(heads) => match >::try_from(heads) { + Ok(heads) => heads, + Err(e) => { + return AMresult::error(&e.to_string()).into(); + } + }, }; - to_result(doc.get_missing_deps(heads)) + to_result(doc.get_missing_deps(heads.as_slice())) } /// \memberof AMdoc /// \brief Gets the last change made to a document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing either an `AMchange` -/// struct or a void. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct containing either an +/// `AM_VAL_TYPE_CHANGE` or `AM_VAL_TYPE_VOID` item. 
+/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -473,29 +460,33 @@ pub unsafe extern "C" fn AMgetLastLocalChange(doc: *mut AMdoc) -> *mut AMresult /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// keys or `NULL` for current keys. -/// \return A pointer to an `AMresult` struct containing an `AMstrs` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select historical keys or `NULL` to select current +/// keys. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_STR` items. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMkeys( doc: *const AMdoc, obj_id: *const AMobjId, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); match heads.as_ref() { None => to_result(doc.keys(obj_id)), - Some(heads) => to_result(doc.keys_at(obj_id, heads.as_ref())), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.keys_at(obj_id, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } @@ -504,42 +495,43 @@ pub unsafe extern "C" fn AMkeys( /// form of an incremental save. /// /// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes in \p src to load. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMdoc` struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to load from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_DOC` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] pub unsafe extern "C" fn AMload(src: *const u8, count: usize) -> *mut AMresult { - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result(am::AutoCommit::load(&data)) + let data = std::slice::from_raw_parts(src, count); + to_result(am::AutoCommit::load(data)) } /// \memberof AMdoc /// \brief Loads the compact form of an incremental save into a document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] src A pointer to an array of bytes. 
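As a usage sketch for the heads-as-`AMitems` pattern used by `AMkeys()` and the other historical accessors above (illustrative only; `AMresultItems()` is an assumed helper from the items API and is not defined in these hunks):

    /* Reads the keys of the root map as of the document's current heads. */
    static void keys_at_current_heads(AMdoc* doc) {
        AMresult* heads_result = AMgetHeads(doc);
        AMitems heads = AMresultItems(heads_result);          /* assumed helper */
        AMresult* keys_result = AMkeys(doc, AM_ROOT, &heads); /* AM_VAL_TYPE_STR items */
        /* ... walk the string items in keys_result here ... */
        AMresultFree(keys_result);
        AMresultFree(heads_result);
    }
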
-/// \param[in] count The number of bytes in \p src to load. -/// \return A pointer to an `AMresult` struct containing the number of -/// operations loaded from \p src. -/// \pre \p doc `!= NULL`. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to load from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_UINT` item. +/// \pre \p doc `!= NULL` +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] pub unsafe extern "C" fn AMloadIncremental( doc: *mut AMdoc, @@ -547,23 +539,21 @@ pub unsafe extern "C" fn AMloadIncremental( count: usize, ) -> *mut AMresult { let doc = to_doc_mut!(doc); - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result(doc.load_incremental(&data)) + let data = std::slice::from_raw_parts(src, count); + to_result(doc.load_incremental(data)) } /// \memberof AMdoc /// \brief Applies all of the changes in \p src which are not in \p dest to /// \p dest. /// -/// \param[in,out] dest A pointer to an `AMdoc` struct. -/// \param[in,out] src A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing an `AMchangeHashes` -/// struct. -/// \pre \p dest `!= NULL`. -/// \pre \p src `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] dest A pointer to an `AMdoc` struct. +/// \param[in] src A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p dest `!= NULL` +/// \pre \p src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -580,31 +570,37 @@ pub unsafe extern "C" fn AMmerge(dest: *mut AMdoc, src: *mut AMdoc) -> *mut AMre /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// size or `NULL` for current size. -/// \return A 64-bit unsigned integer. -/// \pre \p doc `!= NULL`. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select a historical size or `NULL` to select its +/// current size. +/// \return The count of items in the object identified by \p obj_id. 
+/// \pre \p doc `!= NULL` /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMobjSize( doc: *const AMdoc, obj_id: *const AMobjId, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> usize { if let Some(doc) = doc.as_ref() { let obj_id = to_obj_id!(obj_id); match heads.as_ref() { - None => doc.length(obj_id), - Some(heads) => doc.length_at(obj_id, heads.as_ref()), + None => { + return doc.length(obj_id); + } + Some(heads) => { + if let Ok(heads) = >::try_from(heads) { + return doc.length_at(obj_id, &heads); + } + } } - } else { - 0 } + 0 } /// \memberof AMdoc @@ -612,8 +608,9 @@ pub unsafe extern "C" fn AMobjSize( /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \return An `AMobjType`. -/// \pre \p doc `!= NULL`. +/// \return An `AMobjType` tag or `0`. +/// \pre \p doc `!= NULL` +/// \pre \p obj_id `!= NULL` /// \internal /// /// # Safety @@ -623,44 +620,45 @@ pub unsafe extern "C" fn AMobjSize( pub unsafe extern "C" fn AMobjObjType(doc: *const AMdoc, obj_id: *const AMobjId) -> AMobjType { if let Some(doc) = doc.as_ref() { let obj_id = to_obj_id!(obj_id); - match doc.object_type(obj_id) { - Err(_) => AMobjType::Void, - Ok(obj_type) => obj_type.into(), + if let Ok(obj_type) = doc.object_type(obj_id) { + return (&obj_type).into(); } - } else { - AMobjType::Void } + Default::default() } /// \memberof AMdoc -/// \brief Gets the current or historical values of an object within its entire -/// range. +/// \brief Gets the current or historical items of an entire object. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// items or `NULL` for current items. -/// \return A pointer to an `AMresult` struct containing an `AMobjItems` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select its historical items or `NULL` to select +/// its current items. +/// \return A pointer to an `AMresult` struct with an `AMitems` struct. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] -pub unsafe extern "C" fn AMobjValues( +pub unsafe extern "C" fn AMobjItems( doc: *const AMdoc, obj_id: *const AMobjId, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); match heads.as_ref() { None => to_result(doc.values(obj_id)), - Some(heads) => to_result(doc.values_at(obj_id, heads.as_ref())), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.values_at(obj_id, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } @@ -670,7 +668,7 @@ pub unsafe extern "C" fn AMobjValues( /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \return The count of pending operations for \p doc. -/// \pre \p doc `!= NULL`. +/// \pre \p doc `!= NULL` /// \internal /// /// # Safety @@ -678,23 +676,22 @@ pub unsafe extern "C" fn AMobjValues( #[no_mangle] pub unsafe extern "C" fn AMpendingOps(doc: *const AMdoc) -> usize { if let Some(doc) = doc.as_ref() { - doc.pending_ops() - } else { - 0 + return doc.pending_ops(); } + 0 } /// \memberof AMdoc /// \brief Receives a synchronization message from a peer based upon a given /// synchronization state. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \param[in,out] sync_state A pointer to an `AMsyncState` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \param[in] sync_state A pointer to an `AMsyncState` struct. /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p sync_state `!= NULL`. -/// \pre \p sync_message `!= NULL`. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p sync_state `!= NULL` +/// \pre \p sync_message `!= NULL` /// \internal /// /// # Safety @@ -720,9 +717,9 @@ pub unsafe extern "C" fn AMreceiveSyncMessage( /// \brief Cancels the pending operations added during a document's current /// transaction and gets the number of cancellations. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \return The count of pending operations for \p doc that were cancelled. -/// \pre \p doc `!= NULL`. +/// \pre \p doc `!= NULL` /// \internal /// /// # Safety @@ -730,21 +727,19 @@ pub unsafe extern "C" fn AMreceiveSyncMessage( #[no_mangle] pub unsafe extern "C" fn AMrollback(doc: *mut AMdoc) -> usize { if let Some(doc) = doc.as_mut() { - doc.rollback() - } else { - 0 + return doc.rollback(); } + 0 } /// \memberof AMdoc /// \brief Saves the entirety of a document into a compact form. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing an array of bytes as -/// an `AMbyteSpan` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BYTES` item. 
+/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -759,12 +754,11 @@ pub unsafe extern "C" fn AMsave(doc: *mut AMdoc) -> *mut AMresult { /// \brief Saves the changes to a document since its last save into a compact /// form. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. -/// \return A pointer to an `AMresult` struct containing an array of bytes as -/// an `AMbyteSpan` struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] doc A pointer to an `AMdoc` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BYTES` item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -778,13 +772,13 @@ pub unsafe extern "C" fn AMsaveIncremental(doc: *mut AMdoc) -> *mut AMresult { /// \memberof AMdoc /// \brief Puts the actor identifier of a document. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] actor_id A pointer to an `AMactorId` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p actor_id `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p actor_id `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -805,76 +799,65 @@ pub unsafe extern "C" fn AMsetActorId( /// \brief Splices values into and/or removes values from the identified object /// at a given position within it. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] pos A position in the object identified by \p obj_id or /// `SIZE_MAX` to indicate one past its end. -/// \param[in] del The number of characters to delete or `SIZE_MAX` to indicate +/// \param[in] del The number of values to delete or `SIZE_MAX` to indicate /// all of them. -/// \param[in] src A pointer to an array of `AMvalue` structs. -/// \param[in] count The number of `AMvalue` structs in \p src to load. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id`)` or \p pos `== SIZE_MAX`. -/// \pre `0 <=` \p del `<= AMobjSize(`\p obj_id`)` or \p del `== SIZE_MAX`. -/// \pre `(`\p src `!= NULL and 1 <=` \p count `<= sizeof(`\p src`)/ -/// sizeof(AMvalue)) or `\p src `== NULL or `\p count `== 0`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] values A copy of an `AMitems` struct from which values will be +/// spliced starting at its current position; call +/// `AMitemsRewound()` on a used `AMitems` first to ensure +/// that all of its values are spliced in. Pass `(AMitems){0}` +/// when zero values should be spliced in. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. 
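A hedged sketch of the two save calls, assuming only an `AMdoc *doc` created elsewhere and an `AMresultStatus()` accessor that is outside this hunk:

    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: full save followed by an incremental save. */
    static void sketch_save(AMdoc *doc) {
        AMresult *full = AMsave(doc);               /* AM_VAL_TYPE_BYTES item on success */
        if (AMresultStatus(full) == AM_STATUS_OK) { /* status accessor assumed */
            /* ...copy or transmit the saved bytes here... */
        }
        AMresultFree(full);
        AMresult *delta = AMsaveIncremental(doc);   /* bytes written since the last save */
        AMresultFree(delta);
    }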
+/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \pre `0 <=` \p del `<= AMobjSize(`\p obj_id `)` or \p del `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// src must be an AMvalue array of size `>= count` or std::ptr::null() +/// values must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMsplice( doc: *mut AMdoc, obj_id: *const AMobjId, pos: usize, del: usize, - src: *const AMvalue, - count: usize, + values: AMitems, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); let len = doc.length(obj_id); - let pos = to_index!(pos, len, "pos"); - let del = to_index!(del, len, "del"); - let mut vals: Vec = vec![]; - if !(src.is_null() || count == 0) { - let c_vals = std::slice::from_raw_parts(src, count); - for c_val in c_vals { - match c_val.try_into() { - Ok(s) => { - vals.push(s); - } - Err(e) => { - return AMresult::err(&e.to_string()).into(); - } - } - } + let pos = clamp!(pos, len, "pos"); + let del = clamp!(del, len, "del"); + match Vec::::try_from(&values) { + Ok(vals) => to_result(doc.splice(obj_id, pos, del, vals)), + Err(e) => AMresult::error(&e.to_string()).into(), } - to_result(doc.splice(obj_id, pos, del, vals)) } /// \memberof AMdoc /// \brief Splices characters into and/or removes characters from the /// identified object at a given position within it. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] pos A position in the text object identified by \p obj_id or /// `SIZE_MAX` to indicate one past its end. /// \param[in] del The number of characters to delete or `SIZE_MAX` to indicate /// all of them. /// \param[in] text A UTF-8 string view as an `AMbyteSpan` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id`)` or \p pos `== SIZE_MAX`. -/// \pre `0 <=` \p del `<= AMobjSize(`\p obj_id`)` or \p del `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \pre `0 <=` \p del `<= AMobjSize(`\p obj_id `)` or \p del `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -891,8 +874,8 @@ pub unsafe extern "C" fn AMspliceText( let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); let len = doc.length(obj_id); - let pos = to_index!(pos, len, "pos"); - let del = to_index!(del, len, "del"); + let pos = clamp!(pos, len, "pos"); + let del = clamp!(del, len, "del"); to_result(doc.splice_text(obj_id, pos, del, to_str!(text))) } @@ -901,28 +884,32 @@ pub unsafe extern "C" fn AMspliceText( /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. 
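As an illustration of `AMspliceText()` and `AMtext()` under the new conventions, a sketch assuming a text object created elsewhere and an `AMstr()` convenience for building an `AMbyteSpan` from a C string:

    #include <stdint.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: edit a text object, then read it back in full. */
    static void sketch_text(AMdoc *doc, AMobjId const *note) {
        AMresultFree(AMspliceText(doc, note, 0, 0, AMstr("Hello")));    /* AMstr() assumed */
        AMresultFree(AMspliceText(doc, note, SIZE_MAX, 0, AMstr("!"))); /* append at the end */
        AMresult *text = AMtext(doc, note, NULL);    /* AM_VAL_TYPE_STR item, current state */
        /* ...extract the string from the result's item before freeing... */
        AMresultFree(text);
    }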
-/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// keys or `NULL` for current keys. -/// \return A pointer to an `AMresult` struct containing a UTF-8 string. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] heads A pointer to an `AMitems` struct containing +/// `AM_VAL_TYPE_CHANGE_HASH` items to select a historical string +/// or `NULL` to select the current string. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_STR` item. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMtext( doc: *const AMdoc, obj_id: *const AMobjId, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); match heads.as_ref() { None => to_result(doc.text(obj_id)), - Some(heads) => to_result(doc.text_at(obj_id, heads.as_ref())), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.text_at(obj_id, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } diff --git a/rust/automerge-c/src/doc/list.rs b/rust/automerge-c/src/doc/list.rs index 6bcdeabf..c4503322 100644 --- a/rust/automerge-c/src/doc/list.rs +++ b/rust/automerge-c/src/doc/list.rs @@ -3,47 +3,44 @@ use automerge::transaction::Transactable; use automerge::ReadDoc; use crate::byte_span::{to_str, AMbyteSpan}; -use crate::change_hashes::AMchangeHashes; -use crate::doc::{to_doc, to_doc_mut, to_obj_id, AMdoc}; -use crate::obj::{to_obj_type, AMobjId, AMobjType}; +use crate::doc::{to_doc, to_doc_mut, AMdoc}; +use crate::items::AMitems; +use crate::obj::{to_obj_id, to_obj_type, AMobjId, AMobjType}; use crate::result::{to_result, AMresult}; -pub mod item; -pub mod items; - macro_rules! adjust { - ($index:expr, $insert:expr, $len:expr) => {{ + ($pos:expr, $insert:expr, $len:expr) => {{ // An empty object can only be inserted into. let insert = $insert || $len == 0; let end = if insert { $len } else { $len - 1 }; - if $index > end && $index != usize::MAX { - return AMresult::err(&format!("Invalid index {}", $index)).into(); + if $pos > end && $pos != usize::MAX { + return AMresult::error(&format!("Invalid pos {}", $pos)).into(); } - (std::cmp::min($index, end), insert) + (std::cmp::min($pos, end), insert) }}; } macro_rules! to_range { ($begin:expr, $end:expr) => {{ if $begin > $end { - return AMresult::err(&format!("Invalid range [{}-{})", $begin, $end)).into(); + return AMresult::error(&format!("Invalid range [{}-{})", $begin, $end)).into(); }; ($begin..$end) }}; } /// \memberof AMdoc -/// \brief Deletes an index in a list object. +/// \brief Deletes an item from a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. 
-/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -53,101 +50,109 @@ macro_rules! to_range { pub unsafe extern "C" fn AMlistDelete( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, _) = adjust!(index, false, doc.length(obj_id)); - to_result(doc.delete(obj_id, index)) + let (pos, _) = adjust!(pos, false, doc.length(obj_id)); + to_result(doc.delete(obj_id, pos)) } /// \memberof AMdoc -/// \brief Gets the current or historical value at an index in a list object. +/// \brief Gets a current or historical item within a list object. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical -/// value or `NULL` for the current value. -/// \return A pointer to an `AMresult` struct that doesn't contain a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select a historical item at \p pos or `NULL` +/// to select the current item at \p pos. +/// \return A pointer to an `AMresult` struct with an `AMitem` struct. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
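A small sketch of the position-based list reads above, assuming a non-empty list object obtained elsewhere:

    #include <stdint.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: read the first and last current items, then delete the last one. */
    static void sketch_list_read(AMdoc *doc, AMobjId const *list) {
        AMresult *first = AMlistGet(doc, list, 0, NULL);        /* current item at pos 0 */
        AMresult *last = AMlistGet(doc, list, SIZE_MAX, NULL);  /* current last item */
        /* ...inspect the single AMitem in each result... */
        AMresultFree(first);
        AMresultFree(last);
        AMresultFree(AMlistDelete(doc, list, SIZE_MAX));        /* remove the last item */
    }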
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMlistGet( doc: *const AMdoc, obj_id: *const AMobjId, - index: usize, - heads: *const AMchangeHashes, + pos: usize, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); - let (index, _) = adjust!(index, false, doc.length(obj_id)); - to_result(match heads.as_ref() { - None => doc.get(obj_id, index), - Some(heads) => doc.get_at(obj_id, index, heads.as_ref()), - }) + let (pos, _) = adjust!(pos, false, doc.length(obj_id)); + match heads.as_ref() { + None => to_result(doc.get(obj_id, pos)), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.get_at(obj_id, pos, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, + } } /// \memberof AMdoc -/// \brief Gets all of the historical values at an index in a list object until -/// its current one or a specific one. +/// \brief Gets all of the historical items at a position within a list object +/// until its current one or a specific one. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical -/// last value or `NULL` for the current last value. -/// \return A pointer to an `AMresult` struct containing an `AMobjItems` struct. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select a historical last item or `NULL` to select +/// the current last item. +/// \return A pointer to an `AMresult` struct with an `AMitems` struct. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMlistGetAll( doc: *const AMdoc, obj_id: *const AMobjId, - index: usize, - heads: *const AMchangeHashes, + pos: usize, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); - let (index, _) = adjust!(index, false, doc.length(obj_id)); + let (pos, _) = adjust!(pos, false, doc.length(obj_id)); match heads.as_ref() { - None => to_result(doc.get_all(obj_id, index)), - Some(heads) => to_result(doc.get_all_at(obj_id, index, heads.as_ref())), + None => to_result(doc.get_all(obj_id, pos)), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.get_all_at(obj_id, pos, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } /// \memberof AMdoc -/// \brief Increments a counter at an index in a list object by the given -/// value. +/// \brief Increments a counter value in an item within a list object by the +/// given value. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -157,32 +162,33 @@ pub unsafe extern "C" fn AMlistGetAll( pub unsafe extern "C" fn AMlistIncrement( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, value: i64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, _) = adjust!(index, false, doc.length(obj_id)); - to_result(doc.increment(obj_id, index, value)) + let (pos, _) = adjust!(pos, false, doc.length(obj_id)); + to_result(doc.increment(obj_id, pos, value)) } /// \memberof AMdoc -/// \brief Puts a boolean as the value at an index in a list object. +/// \brief Puts a boolean value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. 
+/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A boolean. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -192,84 +198,85 @@ pub unsafe extern "C" fn AMlistIncrement( pub unsafe extern "C" fn AMlistPutBool( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: bool, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); let value = am::ScalarValue::Boolean(value); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts a sequence of bytes as the value at an index in a list object. +/// \brief Puts an array of bytes value at a position within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p src before \p index instead of -/// writing \p src over \p index. -/// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes to copy from \p src. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. +/// \param[in] value A view onto the array of bytes to copy from as an +/// `AMbyteSpan` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. 
+/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \pre \p value.src `!= NULL` +/// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// src must be a byte array of size `>= count` +/// value.src must be a byte array of length >= value.count #[no_mangle] pub unsafe extern "C" fn AMlistPutBytes( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, - val: AMbyteSpan, + value: AMbyteSpan, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); - let mut value = Vec::new(); - value.extend_from_slice(std::slice::from_raw_parts(val.src, val.count)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); + let value: Vec = (&value).into(); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts a CRDT counter as the value at an index in a list object. +/// \brief Puts a CRDT counter value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
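To show how the counter calls compose, a sketch that appends a counter item and then bumps it, assuming a list object obtained elsewhere:

    #include <stdbool.h>
    #include <stdint.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: append a counter starting at 0, then increment it by 5. */
    static void sketch_counter(AMdoc *doc, AMobjId const *list) {
        AMresultFree(AMlistPutCounter(doc, list, SIZE_MAX, true, 0)); /* insert past the end */
        AMresultFree(AMlistIncrement(doc, list, SIZE_MAX, 5));        /* bump the last item */
    }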
/// \internal /// /// # Safety @@ -279,38 +286,39 @@ pub unsafe extern "C" fn AMlistPutBytes( pub unsafe extern "C" fn AMlistPutCounter( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: i64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); let value = am::ScalarValue::Counter(value.into()); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts a float as the value at an index in a list object. +/// \brief Puts a float value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A 64-bit float. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -320,37 +328,38 @@ pub unsafe extern "C" fn AMlistPutCounter( pub unsafe extern "C" fn AMlistPutF64( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: f64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts a signed integer as the value at an index in a list object. +/// \brief Puts a signed integer value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. 
-/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -360,36 +369,37 @@ pub unsafe extern "C" fn AMlistPutF64( pub unsafe extern "C" fn AMlistPutInt( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: i64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts null as the value at an index in a list object. +/// \brief Puts a null value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
/// \internal /// /// # Safety @@ -399,38 +409,37 @@ pub unsafe extern "C" fn AMlistPutInt( pub unsafe extern "C" fn AMlistPutNull( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); to_result(if insert { - doc.insert(obj_id, index, ()) + doc.insert(obj_id, pos, ()) } else { - doc.put(obj_id, index, ()) + doc.put(obj_id, pos, ()) }) } /// \memberof AMdoc -/// \brief Puts an empty object as the value at an index in a list object. +/// \brief Puts an empty object value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] obj_type An `AMobjIdType` enum tag. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMobjId` struct. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \pre \p obj_type != `AM_OBJ_TYPE_VOID`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_OBJ_TYPE` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -440,82 +449,85 @@ pub unsafe extern "C" fn AMlistPutNull( pub unsafe extern "C" fn AMlistPutObject( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, obj_type: AMobjType, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); - let object = to_obj_type!(obj_type); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); + let obj_type = to_obj_type!(obj_type); to_result(if insert { - doc.insert_object(obj_id, index, object) + (doc.insert_object(obj_id, pos, obj_type), obj_type) } else { - doc.put_object(obj_id, index, object) + (doc.put_object(obj_id, pos, obj_type), obj_type) }) } /// \memberof AMdoc -/// \brief Puts a UTF-8 string as the value at an index in a list object. +/// \brief Puts a UTF-8 string value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. 
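A hedged sketch of nesting a new object inside a list via `AMlistPutObject()`; the `AM_OBJ_TYPE_MAP` tag and the item accessors used to read the new object's identifier are assumptions rather than something shown in this hunk:

    #include <stdbool.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: insert an empty map before pos 0 of a list. */
    static void sketch_put_object(AMdoc *doc, AMobjId const *list) {
        AMresult *r = AMlistPutObject(doc, list, 0, true, AM_OBJ_TYPE_MAP); /* tag assumed */
        /* The new object's id is available from the result's item; extract it
           with the item accessors (assumed) before freeing the result. */
        AMresultFree(r);
    }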
-/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \pre \p value `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \pre \p value.src `!= NULL` +/// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// value must be a null-terminated array of `c_char` +/// value.src must be a byte array of length >= value.count #[no_mangle] pub unsafe extern "C" fn AMlistPutStr( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: AMbyteSpan, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); let value = to_str!(value); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts a *nix timestamp (milliseconds) as the value at an index in a +/// \brief Puts a *nix timestamp (milliseconds) value into an item within a /// list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. 
-/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -525,38 +537,39 @@ pub unsafe extern "C" fn AMlistPutStr( pub unsafe extern "C" fn AMlistPutTimestamp( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: i64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); let value = am::ScalarValue::Timestamp(value); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Puts an unsigned integer as the value at an index in a list object. +/// \brief Puts an unsigned integer value into an item within a list object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] index An index in the list object identified by \p obj_id or -/// `SIZE_MAX` to indicate its last index if \p insert -/// `== false` or one past its last index if \p insert -/// `== true`. -/// \param[in] insert A flag to insert \p value before \p index instead of -/// writing \p value over \p index. +/// \param[in] pos The position of an item within the list object identified by +/// \p obj_id or `SIZE_MAX` to indicate its last item if +/// \p insert `== false` or one past its last item if +/// \p insert `== true`. +/// \param[in] insert A flag for inserting a new item for \p value before +/// \p pos instead of putting \p value into the item at +/// \p pos. /// \param[in] value A 64-bit unsigned integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
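The `insert` flag and the `SIZE_MAX` position combine as sketched below, assuming a list object obtained elsewhere:

    #include <stdbool.h>
    #include <stdint.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: append a new item, then overwrite the value in the last item. */
    static void sketch_insert_vs_put(AMdoc *doc, AMobjId const *list) {
        AMresultFree(AMlistPutUint(doc, list, SIZE_MAX, true, 42));  /* one past the end */
        AMresultFree(AMlistPutUint(doc, list, SIZE_MAX, false, 43)); /* last existing item */
    }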
/// \internal /// /// # Safety @@ -566,56 +579,58 @@ pub unsafe extern "C" fn AMlistPutTimestamp( pub unsafe extern "C" fn AMlistPutUint( doc: *mut AMdoc, obj_id: *const AMobjId, - index: usize, + pos: usize, insert: bool, value: u64, ) -> *mut AMresult { let doc = to_doc_mut!(doc); let obj_id = to_obj_id!(obj_id); - let (index, insert) = adjust!(index, insert, doc.length(obj_id)); + let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); to_result(if insert { - doc.insert(obj_id, index, value) + doc.insert(obj_id, pos, value) } else { - doc.put(obj_id, index, value) + doc.put(obj_id, pos, value) }) } /// \memberof AMdoc -/// \brief Gets the current or historical indices and values of the list object -/// within the given range. +/// \brief Gets the current or historical items in the list object within the +/// given range. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] begin The first index in a range of indices. -/// \param[in] end At least one past the last index in a range of indices. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// indices and values or `NULL` for current indices and -/// values. -/// \return A pointer to an `AMresult` struct containing an `AMlistItems` -/// struct. -/// \pre \p doc `!= NULL`. -/// \pre \p begin `<=` \p end `<= SIZE_MAX`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] begin The first pos in a range of indices. +/// \param[in] end At least one past the last pos in a range of indices. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select historical items or `NULL` to select +/// current items. +/// \return A pointer to an `AMresult` struct with an `AMitems` struct. +/// \pre \p doc `!= NULL` +/// \pre \p begin `<=` \p end `<= SIZE_MAX` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMlistRange( doc: *const AMdoc, obj_id: *const AMobjId, begin: usize, end: usize, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); let range = to_range!(begin, end); match heads.as_ref() { None => to_result(doc.list_range(obj_id, range)), - Some(heads) => to_result(doc.list_range_at(obj_id, range, heads.as_ref())), + Some(heads) => match >::try_from(heads) { + Ok(heads) => to_result(doc.list_range_at(obj_id, range, &heads)), + Err(e) => AMresult::error(&e.to_string()).into(), + }, } } diff --git a/rust/automerge-c/src/doc/list/item.rs b/rust/automerge-c/src/doc/list/item.rs deleted file mode 100644 index 7a3869f3..00000000 --- a/rust/automerge-c/src/doc/list/item.rs +++ /dev/null @@ -1,97 +0,0 @@ -use automerge as am; - -use crate::obj::AMobjId; -use crate::result::AMvalue; - -/// \struct AMlistItem -/// \installed_headerfile -/// \brief An item in a list object. -pub struct AMlistItem { - /// The index of an item in a list object. - index: usize, - /// The object identifier of an item in a list object. 
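A sketch of reading a whole list with `AMlistRange()`, where passing `0` and `SIZE_MAX` as the range bounds and `NULL` for `heads` selects every current item; the iterator accessors for walking the returned `AMitems` are assumed:

    #include <stdint.h>
    #include <automerge-c/automerge.h>   /* installed header name assumed */

    /* Sketch: fetch every current item of a list in one result. */
    static void sketch_list_range(AMdoc *doc, AMobjId const *list) {
        AMresult *r = AMlistRange(doc, list, 0, SIZE_MAX, NULL);
        /* ...walk the result's AMitems with the iterator accessors (assumed)... */
        AMresultFree(r);
    }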
- obj_id: AMobjId, - /// The value of an item in a list object. - value: am::Value<'static>, -} - -impl AMlistItem { - pub fn new(index: usize, value: am::Value<'static>, obj_id: am::ObjId) -> Self { - Self { - index, - obj_id: AMobjId::new(obj_id), - value, - } - } -} - -impl PartialEq for AMlistItem { - fn eq(&self, other: &Self) -> bool { - self.index == other.index && self.obj_id == other.obj_id && self.value == other.value - } -} - -/* -impl From<&AMlistItem> for (usize, am::Value<'static>, am::ObjId) { - fn from(list_item: &AMlistItem) -> Self { - (list_item.index, list_item.value.0.clone(), list_item.obj_id.as_ref().clone()) - } -} -*/ - -/// \memberof AMlistItem -/// \brief Gets the index of an item in a list object. -/// -/// \param[in] list_item A pointer to an `AMlistItem` struct. -/// \return A 64-bit unsigned integer. -/// \pre \p list_item `!= NULL`. -/// \internal -/// -/// # Safety -/// list_item must be a valid pointer to an AMlistItem -#[no_mangle] -pub unsafe extern "C" fn AMlistItemIndex(list_item: *const AMlistItem) -> usize { - if let Some(list_item) = list_item.as_ref() { - list_item.index - } else { - usize::MAX - } -} - -/// \memberof AMlistItem -/// \brief Gets the object identifier of an item in a list object. -/// -/// \param[in] list_item A pointer to an `AMlistItem` struct. -/// \return A pointer to an `AMobjId` struct. -/// \pre \p list_item `!= NULL`. -/// \internal -/// -/// # Safety -/// list_item must be a valid pointer to an AMlistItem -#[no_mangle] -pub unsafe extern "C" fn AMlistItemObjId(list_item: *const AMlistItem) -> *const AMobjId { - if let Some(list_item) = list_item.as_ref() { - &list_item.obj_id - } else { - std::ptr::null() - } -} - -/// \memberof AMlistItem -/// \brief Gets the value of an item in a list object. -/// -/// \param[in] list_item A pointer to an `AMlistItem` struct. -/// \return An `AMvalue` struct. -/// \pre \p list_item `!= NULL`. -/// \internal -/// -/// # Safety -/// list_item must be a valid pointer to an AMlistItem -#[no_mangle] -pub unsafe extern "C" fn AMlistItemValue<'a>(list_item: *const AMlistItem) -> AMvalue<'a> { - if let Some(list_item) = list_item.as_ref() { - (&list_item.value).into() - } else { - AMvalue::Void - } -} diff --git a/rust/automerge-c/src/doc/list/items.rs b/rust/automerge-c/src/doc/list/items.rs deleted file mode 100644 index 5b4a11fd..00000000 --- a/rust/automerge-c/src/doc/list/items.rs +++ /dev/null @@ -1,348 +0,0 @@ -use std::ffi::c_void; -use std::mem::size_of; - -use crate::doc::list::item::AMlistItem; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. -pub const USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(list_items: &[AMlistItem], offset: isize) -> Self { - Self { - len: list_items.len(), - offset, - ptr: list_items.as_ptr() as *const c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. 
- len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<&AMlistItem> { - if self.is_stopped() { - return None; - } - let slice: &[AMlistItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMlistItem, self.len) }; - let value = &slice[self.get_index()]; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<&AMlistItem> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[AMlistItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMlistItem, self.len) }; - Some(&slice[self.get_index()]) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) - .try_into() - .unwrap() - } - } -} - -/// \struct AMlistItems -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of list object items. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMlistItems { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. 
- detail: [u8; USIZE_USIZE_USIZE_], -} - -impl AMlistItems { - pub fn new(list_items: &[AMlistItem]) -> Self { - Self { - detail: Detail::new(list_items, 0).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<&AMlistItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<&AMlistItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[AMlistItem]> for AMlistItems { - fn as_ref(&self) -> &[AMlistItem] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const AMlistItem, detail.len) } - } -} - -impl Default for AMlistItems { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMlistItems -/// \brief Advances an iterator over a sequence of list object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] list_items A pointer to an `AMlistItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p list_items `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsAdvance(list_items: *mut AMlistItems, n: isize) { - if let Some(list_items) = list_items.as_mut() { - list_items.advance(n); - }; -} - -/// \memberof AMlistItems -/// \brief Tests the equality of two sequences of list object items underlying -/// a pair of iterators. -/// -/// \param[in] list_items1 A pointer to an `AMlistItems` struct. -/// \param[in] list_items2 A pointer to an `AMlistItems` struct. -/// \return `true` if \p list_items1 `==` \p list_items2 and `false` otherwise. -/// \pre \p list_items1 `!= NULL`. -/// \pre \p list_items2 `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items1 must be a valid pointer to an AMlistItems -/// list_items2 must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsEqual( - list_items1: *const AMlistItems, - list_items2: *const AMlistItems, -) -> bool { - match (list_items1.as_ref(), list_items2.as_ref()) { - (Some(list_items1), Some(list_items2)) => list_items1.as_ref() == list_items2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} - -/// \memberof AMlistItems -/// \brief Gets the list object item at the current position of an iterator -/// over a sequence of list object items and then advances it by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] list_items A pointer to an `AMlistItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. 
-/// \return A pointer to an `AMlistItem` struct that's `NULL` when -/// \p list_items was previously advanced past its forward/reverse -/// limit. -/// \pre \p list_items `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsNext( - list_items: *mut AMlistItems, - n: isize, -) -> *const AMlistItem { - if let Some(list_items) = list_items.as_mut() { - if let Some(list_item) = list_items.next(n) { - return list_item; - } - } - std::ptr::null() -} - -/// \memberof AMlistItems -/// \brief Advances an iterator over a sequence of list object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the list object item at its new -/// position. -/// -/// \param[in,out] list_items A pointer to an `AMlistItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMlistItem` struct that's `NULL` when -/// \p list_items is presently advanced past its forward/reverse limit. -/// \pre \p list_items `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsPrev( - list_items: *mut AMlistItems, - n: isize, -) -> *const AMlistItem { - if let Some(list_items) = list_items.as_mut() { - if let Some(list_item) = list_items.prev(n) { - return list_item; - } - } - std::ptr::null() -} - -/// \memberof AMlistItems -/// \brief Gets the size of the sequence of list object items underlying an -/// iterator. -/// -/// \param[in] list_items A pointer to an `AMlistItems` struct. -/// \return The count of values in \p list_items. -/// \pre \p list_items `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsSize(list_items: *const AMlistItems) -> usize { - if let Some(list_items) = list_items.as_ref() { - list_items.len() - } else { - 0 - } -} - -/// \memberof AMlistItems -/// \brief Creates an iterator over the same sequence of list object items as -/// the given one but with the opposite position and direction. -/// -/// \param[in] list_items A pointer to an `AMlistItems` struct. -/// \return An `AMlistItems` struct -/// \pre \p list_items `!= NULL`. -/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsReversed(list_items: *const AMlistItems) -> AMlistItems { - if let Some(list_items) = list_items.as_ref() { - list_items.reversed() - } else { - Default::default() - } -} - -/// \memberof AMlistItems -/// \brief Creates an iterator at the starting position over the same sequence -/// of list object items as the given one. -/// -/// \param[in] list_items A pointer to an `AMlistItems` struct. -/// \return An `AMlistItems` struct -/// \pre \p list_items `!= NULL`. 
-/// \internal -/// -/// #Safety -/// list_items must be a valid pointer to an AMlistItems -#[no_mangle] -pub unsafe extern "C" fn AMlistItemsRewound(list_items: *const AMlistItems) -> AMlistItems { - if let Some(list_items) = list_items.as_ref() { - list_items.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/doc/map.rs b/rust/automerge-c/src/doc/map.rs index 86c6b4a2..b2f7db02 100644 --- a/rust/automerge-c/src/doc/map.rs +++ b/rust/automerge-c/src/doc/map.rs @@ -3,31 +3,29 @@ use automerge::transaction::Transactable; use automerge::ReadDoc; use crate::byte_span::{to_str, AMbyteSpan}; -use crate::change_hashes::AMchangeHashes; -use crate::doc::{to_doc, to_doc_mut, to_obj_id, AMdoc}; -use crate::obj::{to_obj_type, AMobjId, AMobjType}; +use crate::doc::{to_doc, to_doc_mut, AMdoc}; +use crate::items::AMitems; +use crate::obj::{to_obj_id, to_obj_type, AMobjId, AMobjType}; use crate::result::{to_result, AMresult}; -pub mod item; -pub mod items; - /// \memberof AMdoc -/// \brief Deletes a key in a map object. +/// \brief Deletes an item from a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] key A UTF-8 string view key for the map object identified by -/// \p obj_id as an `AMbyteSpan` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] key The UTF-8 string view key of an item within the map object +/// identified by \p obj_id as an `AMbyteSpan` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapDelete( doc: *mut AMdoc, @@ -40,96 +38,107 @@ pub unsafe extern "C" fn AMmapDelete( } /// \memberof AMdoc -/// \brief Gets the current or historical value for a key in a map object. +/// \brief Gets a current or historical item within a map object. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] key A UTF-8 string view key for the map object identified by -/// \p obj_id as an `AMbyteSpan` struct. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical -/// value or `NULL` for the current value. -/// \return A pointer to an `AMresult` struct that doesn't contain a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] key The UTF-8 string view key of an item within the map object +/// identified by \p obj_id as an `AMbyteSpan` struct. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select a historical item at \p key or `NULL` +/// to select the current item at \p key. +/// \return A pointer to an `AMresult` struct with an `AMitem` struct. 
+/// \pre \p doc `!= NULL`
+/// \pre \p key.src `!= NULL`
+/// \warning The returned `AMresult` struct pointer must be passed to
+///          `AMresultFree()` in order to avoid a memory leak.
 /// \internal
 ///
 /// # Safety
 /// doc must be a valid pointer to an AMdoc
 /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
-/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null()
+/// key.src must be a byte array of length >= key.count
+/// heads must be a valid pointer to an AMitems or std::ptr::null()
 #[no_mangle]
 pub unsafe extern "C" fn AMmapGet(
     doc: *const AMdoc,
     obj_id: *const AMobjId,
     key: AMbyteSpan,
-    heads: *const AMchangeHashes,
+    heads: *const AMitems,
 ) -> *mut AMresult {
     let doc = to_doc!(doc);
     let obj_id = to_obj_id!(obj_id);
     let key = to_str!(key);
     match heads.as_ref() {
         None => to_result(doc.get(obj_id, key)),
-        Some(heads) => to_result(doc.get_at(obj_id, key, heads.as_ref())),
+        Some(heads) => match <Vec<am::ChangeHash>>::try_from(heads) {
+            Ok(heads) => to_result(doc.get_at(obj_id, key, &heads)),
+            Err(e) => AMresult::error(&e.to_string()).into(),
+        },
     }
 }
 
 /// \memberof AMdoc
-/// \brief Gets all of the historical values for a key in a map object until
+/// \brief Gets all of the historical items at a key within a map object until
 ///        its current one or a specific one.
 ///
 /// \param[in] doc A pointer to an `AMdoc` struct.
 /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
-/// \param[in] key A UTF-8 string view key for the map object identified by
-///            \p obj_id as an `AMbyteSpan` struct.
-/// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical
-///            last value or `NULL` for the current last value.
-/// \return A pointer to an `AMresult` struct containing an `AMobjItems` struct.
-/// \pre \p doc `!= NULL`.
-/// \pre \p key `!= NULL`.
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
-///          in order to prevent a memory leak.
+/// \param[in] key The UTF-8 string view key of an item within the map object
+///                identified by \p obj_id as an `AMbyteSpan` struct.
+/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH`
+///                  items to select a historical last item or `NULL` to
+///                  select the current last item.
+/// \return A pointer to an `AMresult` struct with an `AMitems` struct.
+/// \pre \p doc `!= NULL`
+/// \pre \p key.src `!= NULL`
+/// \warning The returned `AMresult` struct pointer must be passed to
+///          `AMresultFree()` in order to avoid a memory leak.
 /// \internal
 ///
 /// # Safety
 /// doc must be a valid pointer to an AMdoc
 /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
-/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null()
+/// key.src must be a byte array of length >= key.count
+/// heads must be a valid pointer to an AMitems or std::ptr::null()
 #[no_mangle]
 pub unsafe extern "C" fn AMmapGetAll(
     doc: *const AMdoc,
     obj_id: *const AMobjId,
     key: AMbyteSpan,
-    heads: *const AMchangeHashes,
+    heads: *const AMitems,
 ) -> *mut AMresult {
     let doc = to_doc!(doc);
     let obj_id = to_obj_id!(obj_id);
     let key = to_str!(key);
     match heads.as_ref() {
         None => to_result(doc.get_all(obj_id, key)),
-        Some(heads) => to_result(doc.get_all_at(obj_id, key, heads.as_ref())),
+        Some(heads) => match <Vec<am::ChangeHash>>::try_from(heads) {
+            Ok(heads) => to_result(doc.get_all_at(obj_id, key, &heads)),
+            Err(e) => AMresult::error(&e.to_string()).into(),
+        },
     }
 }
 
 /// \memberof AMdoc
-/// \brief Increments a counter for a key in a map object by the given value.
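Both getters above funnel the optional `heads` argument through a `Vec<am::ChangeHash>` conversion before calling the document's `*_at` variants, which is what makes a read either current or historical. The underlying idea, sketched against the plain automerge Rust API rather than the C bindings (a minimal sketch assuming the usual `AutoCommit`, `Transactable` and `ReadDoc` methods; not part of this patch):

    use automerge::{transaction::Transactable, AutoCommit, ReadDoc, ROOT};

    fn main() -> Result<(), automerge::AutomergeError> {
        let mut doc = AutoCommit::new();
        doc.put(ROOT, "status", "draft")?;
        // Remember the heads that name this point in the document's history.
        let heads = doc.get_heads();
        doc.put(ROOT, "status", "published")?;

        // Current value of the key...
        let now = doc.get(ROOT, "status")?;
        // ...versus its value as of the remembered heads.
        let then = doc.get_at(ROOT, "status", &heads)?;
        println!("now: {:?}, then: {:?}", now, then);
        Ok(())
    }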
+/// \brief Increments a counter at a key in a map object by the given value. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] key A UTF-8 string view key for the map object identified by -/// \p obj_id as an `AMbyteSpan` struct. +/// \param[in] key The UTF-8 string view key of an item within the map object +/// identified by \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapIncrement( doc: *mut AMdoc, @@ -145,21 +154,22 @@ pub unsafe extern "C" fn AMmapIncrement( /// \memberof AMdoc /// \brief Puts a boolean as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] key A UTF-8 string view key for the map object identified by -/// \p obj_id as an `AMbyteSpan` struct. +/// \param[in] key The UTF-8 string view key of an item within the map object +/// identified by \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A boolean. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutBool( doc: *mut AMdoc, @@ -173,59 +183,58 @@ pub unsafe extern "C" fn AMmapPutBool( } /// \memberof AMdoc -/// \brief Puts a sequence of bytes as the value of a key in a map object. +/// \brief Puts an array of bytes value at a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. -/// \param[in] key A UTF-8 string view key for the map object identified by -/// \p obj_id as an `AMbyteSpan` struct. -/// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes to copy from \p src. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \pre \p src `!= NULL`. 
-/// \pre `0 <` \p count `<= sizeof(`\p src`)`.
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
-///          in order to prevent a memory leak.
+/// \param[in] key The UTF-8 string view key of an item within the map object
+///                identified by \p obj_id as an `AMbyteSpan` struct.
+/// \param[in] value A view onto an array of bytes as an `AMbyteSpan` struct.
+/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
+/// \pre \p doc `!= NULL`
+/// \pre \p key.src `!= NULL`
+/// \pre \p value.src `!= NULL`
+/// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)`
+/// \warning The returned `AMresult` struct pointer must be passed to
+///          `AMresultFree()` in order to avoid a memory leak.
 /// \internal
 ///
 /// # Safety
 /// doc must be a valid pointer to an AMdoc
 /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
-/// src must be a byte array of size `>= count`
+/// key.src must be a byte array of length >= key.count
+/// value.src must be a byte array of length >= value.count
 #[no_mangle]
 pub unsafe extern "C" fn AMmapPutBytes(
     doc: *mut AMdoc,
     obj_id: *const AMobjId,
     key: AMbyteSpan,
-    val: AMbyteSpan,
+    value: AMbyteSpan,
 ) -> *mut AMresult {
     let doc = to_doc_mut!(doc);
     let key = to_str!(key);
-    let mut vec = Vec::new();
-    vec.extend_from_slice(std::slice::from_raw_parts(val.src, val.count));
-    to_result(doc.put(to_obj_id!(obj_id), key, vec))
+    to_result(doc.put(to_obj_id!(obj_id), key, Vec::<u8>::from(&value)))
 }
 
 /// \memberof AMdoc
 /// \brief Puts a CRDT counter as the value of a key in a map object.
 ///
-/// \param[in,out] doc A pointer to an `AMdoc` struct.
+/// \param[in] doc A pointer to an `AMdoc` struct.
 /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 /// \param[in] key A UTF-8 string view key for the map object identified by
 ///            \p obj_id as an `AMbyteSpan` struct.
 /// \param[in] value A 64-bit signed integer.
-/// \return A pointer to an `AMresult` struct containing a void.
-/// \pre \p doc `!= NULL`.
-/// \pre \p key `!= NULL`.
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
-///          in order to prevent a memory leak.
+/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
+/// \pre \p doc `!= NULL`
+/// \pre \p key.src `!= NULL`
+/// \warning The returned `AMresult` struct pointer must be passed to
+///          `AMresultFree()` in order to avoid a memory leak.
 /// \internal
 ///
 /// # Safety
 /// doc must be a valid pointer to an AMdoc
 /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
+/// key.src must be a byte array of length >= key.count
 #[no_mangle]
 pub unsafe extern "C" fn AMmapPutCounter(
     doc: *mut AMdoc,
@@ -245,20 +254,21 @@ pub unsafe extern "C" fn AMmapPutCounter(
 /// \memberof AMdoc
 /// \brief Puts null as the value of a key in a map object.
 ///
-/// \param[in,out] doc A pointer to an `AMdoc` struct.
+/// \param[in] doc A pointer to an `AMdoc` struct.
 /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 /// \param[in] key A UTF-8 string view key for the map object identified by
 ///            \p obj_id as an `AMbyteSpan` struct.
-/// \return A pointer to an `AMresult` struct containing a void.
-/// \pre \p doc `!= NULL`.
-/// \pre \p key `!= NULL`.
-/// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
-///          in order to prevent a memory leak.
+/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
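`AMmapPutBytes()` above now leans on a `Vec::<u8>::from(&value)` conversion instead of copying the span by hand; presumably that conversion does exactly what the removed lines did. A self-contained sketch of the idea, using a stand-in `ByteSpan` type because the real `AMbyteSpan` lives elsewhere in this crate (the `src`/`count` field names follow the spans used throughout this file):

    /// Illustrative stand-in for the C API's byte span: a pointer plus a length.
    #[repr(C)]
    pub struct ByteSpan {
        pub src: *const u8,
        pub count: usize,
    }

    impl From<&ByteSpan> for Vec<u8> {
        fn from(span: &ByteSpan) -> Self {
            if span.src.is_null() || span.count == 0 {
                return Vec::new();
            }
            // Safety (illustrative): the caller promises that `src` points to at
            // least `count` readable bytes, as the "# Safety" notes require.
            unsafe { std::slice::from_raw_parts(span.src, span.count).to_vec() }
        }
    }

    fn main() {
        let bytes = [0xde, 0xad, 0xbe, 0xef];
        let span = ByteSpan { src: bytes.as_ptr(), count: bytes.len() };
        assert_eq!(Vec::<u8>::from(&span), bytes.to_vec());
    }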
+/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutNull( doc: *mut AMdoc, @@ -273,23 +283,22 @@ pub unsafe extern "C" fn AMmapPutNull( /// \memberof AMdoc /// \brief Puts an empty object as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] obj_type An `AMobjIdType` enum tag. -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMobjId` struct. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \pre \p obj_type != `AM_OBJ_TYPE_VOID`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_OBJ_TYPE` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutObject( doc: *mut AMdoc, @@ -299,27 +308,29 @@ pub unsafe extern "C" fn AMmapPutObject( ) -> *mut AMresult { let doc = to_doc_mut!(doc); let key = to_str!(key); - to_result(doc.put_object(to_obj_id!(obj_id), key, to_obj_type!(obj_type))) + let obj_type = to_obj_type!(obj_type); + to_result((doc.put_object(to_obj_id!(obj_id), key, obj_type), obj_type)) } /// \memberof AMdoc /// \brief Puts a float as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A 64-bit float. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutF64( doc: *mut AMdoc, @@ -335,21 +346,22 @@ pub unsafe extern "C" fn AMmapPutF64( /// \memberof AMdoc /// \brief Puts a signed integer as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. 
+/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutInt( doc: *mut AMdoc, @@ -365,21 +377,22 @@ pub unsafe extern "C" fn AMmapPutInt( /// \memberof AMdoc /// \brief Puts a UTF-8 string as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutStr( doc: *mut AMdoc, @@ -395,21 +408,22 @@ pub unsafe extern "C" fn AMmapPutStr( /// \brief Puts a *nix timestamp (milliseconds) as the value of a key in a map /// object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A 64-bit signed integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutTimestamp( doc: *mut AMdoc, @@ -425,21 +439,22 @@ pub unsafe extern "C" fn AMmapPutTimestamp( /// \memberof AMdoc /// \brief Puts an unsigned integer as the value of a key in a map object. /// -/// \param[in,out] doc A pointer to an `AMdoc` struct. +/// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] key A UTF-8 string view key for the map object identified by /// \p obj_id as an `AMbyteSpan` struct. /// \param[in] value A 64-bit unsigned integer. -/// \return A pointer to an `AMresult` struct containing a void. -/// \pre \p doc `!= NULL`. -/// \pre \p key `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. +/// \pre \p doc `!= NULL` +/// \pre \p key.src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() +/// key.src must be a byte array of length >= key.count #[no_mangle] pub unsafe extern "C" fn AMmapPutUint( doc: *mut AMdoc, @@ -453,71 +468,82 @@ pub unsafe extern "C" fn AMmapPutUint( } /// \memberof AMdoc -/// \brief Gets the current or historical keys and values of the map object -/// within the given range. +/// \brief Gets the current or historical items of the map object within the +/// given range. /// /// \param[in] doc A pointer to an `AMdoc` struct. /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. /// \param[in] begin The first key in a subrange or `AMstr(NULL)` to indicate the /// absolute first key. -/// \param[in] end The key one past the last key in a subrange or `AMstr(NULL)` to -/// indicate one past the absolute last key. -/// \param[in] heads A pointer to an `AMchangeHashes` struct for historical -/// keys and values or `NULL` for current keys and values. -/// \return A pointer to an `AMresult` struct containing an `AMmapItems` -/// struct. -/// \pre \p doc `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] end The key one past the last key in a subrange or `AMstr(NULL)` +/// to indicate one past the absolute last key. +/// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` +/// items to select historical items or `NULL` to select +/// current items. +/// \return A pointer to an `AMresult` struct with an `AMitems` struct. +/// \pre \p doc `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
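The `begin`/`end` pair documented here is translated, in the function body below, into one of Rust's four string range forms (`begin..end`, `begin..`, `..end` or `..`) depending on which spans are `AMstr(NULL)`. A small standalone illustration of that mapping (the `to_bounds` helper is hypothetical, purely for illustration):

    use std::ops::Bound;

    // `None` plays the role of `AMstr(NULL)`: an unbounded end of the range.
    fn to_bounds(begin: Option<&str>, end: Option<&str>) -> (Bound<String>, Bound<String>) {
        let lower = match begin {
            Some(b) => Bound::Included(b.to_string()),
            None => Bound::Unbounded,
        };
        let upper = match end {
            Some(e) => Bound::Excluded(e.to_string()),
            None => Bound::Unbounded,
        };
        (lower, upper)
    }

    fn main() {
        // Keys from "b" (inclusive) up to, but not including, "m".
        println!("{:?}", to_bounds(Some("b"), Some("m")));
        // Both ends NULL: every key in the map object.
        println!("{:?}", to_bounds(None, None));
    }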
/// \internal /// /// # Safety /// doc must be a valid pointer to an AMdoc /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() -/// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() +/// begin.src must be a byte array of length >= begin.count or std::ptr::null() +/// end.src must be a byte array of length >= end.count or std::ptr::null() +/// heads must be a valid pointer to an AMitems or std::ptr::null() #[no_mangle] pub unsafe extern "C" fn AMmapRange( doc: *const AMdoc, obj_id: *const AMobjId, begin: AMbyteSpan, end: AMbyteSpan, - heads: *const AMchangeHashes, + heads: *const AMitems, ) -> *mut AMresult { let doc = to_doc!(doc); let obj_id = to_obj_id!(obj_id); + let heads = match heads.as_ref() { + None => None, + Some(heads) => match >::try_from(heads) { + Ok(heads) => Some(heads), + Err(e) => { + return AMresult::error(&e.to_string()).into(); + } + }, + }; match (begin.is_null(), end.is_null()) { (false, false) => { let (begin, end) = (to_str!(begin).to_string(), to_str!(end).to_string()); if begin > end { - return AMresult::err(&format!("Invalid range [{}-{})", begin, end)).into(); + return AMresult::error(&format!("Invalid range [{}-{})", begin, end)).into(); }; let bounds = begin..end; - if let Some(heads) = heads.as_ref() { - to_result(doc.map_range_at(obj_id, bounds, heads.as_ref())) + if let Some(heads) = heads { + to_result(doc.map_range_at(obj_id, bounds, &heads)) } else { to_result(doc.map_range(obj_id, bounds)) } } (false, true) => { let bounds = to_str!(begin).to_string()..; - if let Some(heads) = heads.as_ref() { - to_result(doc.map_range_at(obj_id, bounds, heads.as_ref())) + if let Some(heads) = heads { + to_result(doc.map_range_at(obj_id, bounds, &heads)) } else { to_result(doc.map_range(obj_id, bounds)) } } (true, false) => { let bounds = ..to_str!(end).to_string(); - if let Some(heads) = heads.as_ref() { - to_result(doc.map_range_at(obj_id, bounds, heads.as_ref())) + if let Some(heads) = heads { + to_result(doc.map_range_at(obj_id, bounds, &heads)) } else { to_result(doc.map_range(obj_id, bounds)) } } (true, true) => { let bounds = ..; - if let Some(heads) = heads.as_ref() { - to_result(doc.map_range_at(obj_id, bounds, heads.as_ref())) + if let Some(heads) = heads { + to_result(doc.map_range_at(obj_id, bounds, &heads)) } else { to_result(doc.map_range(obj_id, bounds)) } diff --git a/rust/automerge-c/src/doc/map/item.rs b/rust/automerge-c/src/doc/map/item.rs deleted file mode 100644 index 7914fdc4..00000000 --- a/rust/automerge-c/src/doc/map/item.rs +++ /dev/null @@ -1,98 +0,0 @@ -use automerge as am; - -use crate::byte_span::AMbyteSpan; -use crate::obj::AMobjId; -use crate::result::AMvalue; - -/// \struct AMmapItem -/// \installed_headerfile -/// \brief An item in a map object. -pub struct AMmapItem { - /// The key of an item in a map object. - key: String, - /// The object identifier of an item in a map object. - obj_id: AMobjId, - /// The value of an item in a map object. 
- value: am::Value<'static>, -} - -impl AMmapItem { - pub fn new(key: &'static str, value: am::Value<'static>, obj_id: am::ObjId) -> Self { - Self { - key: key.to_string(), - obj_id: AMobjId::new(obj_id), - value, - } - } -} - -impl PartialEq for AMmapItem { - fn eq(&self, other: &Self) -> bool { - self.key == other.key && self.obj_id == other.obj_id && self.value == other.value - } -} - -/* -impl From<&AMmapItem> for (String, am::Value<'static>, am::ObjId) { - fn from(map_item: &AMmapItem) -> Self { - (map_item.key.into_string().unwrap(), map_item.value.0.clone(), map_item.obj_id.as_ref().clone()) - } -} -*/ - -/// \memberof AMmapItem -/// \brief Gets the key of an item in a map object. -/// -/// \param[in] map_item A pointer to an `AMmapItem` struct. -/// \return An `AMbyteSpan` view of a UTF-8 string. -/// \pre \p map_item `!= NULL`. -/// \internal -/// -/// # Safety -/// map_item must be a valid pointer to an AMmapItem -#[no_mangle] -pub unsafe extern "C" fn AMmapItemKey(map_item: *const AMmapItem) -> AMbyteSpan { - if let Some(map_item) = map_item.as_ref() { - map_item.key.as_bytes().into() - } else { - Default::default() - } -} - -/// \memberof AMmapItem -/// \brief Gets the object identifier of an item in a map object. -/// -/// \param[in] map_item A pointer to an `AMmapItem` struct. -/// \return A pointer to an `AMobjId` struct. -/// \pre \p map_item `!= NULL`. -/// \internal -/// -/// # Safety -/// map_item must be a valid pointer to an AMmapItem -#[no_mangle] -pub unsafe extern "C" fn AMmapItemObjId(map_item: *const AMmapItem) -> *const AMobjId { - if let Some(map_item) = map_item.as_ref() { - &map_item.obj_id - } else { - std::ptr::null() - } -} - -/// \memberof AMmapItem -/// \brief Gets the value of an item in a map object. -/// -/// \param[in] map_item A pointer to an `AMmapItem` struct. -/// \return An `AMvalue` struct. -/// \pre \p map_item `!= NULL`. -/// \internal -/// -/// # Safety -/// map_item must be a valid pointer to an AMmapItem -#[no_mangle] -pub unsafe extern "C" fn AMmapItemValue<'a>(map_item: *const AMmapItem) -> AMvalue<'a> { - if let Some(map_item) = map_item.as_ref() { - (&map_item.value).into() - } else { - AMvalue::Void - } -} diff --git a/rust/automerge-c/src/doc/map/items.rs b/rust/automerge-c/src/doc/map/items.rs deleted file mode 100644 index cd305971..00000000 --- a/rust/automerge-c/src/doc/map/items.rs +++ /dev/null @@ -1,340 +0,0 @@ -use std::ffi::c_void; -use std::mem::size_of; - -use crate::doc::map::item::AMmapItem; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. -pub const USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(map_items: &[AMmapItem], offset: isize) -> Self { - Self { - len: map_items.len(), - offset, - ptr: map_items.as_ptr() as *const c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. 
- len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<&AMmapItem> { - if self.is_stopped() { - return None; - } - let slice: &[AMmapItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMmapItem, self.len) }; - let value = &slice[self.get_index()]; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<&AMmapItem> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[AMmapItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMmapItem, self.len) }; - Some(&slice[self.get_index()]) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) - .try_into() - .unwrap() - } - } -} - -/// \struct AMmapItems -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of map object items. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMmapItems { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. 
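The `Detail` block being deleted here packed both position and direction into a single signed `offset`: non-negative offsets walk forward from the start, while negative offsets (`-1`, `-2`, ...) walk backward from the end, which is why `reversed()` maps `offset` to `-(offset + 1)` and `get_index()` adds `len` to a negative offset. A tiny check of that arithmetic, written directly from the formulas above (illustrative only):

    // Mirrors the removed get_index()/reversed() arithmetic for a sequence of
    // length `len`: offsets 0..len walk forward, -1..=-len walk backward.
    fn get_index(offset: isize, len: usize) -> usize {
        (offset + if offset < 0 { len as isize } else { 0 }) as usize
    }

    fn reversed(offset: isize) -> isize {
        -(offset + 1)
    }

    fn main() {
        let len = 3;
        // A forward iterator at the start (offset 0) and its reversed twin
        // (offset -1) address opposite ends of the same slice.
        assert_eq!(get_index(0, len), 0);
        assert_eq!(get_index(reversed(0), len), 2);
        // The second forward position lands one step in from the far end.
        assert_eq!(get_index(1, len), 1);
        assert_eq!(get_index(reversed(1), len), 1);
    }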
- detail: [u8; USIZE_USIZE_USIZE_], -} - -impl AMmapItems { - pub fn new(map_items: &[AMmapItem]) -> Self { - Self { - detail: Detail::new(map_items, 0).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<&AMmapItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<&AMmapItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[AMmapItem]> for AMmapItems { - fn as_ref(&self) -> &[AMmapItem] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const AMmapItem, detail.len) } - } -} - -impl Default for AMmapItems { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMmapItems -/// \brief Advances an iterator over a sequence of map object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] map_items A pointer to an `AMmapItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsAdvance(map_items: *mut AMmapItems, n: isize) { - if let Some(map_items) = map_items.as_mut() { - map_items.advance(n); - }; -} - -/// \memberof AMmapItems -/// \brief Tests the equality of two sequences of map object items underlying -/// a pair of iterators. -/// -/// \param[in] map_items1 A pointer to an `AMmapItems` struct. -/// \param[in] map_items2 A pointer to an `AMmapItems` struct. -/// \return `true` if \p map_items1 `==` \p map_items2 and `false` otherwise. -/// \pre \p map_items1 `!= NULL`. -/// \pre \p map_items2 `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items1 must be a valid pointer to an AMmapItems -/// map_items2 must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsEqual( - map_items1: *const AMmapItems, - map_items2: *const AMmapItems, -) -> bool { - match (map_items1.as_ref(), map_items2.as_ref()) { - (Some(map_items1), Some(map_items2)) => map_items1.as_ref() == map_items2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} - -/// \memberof AMmapItems -/// \brief Gets the map object item at the current position of an iterator -/// over a sequence of map object items and then advances it by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] map_items A pointer to an `AMmapItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. 
-/// \return A pointer to an `AMmapItem` struct that's `NULL` when \p map_items -/// was previously advanced past its forward/reverse limit. -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsNext(map_items: *mut AMmapItems, n: isize) -> *const AMmapItem { - if let Some(map_items) = map_items.as_mut() { - if let Some(map_item) = map_items.next(n) { - return map_item; - } - } - std::ptr::null() -} - -/// \memberof AMmapItems -/// \brief Advances an iterator over a sequence of map object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the map object item at its new -/// position. -/// -/// \param[in,out] map_items A pointer to an `AMmapItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMmapItem` struct that's `NULL` when \p map_items -/// is presently advanced past its forward/reverse limit. -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsPrev(map_items: *mut AMmapItems, n: isize) -> *const AMmapItem { - if let Some(map_items) = map_items.as_mut() { - if let Some(map_item) = map_items.prev(n) { - return map_item; - } - } - std::ptr::null() -} - -/// \memberof AMmapItems -/// \brief Gets the size of the sequence of map object items underlying an -/// iterator. -/// -/// \param[in] map_items A pointer to an `AMmapItems` struct. -/// \return The count of values in \p map_items. -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsSize(map_items: *const AMmapItems) -> usize { - if let Some(map_items) = map_items.as_ref() { - map_items.len() - } else { - 0 - } -} - -/// \memberof AMmapItems -/// \brief Creates an iterator over the same sequence of map object items as -/// the given one but with the opposite position and direction. -/// -/// \param[in] map_items A pointer to an `AMmapItems` struct. -/// \return An `AMmapItems` struct -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsReversed(map_items: *const AMmapItems) -> AMmapItems { - if let Some(map_items) = map_items.as_ref() { - map_items.reversed() - } else { - Default::default() - } -} - -/// \memberof AMmapItems -/// \brief Creates an iterator at the starting position over the same sequence of map object items as the given one. -/// -/// \param[in] map_items A pointer to an `AMmapItems` struct. -/// \return An `AMmapItems` struct -/// \pre \p map_items `!= NULL`. -/// \internal -/// -/// #Safety -/// map_items must be a valid pointer to an AMmapItems -#[no_mangle] -pub unsafe extern "C" fn AMmapItemsRewound(map_items: *const AMmapItems) -> AMmapItems { - if let Some(map_items) = map_items.as_ref() { - map_items.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/doc/utils.rs b/rust/automerge-c/src/doc/utils.rs index d98a9a8b..ce465b84 100644 --- a/rust/automerge-c/src/doc/utils.rs +++ b/rust/automerge-c/src/doc/utils.rs @@ -1,9 +1,20 @@ +macro_rules! 
clamp { + ($index:expr, $len:expr, $param_name:expr) => {{ + if $index > $len && $index != usize::MAX { + return AMresult::error(&format!("Invalid {} {}", $param_name, $index)).into(); + } + std::cmp::min($index, $len) + }}; +} + +pub(crate) use clamp; + macro_rules! to_doc { ($handle:expr) => {{ let handle = $handle.as_ref(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMdoc pointer").into(), + None => return AMresult::error("Invalid `AMdoc*`").into(), } }}; } @@ -15,9 +26,21 @@ macro_rules! to_doc_mut { let handle = $handle.as_mut(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMdoc pointer").into(), + None => return AMresult::error("Invalid `AMdoc*`").into(), } }}; } pub(crate) use to_doc_mut; + +macro_rules! to_items { + ($handle:expr) => {{ + let handle = $handle.as_ref(); + match handle { + Some(b) => b, + None => return AMresult::error("Invalid `AMitems*`").into(), + } + }}; +} + +pub(crate) use to_items; diff --git a/rust/automerge-c/src/index.rs b/rust/automerge-c/src/index.rs new file mode 100644 index 00000000..f1ea153b --- /dev/null +++ b/rust/automerge-c/src/index.rs @@ -0,0 +1,84 @@ +use automerge as am; + +use std::any::type_name; + +use smol_str::SmolStr; + +use crate::byte_span::AMbyteSpan; + +/// \struct AMindex +/// \installed_headerfile +/// \brief An item index. +#[derive(PartialEq)] +pub enum AMindex { + /// A UTF-8 string key variant. + Key(SmolStr), + /// A 64-bit unsigned integer position variant. + Pos(usize), +} + +impl TryFrom<&AMindex> for AMbyteSpan { + type Error = am::AutomergeError; + + fn try_from(item: &AMindex) -> Result { + use am::AutomergeError::InvalidValueType; + use AMindex::*; + + if let Key(key) = item { + return Ok(key.into()); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl TryFrom<&AMindex> for usize { + type Error = am::AutomergeError; + + fn try_from(item: &AMindex) -> Result { + use am::AutomergeError::InvalidValueType; + use AMindex::*; + + if let Pos(pos) = item { + return Ok(*pos); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +/// \ingroup enumerations +/// \enum AMidxType +/// \installed_headerfile +/// \brief The type of an item's index. +#[derive(PartialEq, Eq)] +#[repr(u8)] +pub enum AMidxType { + /// The default tag, not a type signifier. + Default = 0, + /// A UTF-8 string view key. + Key, + /// A 64-bit unsigned integer position. + Pos, +} + +impl Default for AMidxType { + fn default() -> Self { + Self::Default + } +} + +impl From<&AMindex> for AMidxType { + fn from(index: &AMindex) -> Self { + use AMindex::*; + + match index { + Key(_) => Self::Key, + Pos(_) => Self::Pos, + } + } +} diff --git a/rust/automerge-c/src/item.rs b/rust/automerge-c/src/item.rs new file mode 100644 index 00000000..94735464 --- /dev/null +++ b/rust/automerge-c/src/item.rs @@ -0,0 +1,1963 @@ +use automerge as am; + +use std::any::type_name; +use std::borrow::Cow; +use std::cell::{RefCell, UnsafeCell}; +use std::rc::Rc; + +use crate::actor_id::AMactorId; +use crate::byte_span::{to_str, AMbyteSpan}; +use crate::change::AMchange; +use crate::doc::AMdoc; +use crate::index::{AMidxType, AMindex}; +use crate::obj::AMobjId; +use crate::result::{to_result, AMresult}; +use crate::sync::{AMsyncHave, AMsyncMessage, AMsyncState}; + +/// \struct AMunknownValue +/// \installed_headerfile +/// \brief A value (typically for a `set` operation) whose type is unknown. 
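The `clamp!` macro added to `utils.rs` above rejects an out-of-range index unless it equals `usize::MAX`, which appears to serve as an "end of sequence" sentinel for C callers (that reading is an assumption); whatever survives is clamped to the length. The same rule as a plain function, for illustration:

    // Sketch of the clamp! rule: indexes beyond `len` are errors, except the
    // usize::MAX sentinel, and anything accepted is clamped to `len`.
    fn clamp(index: usize, len: usize, param: &str) -> Result<usize, String> {
        if index > len && index != usize::MAX {
            return Err(format!("Invalid {} {}", param, index));
        }
        Ok(std::cmp::min(index, len))
    }

    fn main() {
        assert_eq!(clamp(2, 5, "pos"), Ok(2));          // in range: unchanged
        assert_eq!(clamp(usize::MAX, 5, "pos"), Ok(5)); // sentinel: clamped to the end
        assert!(clamp(9, 5, "pos").is_err());           // out of range: rejected
    }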
+#[derive(Default, Eq, PartialEq)] +#[repr(C)] +pub struct AMunknownValue { + /// The value's raw bytes. + bytes: AMbyteSpan, + /// The value's encoded type identifier. + type_code: u8, +} + +pub enum Value { + ActorId(am::ActorId, UnsafeCell>), + Change(Box, UnsafeCell>), + ChangeHash(am::ChangeHash), + Doc(RefCell), + SyncHave(AMsyncHave), + SyncMessage(AMsyncMessage), + SyncState(RefCell), + Value(am::Value<'static>), +} + +impl Value { + pub fn try_into_bytes(&self) -> Result { + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Self::Value(Scalar(scalar)) = &self { + if let Bytes(vector) = scalar.as_ref() { + return Ok(vector.as_slice().into()); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } + + pub fn try_into_change_hash(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Self::ChangeHash(change_hash) = &self { + return Ok(change_hash.into()); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } + + pub fn try_into_counter(&self) -> Result { + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Self::Value(Scalar(scalar)) = &self { + if let Counter(counter) = scalar.as_ref() { + return Ok(counter.into()); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } + + pub fn try_into_int(&self) -> Result { + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Self::Value(Scalar(scalar)) = &self { + if let Int(int) = scalar.as_ref() { + return Ok(*int); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } + + pub fn try_into_str(&self) -> Result { + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Self::Value(Scalar(scalar)) = &self { + if let Str(smol_str) = scalar.as_ref() { + return Ok(smol_str.into()); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } + + pub fn try_into_timestamp(&self) -> Result { + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Self::Value(Scalar(scalar)) = &self { + if let Timestamp(timestamp) = scalar.as_ref() { + return Ok(*timestamp); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl From for Value { + fn from(actor_id: am::ActorId) -> Self { + Self::ActorId(actor_id, Default::default()) + } +} + +impl From for Value { + fn from(auto_commit: am::AutoCommit) -> Self { + Self::Doc(RefCell::new(AMdoc::new(auto_commit))) + } +} + +impl From for Value { + fn from(change: am::Change) -> Self { + Self::Change(Box::new(change), Default::default()) + } +} + +impl From for Value { + fn from(change_hash: am::ChangeHash) -> Self { + Self::ChangeHash(change_hash) + } +} + +impl From for Value { + fn from(have: am::sync::Have) -> Self { + Self::SyncHave(AMsyncHave::new(have)) + } +} + +impl From for Value { + fn from(message: am::sync::Message) -> Self { + Self::SyncMessage(AMsyncMessage::new(message)) + } +} + +impl From for Value { + fn from(state: am::sync::State) -> Self { + Self::SyncState(RefCell::new(AMsyncState::new(state))) + } +} + +impl From> for Value { + fn from(value: 
am::Value<'static>) -> Self { + Self::Value(value) + } +} + +impl From for Value { + fn from(string: String) -> Self { + Self::Value(am::Value::Scalar(Cow::Owned(am::ScalarValue::Str( + string.into(), + )))) + } +} + +impl<'a> TryFrom<&'a Value> for &'a am::Change { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + Change(change, _) => Ok(change), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a Value> for &'a am::ChangeHash { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + ChangeHash(change_hash) => Ok(change_hash), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a Value> for &'a am::ScalarValue { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + use am::Value::*; + + if let Value(Scalar(scalar)) = value { + return Ok(scalar.as_ref()); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl<'a> TryFrom<&'a Value> for &'a AMactorId { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + ActorId(actor_id, c_actor_id) => unsafe { + Ok((*c_actor_id.get()).get_or_insert(AMactorId::new(actor_id))) + }, + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a mut Value> for &'a mut AMchange { + type Error = am::AutomergeError; + + fn try_from(value: &'a mut Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + Change(change, c_change) => unsafe { + Ok((*c_change.get()).get_or_insert(AMchange::new(change))) + }, + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a mut Value> for &'a mut AMdoc { + type Error = am::AutomergeError; + + fn try_from(value: &'a mut Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + Doc(doc) => Ok(doc.get_mut()), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a Value> for &'a AMsyncHave { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + SyncHave(sync_have) => Ok(sync_have), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a Value> for &'a AMsyncMessage { + type Error = am::AutomergeError; + + fn try_from(value: &'a Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + SyncMessage(sync_message) => Ok(sync_message), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl<'a> TryFrom<&'a mut Value> for &'a mut AMsyncState { + type 
Error = am::AutomergeError; + + fn try_from(value: &'a mut Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + + match value { + SyncState(sync_state) => Ok(sync_state.get_mut()), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), + } + } +} + +impl TryFrom<&Value> for bool { + type Error = am::AutomergeError; + + fn try_from(value: &Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Value(Scalar(scalar)) = value { + if let Boolean(boolean) = scalar.as_ref() { + return Ok(*boolean); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl TryFrom<&Value> for f64 { + type Error = am::AutomergeError; + + fn try_from(value: &Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Value(Scalar(scalar)) = value { + if let F64(float) = scalar.as_ref() { + return Ok(*float); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl TryFrom<&Value> for u64 { + type Error = am::AutomergeError; + + fn try_from(value: &Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Value(Scalar(scalar)) = value { + if let Uint(uint) = scalar.as_ref() { + return Ok(*uint); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl TryFrom<&Value> for AMunknownValue { + type Error = am::AutomergeError; + + fn try_from(value: &Value) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidValueType; + use am::ScalarValue::*; + use am::Value::*; + + if let Value(Scalar(scalar)) = value { + if let Unknown { bytes, type_code } = scalar.as_ref() { + return Ok(Self { + bytes: bytes.as_slice().into(), + type_code: *type_code, + }); + } + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }) + } +} + +impl PartialEq for Value { + fn eq(&self, other: &Self) -> bool { + use self::Value::*; + + match (self, other) { + (ActorId(lhs, _), ActorId(rhs, _)) => *lhs == *rhs, + (Change(lhs, _), Change(rhs, _)) => lhs == rhs, + (ChangeHash(lhs), ChangeHash(rhs)) => lhs == rhs, + (Doc(lhs), Doc(rhs)) => lhs.as_ptr() == rhs.as_ptr(), + (SyncMessage(lhs), SyncMessage(rhs)) => *lhs == *rhs, + (SyncState(lhs), SyncState(rhs)) => *lhs == *rhs, + (Value(lhs), Value(rhs)) => lhs == rhs, + _ => false, + } + } +} + +#[derive(Default)] +pub struct Item { + /// The item's index. + index: Option, + /// The item's identifier. + obj_id: Option, + /// The item's value. 
+ value: Option, +} + +impl Item { + pub fn try_into_bytes(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_bytes(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + + pub fn try_into_change_hash(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_change_hash(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + + pub fn try_into_counter(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_counter(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + + pub fn try_into_int(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_int(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + + pub fn try_into_str(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_str(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + + pub fn try_into_timestamp(&self) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &self.value { + return value.try_into_timestamp(); + } + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } +} + +impl From for Item { + fn from(actor_id: am::ActorId) -> Self { + Value::from(actor_id).into() + } +} + +impl From for Item { + fn from(auto_commit: am::AutoCommit) -> Self { + Value::from(auto_commit).into() + } +} + +impl From for Item { + fn from(change: am::Change) -> Self { + Value::from(change).into() + } +} + +impl From for Item { + fn from(change_hash: am::ChangeHash) -> Self { + Value::from(change_hash).into() + } +} + +impl From<(am::ObjId, am::ObjType)> for Item { + fn from((obj_id, obj_type): (am::ObjId, am::ObjType)) -> Self { + Self { + index: None, + obj_id: Some(AMobjId::new(obj_id)), + value: Some(am::Value::Object(obj_type).into()), + } + } +} + +impl From for Item { + fn from(have: am::sync::Have) -> Self { + Value::from(have).into() + } +} + +impl From for Item { + fn from(message: am::sync::Message) -> Self { + Value::from(message).into() + } +} + +impl From for Item { + fn from(state: am::sync::State) -> Self { + Value::from(state).into() + } +} + +impl From> for Item { + fn from(value: am::Value<'static>) -> Self { + Value::from(value).into() + } +} + +impl From for Item { + fn from(string: String) -> Self { + Value::from(string).into() + } +} + +impl From for Item { + fn from(value: Value) -> Self { + Self { + index: None, + obj_id: None, + value: Some(value), + } + } +} + +impl PartialEq for Item { + fn eq(&self, other: &Self) -> bool { + self.index == other.index && self.obj_id == other.obj_id && self.value == other.value + } +} + +impl<'a> TryFrom<&'a Item> for &'a am::Change { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: 
type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a Item> for &'a am::ChangeHash { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a Item> for &'a am::ScalarValue { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a Item> for &'a AMactorId { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a mut Item> for &'a mut AMchange { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &mut item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a mut Item> for &'a mut AMdoc { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &mut item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl From<&Item> for AMidxType { + fn from(item: &Item) -> Self { + if let Some(index) = &item.index { + return index.into(); + } + Default::default() + } +} + +impl<'a> TryFrom<&'a Item> for &'a AMsyncHave { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a Item> for &'a AMsyncMessage { + type Error = am::AutomergeError; + + fn try_from(item: &'a Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl<'a> TryFrom<&'a mut Item> for &'a mut AMsyncState { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &mut item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl TryFrom<&Item> for bool { + type Error = am::AutomergeError; + + fn try_from(item: &Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl TryFrom<&Item> for f64 
{ + type Error = am::AutomergeError; + + fn try_from(item: &Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl TryFrom<&Item> for u64 { + type Error = am::AutomergeError; + + fn try_from(item: &Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl TryFrom<&Item> for AMunknownValue { + type Error = am::AutomergeError; + + fn try_from(item: &Item) -> Result { + use am::AutomergeError::InvalidValueType; + + if let Some(value) = &item.value { + value.try_into() + } else { + Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::>().to_string(), + }) + } + } +} + +impl TryFrom<&Item> for (am::Value<'static>, am::ObjId) { + type Error = am::AutomergeError; + + fn try_from(item: &Item) -> Result { + use self::Value::*; + use am::AutomergeError::InvalidObjId; + use am::AutomergeError::InvalidValueType; + + let expected = type_name::().to_string(); + match (&item.obj_id, &item.value) { + (None, None) | (None, Some(_)) => Err(InvalidObjId("".to_string())), + (Some(_), None) => Err(InvalidValueType { + expected, + unexpected: type_name::>().to_string(), + }), + (Some(obj_id), Some(value)) => match value { + ActorId(_, _) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + ChangeHash(_) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + Change(_, _) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + Doc(_) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + SyncHave(_) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + SyncMessage(_) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + SyncState(_) => Err(InvalidValueType { + expected, + unexpected: type_name::().to_string(), + }), + Value(v) => Ok((v.clone(), obj_id.as_ref().clone())), + }, + } + } +} + +/// \struct AMitem +/// \installed_headerfile +/// \brief An item within a result. 
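+// A reading of the declarations below (descriptive only, not normative): an
+// `AMitem` is a reference-counted handle around an `Item`, that is an optional
+// index (a map key or a sequence position), an optional object identifier and
+// an optional value; cloning the handle only bumps the `Rc` strong count,
+// which is what `AMitemRefCount()` reports.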
+#[derive(Clone)] +pub struct AMitem(Rc); + +impl AMitem { + pub fn exact(obj_id: am::ObjId, value: Value) -> Self { + Self(Rc::new(Item { + index: None, + obj_id: Some(AMobjId::new(obj_id)), + value: Some(value), + })) + } + + pub fn indexed(index: AMindex, obj_id: am::ObjId, value: Value) -> Self { + Self(Rc::new(Item { + index: Some(index), + obj_id: Some(AMobjId::new(obj_id)), + value: Some(value), + })) + } +} + +impl AsRef for AMitem { + fn as_ref(&self) -> &Item { + self.0.as_ref() + } +} + +impl Default for AMitem { + fn default() -> Self { + Self(Rc::new(Item { + index: None, + obj_id: None, + value: None, + })) + } +} + +impl From for AMitem { + fn from(actor_id: am::ActorId) -> Self { + Value::from(actor_id).into() + } +} + +impl From for AMitem { + fn from(auto_commit: am::AutoCommit) -> Self { + Value::from(auto_commit).into() + } +} + +impl From for AMitem { + fn from(change: am::Change) -> Self { + Value::from(change).into() + } +} + +impl From for AMitem { + fn from(change_hash: am::ChangeHash) -> Self { + Value::from(change_hash).into() + } +} + +impl From<(am::ObjId, am::ObjType)> for AMitem { + fn from((obj_id, obj_type): (am::ObjId, am::ObjType)) -> Self { + Self(Rc::new(Item::from((obj_id, obj_type)))) + } +} + +impl From for AMitem { + fn from(have: am::sync::Have) -> Self { + Value::from(have).into() + } +} + +impl From for AMitem { + fn from(message: am::sync::Message) -> Self { + Value::from(message).into() + } +} + +impl From for AMitem { + fn from(state: am::sync::State) -> Self { + Value::from(state).into() + } +} + +impl From> for AMitem { + fn from(value: am::Value<'static>) -> Self { + Value::from(value).into() + } +} + +impl From for AMitem { + fn from(string: String) -> Self { + Value::from(string).into() + } +} + +impl From for AMitem { + fn from(value: Value) -> Self { + Self(Rc::new(Item::from(value))) + } +} + +impl PartialEq for AMitem { + fn eq(&self, other: &Self) -> bool { + self.as_ref() == other.as_ref() + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a am::Change { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a am::ChangeHash { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a am::ScalarValue { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a AMactorId { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a mut AMitem> for &'a mut AMchange { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut AMitem) -> Result { + if let Some(item) = Rc::get_mut(&mut item.0) { + item.try_into() + } else { + Err(Self::Error::Fail) + } + } +} + +impl<'a> TryFrom<&'a mut AMitem> for &'a mut AMdoc { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut AMitem) -> Result { + if let Some(item) = Rc::get_mut(&mut item.0) { + item.try_into() + } else { + Err(Self::Error::Fail) + } + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a AMsyncHave { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a AMitem> for &'a AMsyncMessage { + type Error = am::AutomergeError; + + fn try_from(item: &'a AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl<'a> TryFrom<&'a mut 
AMitem> for &'a mut AMsyncState { + type Error = am::AutomergeError; + + fn try_from(item: &'a mut AMitem) -> Result { + if let Some(item) = Rc::get_mut(&mut item.0) { + item.try_into() + } else { + Err(Self::Error::Fail) + } + } +} + +impl TryFrom<&AMitem> for bool { + type Error = am::AutomergeError; + + fn try_from(item: &AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl TryFrom<&AMitem> for f64 { + type Error = am::AutomergeError; + + fn try_from(item: &AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl TryFrom<&AMitem> for u64 { + type Error = am::AutomergeError; + + fn try_from(item: &AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl TryFrom<&AMitem> for AMunknownValue { + type Error = am::AutomergeError; + + fn try_from(item: &AMitem) -> Result { + item.as_ref().try_into() + } +} + +impl TryFrom<&AMitem> for (am::Value<'static>, am::ObjId) { + type Error = am::AutomergeError; + + fn try_from(item: &AMitem) -> Result { + item.as_ref().try_into() + } +} + +/// \ingroup enumerations +/// \enum AMvalType +/// \installed_headerfile +/// \brief The type of an item's value. +#[derive(PartialEq, Eq)] +#[repr(u32)] +pub enum AMvalType { + /// An actor identifier value. + ActorId = 1 << 1, + /// A boolean value. + Bool = 1 << 2, + /// A view onto an array of bytes value. + Bytes = 1 << 3, + /// A change value. + Change = 1 << 4, + /// A change hash value. + ChangeHash = 1 << 5, + /// A CRDT counter value. + Counter = 1 << 6, + /// The default tag, not a type signifier. + Default = 0, + /// A document value. + Doc = 1 << 7, + /// A 64-bit float value. + F64 = 1 << 8, + /// A 64-bit signed integer value. + Int = 1 << 9, + /// A null value. + Null = 1 << 10, + /// An object type value. + ObjType = 1 << 11, + /// A UTF-8 string view value. + Str = 1 << 12, + /// A synchronization have value. + SyncHave = 1 << 13, + /// A synchronization message value. + SyncMessage = 1 << 14, + /// A synchronization state value. + SyncState = 1 << 15, + /// A *nix timestamp (milliseconds) value. + Timestamp = 1 << 16, + /// A 64-bit unsigned integer value. + Uint = 1 << 17, + /// An unknown type of value. + Unknown = 1 << 18, + /// A void. + Void = 1 << 0, +} + +impl Default for AMvalType { + fn default() -> Self { + Self::Default + } +} + +impl From<&am::Value<'static>> for AMvalType { + fn from(value: &am::Value<'static>) -> Self { + use am::ScalarValue::*; + use am::Value::*; + + match value { + Object(_) => Self::ObjType, + Scalar(scalar) => match scalar.as_ref() { + Boolean(_) => Self::Bool, + Bytes(_) => Self::Bytes, + Counter(_) => Self::Counter, + F64(_) => Self::F64, + Int(_) => Self::Int, + Null => Self::Null, + Str(_) => Self::Str, + Timestamp(_) => Self::Timestamp, + Uint(_) => Self::Uint, + Unknown { .. } => Self::Unknown, + }, + } + } +} + +impl From<&Value> for AMvalType { + fn from(value: &Value) -> Self { + use self::Value::*; + + match value { + ActorId(_, _) => Self::ActorId, + Change(_, _) => Self::Change, + ChangeHash(_) => Self::ChangeHash, + Doc(_) => Self::Doc, + SyncHave(_) => Self::SyncHave, + SyncMessage(_) => Self::SyncMessage, + SyncState(_) => Self::SyncState, + Value(v) => v.into(), + } + } +} + +impl From<&Item> for AMvalType { + fn from(item: &Item) -> Self { + if let Some(value) = &item.value { + return value.into(); + } + Self::Void + } +} + +/// \memberof AMitem +/// \brief Tests the equality of two items. +/// +/// \param[in] item1 A pointer to an `AMitem` struct. +/// \param[in] item2 A pointer to an `AMitem` struct. 
+/// \return `true` if \p item1 `==` \p item2 and `false` otherwise. +/// \pre \p item1 `!= NULL` +/// \pre \p item2 `!= NULL` +/// \post `!(`\p item1 `&&` \p item2 `) -> false` +/// \internal +/// +/// #Safety +/// item1 must be a valid AMitem pointer +/// item2 must be a valid AMitem pointer +#[no_mangle] +pub unsafe extern "C" fn AMitemEqual(item1: *const AMitem, item2: *const AMitem) -> bool { + match (item1.as_ref(), item2.as_ref()) { + (Some(item1), Some(item2)) => *item1 == *item2, + (None, None) | (None, Some(_)) | (Some(_), None) => false, + } +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a boolean value. +/// +/// \param[in] value A boolean. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BOOL` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromBool(value: bool) -> *mut AMresult { + AMresult::item(am::Value::from(value).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from an array of bytes value. +/// +/// \param[in] src A pointer to an array of bytes. +/// \param[in] count The count of bytes to copy from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BYTES` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +/// \internal +/// +/// # Safety +/// value.src must be a byte array of length >= value.count +#[no_mangle] +pub unsafe extern "C" fn AMitemFromBytes(src: *const u8, count: usize) -> *mut AMresult { + let value = std::slice::from_raw_parts(src, count); + AMresult::item(am::Value::bytes(value.to_vec()).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a change hash value. +/// +/// \param[in] value A change hash as an `AMbyteSpan` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_CHANGE_HASH` item. +/// \pre \p value.src `!= NULL` +/// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +/// \internal +/// +/// # Safety +/// value.src must be a byte array of length >= value.count +#[no_mangle] +pub unsafe extern "C" fn AMitemFromChangeHash(value: AMbyteSpan) -> *mut AMresult { + to_result(am::ChangeHash::try_from(&value)) +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a CRDT counter value. +/// +/// \param[in] value A 64-bit signed integer. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_COUNTER` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromCounter(value: i64) -> *mut AMresult { + AMresult::item(am::Value::counter(value).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a float value. +/// +/// \param[in] value A 64-bit float. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_F64` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
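+// A rough usage sketch, not taken from the upstream documentation: pairing an
+// `AMitemFrom*()` constructor in this file with its `AMitemTo*()` counterpart
+// round-trips a scalar through an item. `AMresultItem()` is assumed here to be
+// the accessor that yields a result's single item.
+//
+//     AMresult *result = AMitemFromF64(1.5);
+//     double fraction = 0.0;
+//     if (AMitemToF64(AMresultItem(result), &fraction)) {
+//         /* fraction == 1.5 */
+//     }
+//     AMresultFree(result);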
+#[no_mangle] +pub unsafe extern "C" fn AMitemFromF64(value: f64) -> *mut AMresult { + AMresult::item(am::Value::f64(value).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a signed integer value. +/// +/// \param[in] value A 64-bit signed integer. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_INT` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromInt(value: i64) -> *mut AMresult { + AMresult::item(am::Value::int(value).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a null value. +/// +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_NULL` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromNull() -> *mut AMresult { + AMresult::item(am::Value::from(()).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a UTF-8 string value. +/// +/// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_STR` item. +/// \pre \p value.src `!= NULL` +/// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +/// \internal +/// +/// # Safety +/// value.src must be a byte array of length >= value.count +#[no_mangle] +pub unsafe extern "C" fn AMitemFromStr(value: AMbyteSpan) -> *mut AMresult { + AMresult::item(am::Value::str(to_str!(value)).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from a *nix timestamp +/// (milliseconds) value. +/// +/// \param[in] value A 64-bit signed integer. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_TIMESTAMP` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromTimestamp(value: i64) -> *mut AMresult { + AMresult::item(am::Value::timestamp(value).into()).into() +} + +/// \memberof AMitem +/// \brief Allocates a new item and initializes it from an unsigned integer value. +/// +/// \param[in] value A 64-bit unsigned integer. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_UINT` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +#[no_mangle] +pub unsafe extern "C" fn AMitemFromUint(value: u64) -> *mut AMresult { + AMresult::item(am::Value::uint(value).into()).into() +} + +/// \memberof AMitem +/// \brief Gets the type of an item's index. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \return An `AMidxType` enum tag. +/// \pre \p item `!= NULL` +/// \post `(`\p item `== NULL) -> 0` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemIdxType(item: *const AMitem) -> AMidxType { + if let Some(item) = item.as_ref() { + return item.0.as_ref().into(); + } + Default::default() +} + +/// \memberof AMitem +/// \brief Gets the object identifier of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \return A pointer to an `AMobjId` struct. 
+/// \pre \p item `!= NULL` +/// \post `(`\p item `== NULL) -> NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemObjId(item: *const AMitem) -> *const AMobjId { + if let Some(item) = item.as_ref() { + if let Some(obj_id) = &item.as_ref().obj_id { + return obj_id; + } + } + std::ptr::null() +} + +/// \memberof AMitem +/// \brief Gets the UTF-8 string view key index of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a UTF-8 string view as an `AMbyteSpan` struct. +/// \return `true` if `AMitemIdxType(`\p item `) == AM_IDX_TYPE_KEY` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemKey(item: *const AMitem, value: *mut AMbyteSpan) -> bool { + if let Some(item) = item.as_ref() { + if let Some(index) = &item.as_ref().index { + if let Ok(key) = index.try_into() { + if !value.is_null() { + *value = key; + return true; + } + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the unsigned integer position index of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a `size_t`. +/// \return `true` if `AMitemIdxType(`\p item `) == AM_IDX_TYPE_POS` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemPos(item: *const AMitem, value: *mut usize) -> bool { + if let Some(item) = item.as_ref() { + if let Some(index) = &item.as_ref().index { + if let Ok(pos) = index.try_into() { + if !value.is_null() { + *value = pos; + return true; + } + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the reference count of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \return A 64-bit unsigned integer. +/// \pre \p item `!= NULL` +/// \post `(`\p item `== NULL) -> 0` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemRefCount(item: *const AMitem) -> usize { + if let Some(item) = item.as_ref() { + return Rc::strong_count(&item.0); + } + 0 +} + +/// \memberof AMitem +/// \brief Gets a new result for an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \return A pointer to an `AMresult` struct. +/// \pre \p item `!= NULL` +/// \post `(`\p item `== NULL) -> NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemResult(item: *const AMitem) -> *mut AMresult { + if let Some(item) = item.as_ref() { + return AMresult::item(item.clone()).into(); + } + std::ptr::null_mut() +} + +/// \memberof AMitem +/// \brief Gets the actor identifier value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMactorId` struct pointer. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_ACTOR_ID` and +/// \p *value has been reassigned, `false` otherwise. 
+/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToActorId( + item: *const AMitem, + value: *mut *const AMactorId, +) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(actor_id) = <&AMactorId>::try_from(item) { + if !value.is_null() { + *value = actor_id; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the boolean value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a boolean. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_BOOL` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToBool(item: *const AMitem, value: *mut bool) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(boolean) = item.try_into() { + if !value.is_null() { + *value = boolean; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the array of bytes value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMbyteSpan` struct. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_BYTES` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToBytes(item: *const AMitem, value: *mut AMbyteSpan) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(bytes) = item.as_ref().try_into_bytes() { + if !value.is_null() { + *value = bytes; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the change value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMchange` struct pointer. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_CHANGE` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToChange(item: *mut AMitem, value: *mut *mut AMchange) -> bool { + if let Some(item) = item.as_mut() { + if let Ok(change) = <&mut AMchange>::try_from(item) { + if !value.is_null() { + *value = change; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the change hash value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMbyteSpan` struct. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_CHANGE_HASH` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToChangeHash(item: *const AMitem, value: *mut AMbyteSpan) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(change_hash) = item.as_ref().try_into_change_hash() { + if !value.is_null() { + *value = change_hash; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the CRDT counter value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a signed 64-bit integer. 
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_COUNTER` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToCounter(item: *const AMitem, value: *mut i64) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(counter) = item.as_ref().try_into_counter() { + if !value.is_null() { + *value = counter; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the document value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMdoc` struct pointer. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_DOC` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToDoc(item: *mut AMitem, value: *mut *const AMdoc) -> bool { + if let Some(item) = item.as_mut() { + if let Ok(doc) = <&mut AMdoc>::try_from(item) { + if !value.is_null() { + *value = doc; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the float value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a 64-bit float. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_F64` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToF64(item: *const AMitem, value: *mut f64) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(float) = item.try_into() { + if !value.is_null() { + *value = float; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the integer value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a signed 64-bit integer. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_INT` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToInt(item: *const AMitem, value: *mut i64) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(int) = item.as_ref().try_into_int() { + if !value.is_null() { + *value = int; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the UTF-8 string view value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to a UTF-8 string view as an `AMbyteSpan` struct. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_STR` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToStr(item: *const AMitem, value: *mut AMbyteSpan) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(str) = item.as_ref().try_into_str() { + if !value.is_null() { + *value = str; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the synchronization have value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMsyncHave` struct pointer. 
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_SYNC_HAVE` and
+/// \p *value has been reassigned, `false` otherwise.
+/// \pre \p item `!= NULL`
+/// \internal
+///
+/// # Safety
+/// item must be a valid pointer to an AMitem
+#[no_mangle]
+pub unsafe extern "C" fn AMitemToSyncHave(
+    item: *const AMitem,
+    value: *mut *const AMsyncHave,
+) -> bool {
+    if let Some(item) = item.as_ref() {
+        if let Ok(sync_have) = <&AMsyncHave>::try_from(item) {
+            if !value.is_null() {
+                *value = sync_have;
+                return true;
+            }
+        }
+    }
+    false
+}
+
+/// \memberof AMitem
+/// \brief Gets the synchronization message value of an item.
+///
+/// \param[in] item A pointer to an `AMitem` struct.
+/// \param[out] value A pointer to an `AMsyncMessage` struct pointer.
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_SYNC_MESSAGE` and
+/// \p *value has been reassigned, `false` otherwise.
+/// \pre \p item `!= NULL`
+/// \internal
+///
+/// # Safety
+/// item must be a valid pointer to an AMitem
+#[no_mangle]
+pub unsafe extern "C" fn AMitemToSyncMessage(
+    item: *const AMitem,
+    value: *mut *const AMsyncMessage,
+) -> bool {
+    if let Some(item) = item.as_ref() {
+        if let Ok(sync_message) = <&AMsyncMessage>::try_from(item) {
+            if !value.is_null() {
+                *value = sync_message;
+                return true;
+            }
+        }
+    }
+    false
+}
+
+/// \memberof AMitem
+/// \brief Gets the synchronization state value of an item.
+///
+/// \param[in] item A pointer to an `AMitem` struct.
+/// \param[out] value A pointer to an `AMsyncState` struct pointer.
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_SYNC_STATE` and
+/// \p *value has been reassigned, `false` otherwise.
+/// \pre \p item `!= NULL`
+/// \internal
+///
+/// # Safety
+/// item must be a valid pointer to an AMitem
+#[no_mangle]
+pub unsafe extern "C" fn AMitemToSyncState(
+    item: *mut AMitem,
+    value: *mut *mut AMsyncState,
+) -> bool {
+    if let Some(item) = item.as_mut() {
+        if let Ok(sync_state) = <&mut AMsyncState>::try_from(item) {
+            if !value.is_null() {
+                *value = sync_state;
+                return true;
+            }
+        }
+    }
+    false
+}
+
+/// \memberof AMitem
+/// \brief Gets the *nix timestamp (milliseconds) value of an item.
+///
+/// \param[in] item A pointer to an `AMitem` struct.
+/// \param[out] value A pointer to a signed 64-bit integer.
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_TIMESTAMP` and
+/// \p *value has been reassigned, `false` otherwise.
+/// \pre \p item `!= NULL`
+/// \internal
+///
+/// # Safety
+/// item must be a valid pointer to an AMitem
+#[no_mangle]
+pub unsafe extern "C" fn AMitemToTimestamp(item: *const AMitem, value: *mut i64) -> bool {
+    if let Some(item) = item.as_ref() {
+        if let Ok(timestamp) = item.as_ref().try_into_timestamp() {
+            if !value.is_null() {
+                *value = timestamp;
+                return true;
+            }
+        }
+    }
+    false
+}
+
+/// \memberof AMitem
+/// \brief Gets the unsigned integer value of an item.
+///
+/// \param[in] item A pointer to an `AMitem` struct.
+/// \param[out] value A pointer to an unsigned 64-bit integer.
+/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_UINT` and
+/// \p *value has been reassigned, `false` otherwise.
+/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToUint(item: *const AMitem, value: *mut u64) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(uint) = item.try_into() { + if !value.is_null() { + *value = uint; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the unknown type of value of an item. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \param[out] value A pointer to an `AMunknownValue` struct. +/// \return `true` if `AMitemValType(`\p item `) == AM_VAL_TYPE_UNKNOWN` and +/// \p *value has been reassigned, `false` otherwise. +/// \pre \p item `!= NULL` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemToUnknown(item: *const AMitem, value: *mut AMunknownValue) -> bool { + if let Some(item) = item.as_ref() { + if let Ok(unknown) = item.try_into() { + if !value.is_null() { + *value = unknown; + return true; + } + } + } + false +} + +/// \memberof AMitem +/// \brief Gets the type of an item's value. +/// +/// \param[in] item A pointer to an `AMitem` struct. +/// \return An `AMvalType` enum tag. +/// \pre \p item `!= NULL` +/// \post `(`\p item `== NULL) -> 0` +/// \internal +/// +/// # Safety +/// item must be a valid pointer to an AMitem +#[no_mangle] +pub unsafe extern "C" fn AMitemValType(item: *const AMitem) -> AMvalType { + if let Some(item) = item.as_ref() { + return item.0.as_ref().into(); + } + Default::default() +} diff --git a/rust/automerge-c/src/items.rs b/rust/automerge-c/src/items.rs new file mode 100644 index 00000000..361078b3 --- /dev/null +++ b/rust/automerge-c/src/items.rs @@ -0,0 +1,401 @@ +use automerge as am; + +use std::ffi::c_void; +use std::marker::PhantomData; +use std::mem::size_of; + +use crate::item::AMitem; +use crate::result::AMresult; + +#[repr(C)] +struct Detail { + len: usize, + offset: isize, + ptr: *const c_void, +} + +/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call +/// (https://github.com/eqrion/cbindgen/issues/252) but it will +/// propagate the name of a constant initialized from it so if the +/// constant's name is a symbolic representation of the value it can be +/// converted into a number by post-processing the header it generated. +pub const USIZE_USIZE_USIZE_: usize = size_of::(); + +impl Detail { + fn new(items: &[AMitem], offset: isize) -> Self { + Self { + len: items.len(), + offset, + ptr: items.as_ptr() as *mut c_void, + } + } + + pub fn advance(&mut self, n: isize) { + if n == 0 { + return; + } + let len = self.len as isize; + self.offset = if self.offset < 0 { + // It's reversed. + let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); + if unclipped >= 0 { + // Clip it to the forward stop. + len + } else { + std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) + } + } else { + let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); + if unclipped < 0 { + // Clip it to the reverse stop. 
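+                // (A negative offset indexes from the back, with -1 naming the
+                // last element, so -(len + 1) is one step past the reverse end
+                // just as `len` is one step past the forward end;
+                // `is_stopped()` treats both as exhausted.)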
+ -(len + 1) + } else { + std::cmp::max(0, std::cmp::min(unclipped, len)) + } + } + } + + pub fn get_index(&self) -> usize { + (self.offset + + if self.offset < 0 { + self.len as isize + } else { + 0 + }) as usize + } + + pub fn next(&mut self, n: isize) -> Option<&mut AMitem> { + if self.is_stopped() { + return None; + } + let slice: &mut [AMitem] = + unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut AMitem, self.len) }; + let value = &mut slice[self.get_index()]; + self.advance(n); + Some(value) + } + + pub fn is_stopped(&self) -> bool { + let len = self.len as isize; + self.offset < -len || self.offset == len + } + + pub fn prev(&mut self, n: isize) -> Option<&mut AMitem> { + self.advance(-n); + if self.is_stopped() { + return None; + } + let slice: &mut [AMitem] = + unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut AMitem, self.len) }; + Some(&mut slice[self.get_index()]) + } + + pub fn reversed(&self) -> Self { + Self { + len: self.len, + offset: -(self.offset + 1), + ptr: self.ptr, + } + } + + pub fn rewound(&self) -> Self { + Self { + len: self.len, + offset: if self.offset < 0 { -1 } else { 0 }, + ptr: self.ptr, + } + } +} + +impl From for [u8; USIZE_USIZE_USIZE_] { + fn from(detail: Detail) -> Self { + unsafe { + std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) + .try_into() + .unwrap() + } + } +} + +/// \struct AMitems +/// \installed_headerfile +/// \brief A random-access iterator over a sequence of `AMitem` structs. +#[repr(C)] +#[derive(Eq, PartialEq)] +pub struct AMitems<'a> { + /// An implementation detail that is intentionally opaque. + /// \warning Modifying \p detail will cause undefined behavior. + /// \note The actual size of \p detail will vary by platform, this is just + /// the one for the platform this documentation was built on. 
+    detail: [u8; USIZE_USIZE_USIZE_],
+    phantom: PhantomData<&'a mut AMresult>,
+}
+
+impl<'a> AMitems<'a> {
+    pub fn new(items: &[AMitem]) -> Self {
+        Self {
+            detail: Detail::new(items, 0).into(),
+            phantom: PhantomData,
+        }
+    }
+
+    pub fn advance(&mut self, n: isize) {
+        let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) };
+        detail.advance(n);
+    }
+
+    pub fn len(&self) -> usize {
+        let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) };
+        detail.len
+    }
+
+    pub fn next(&mut self, n: isize) -> Option<&mut AMitem> {
+        let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) };
+        detail.next(n)
+    }
+
+    pub fn prev(&mut self, n: isize) -> Option<&mut AMitem> {
+        let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) };
+        detail.prev(n)
+    }
+
+    pub fn reversed(&self) -> Self {
+        let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) };
+        Self {
+            detail: detail.reversed().into(),
+            phantom: PhantomData,
+        }
+    }
+
+    pub fn rewound(&self) -> Self {
+        let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) };
+        Self {
+            detail: detail.rewound().into(),
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a> AsRef<[AMitem]> for AMitems<'a> {
+    fn as_ref(&self) -> &[AMitem] {
+        let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) };
+        unsafe { std::slice::from_raw_parts(detail.ptr as *const AMitem, detail.len) }
+    }
+}
+
+impl<'a> Default for AMitems<'a> {
+    fn default() -> Self {
+        Self {
+            detail: [0; USIZE_USIZE_USIZE_],
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl TryFrom<&AMitems<'_>> for Vec<am::Change> {
+    type Error = am::AutomergeError;
+
+    fn try_from(items: &AMitems<'_>) -> Result<Self, Self::Error> {
+        let mut changes = Vec::<am::Change>::with_capacity(items.len());
+        for item in items.as_ref().iter() {
+            match <&am::Change>::try_from(item.as_ref()) {
+                Ok(change) => {
+                    changes.push(change.clone());
+                }
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        Ok(changes)
+    }
+}
+
+impl TryFrom<&AMitems<'_>> for Vec<am::ChangeHash> {
+    type Error = am::AutomergeError;
+
+    fn try_from(items: &AMitems<'_>) -> Result<Self, Self::Error> {
+        let mut change_hashes = Vec::<am::ChangeHash>::with_capacity(items.len());
+        for item in items.as_ref().iter() {
+            match <&am::ChangeHash>::try_from(item.as_ref()) {
+                Ok(change_hash) => {
+                    change_hashes.push(*change_hash);
+                }
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        Ok(change_hashes)
+    }
+}
+
+impl TryFrom<&AMitems<'_>> for Vec<am::ScalarValue> {
+    type Error = am::AutomergeError;
+
+    fn try_from(items: &AMitems<'_>) -> Result<Self, Self::Error> {
+        let mut scalars = Vec::<am::ScalarValue>::with_capacity(items.len());
+        for item in items.as_ref().iter() {
+            match <&am::ScalarValue>::try_from(item.as_ref()) {
+                Ok(scalar) => {
+                    scalars.push(scalar.clone());
+                }
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+        Ok(scalars)
+    }
+}
+
+/// \memberof AMitems
+/// \brief Advances an iterator over a sequence of items by at most
+/// \p |n| positions where the sign of \p n is relative to the
+/// iterator's direction.
+///
+/// \param[in] items A pointer to an `AMitems` struct.
+/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
+/// number of positions to advance.
+/// \pre \p items `!= NULL`
+/// \internal
+///
+/// #Safety
+/// items must be a valid pointer to an AMitems
+#[no_mangle]
+pub unsafe extern "C" fn AMitemsAdvance(items: *mut AMitems, n: isize) {
+    if let Some(items) = items.as_mut() {
+        items.advance(n);
+    };
+}
+
+/// \memberof AMitems
+/// \brief Tests the equality of two sequences of items underlying a
+/// pair of iterators.
+///
+/// \param[in] items1 A pointer to an `AMitems` struct.
+/// \param[in] items2 A pointer to an `AMitems` struct.
+/// \return `true` if \p items1 `==` \p items2 and `false` otherwise.
+/// \pre \p items1 `!= NULL`
+/// \pre \p items2 `!= NULL`
+/// \post `!(`\p items1 `&&` \p items2 `) -> false`
+/// \internal
+///
+/// #Safety
+/// items1 must be a valid pointer to an AMitems
+/// items2 must be a valid pointer to an AMitems
+#[no_mangle]
+pub unsafe extern "C" fn AMitemsEqual(items1: *const AMitems, items2: *const AMitems) -> bool {
+    match (items1.as_ref(), items2.as_ref()) {
+        (Some(items1), Some(items2)) => items1.as_ref() == items2.as_ref(),
+        (None, None) | (None, Some(_)) | (Some(_), None) => false,
+    }
+}
+
+/// \memberof AMitems
+/// \brief Gets the item at the current position of an iterator over a
+/// sequence of items and then advances it by at most \p |n|
+/// positions where the sign of \p n is relative to the iterator's
+/// direction.
+///
+/// \param[in] items A pointer to an `AMitems` struct.
+/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
+/// number of positions to advance.
+/// \return A pointer to an `AMitem` struct that's `NULL` when \p items
+/// was previously advanced past its forward/reverse limit.
+/// \pre \p items `!= NULL`
+/// \internal
+///
+/// #Safety
+/// items must be a valid pointer to an AMitems
+#[no_mangle]
+pub unsafe extern "C" fn AMitemsNext(items: *mut AMitems, n: isize) -> *mut AMitem {
+    if let Some(items) = items.as_mut() {
+        if let Some(item) = items.next(n) {
+            return item;
+        }
+    }
+    std::ptr::null_mut()
+}
+
+/// \memberof AMitems
+/// \brief Advances an iterator over a sequence of items by at most
+/// \p |n| positions where the sign of \p n is relative to the
+/// iterator's direction and then gets the item at its new
+/// position.
+///
+/// \param[in] items A pointer to an `AMitems` struct.
+/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
+/// number of positions to advance.
+/// \return A pointer to an `AMitem` struct that's `NULL` when \p items
+/// is presently advanced past its forward/reverse limit.
+/// \pre \p items `!= NULL`
+/// \internal
+///
+/// #Safety
+/// items must be a valid pointer to an AMitems
+#[no_mangle]
+pub unsafe extern "C" fn AMitemsPrev(items: *mut AMitems, n: isize) -> *mut AMitem {
+    if let Some(items) = items.as_mut() {
+        if let Some(obj_item) = items.prev(n) {
+            return obj_item;
+        }
+    }
+    std::ptr::null_mut()
+}
+
+/// \memberof AMitems
+/// \brief Gets the size of the sequence underlying an iterator.
+///
+/// \param[in] items A pointer to an `AMitems` struct.
+/// \return The count of items in \p items.
+/// \pre \p items `!= NULL`
+/// \internal
+///
+/// #Safety
+/// items must be a valid pointer to an AMitems
+#[no_mangle]
+pub unsafe extern "C" fn AMitemsSize(items: *const AMitems) -> usize {
+    if let Some(items) = items.as_ref() {
+        return items.len();
+    }
+    0
+}
+
+/// \memberof AMitems
+/// \brief Creates an iterator over the same sequence of items as the
+/// given one but with the opposite position and direction.
+///
+/// \param[in] items A pointer to an `AMitems` struct.
+/// \return An `AMitems` struct +/// \pre \p items `!= NULL` +/// \internal +/// +/// #Safety +/// items must be a valid pointer to an AMitems +#[no_mangle] +pub unsafe extern "C" fn AMitemsReversed(items: *const AMitems) -> AMitems { + if let Some(items) = items.as_ref() { + return items.reversed(); + } + Default::default() +} + +/// \memberof AMitems +/// \brief Creates an iterator at the starting position over the same sequence +/// of items as the given one. +/// +/// \param[in] items A pointer to an `AMitems` struct. +/// \return An `AMitems` struct +/// \pre \p items `!= NULL` +/// \internal +/// +/// #Safety +/// items must be a valid pointer to an AMitems +#[no_mangle] +pub unsafe extern "C" fn AMitemsRewound(items: *const AMitems) -> AMitems { + if let Some(items) = items.as_ref() { + return items.rewound(); + } + Default::default() +} diff --git a/rust/automerge-c/src/lib.rs b/rust/automerge-c/src/lib.rs index 6418bd33..1ee1a85d 100644 --- a/rust/automerge-c/src/lib.rs +++ b/rust/automerge-c/src/lib.rs @@ -1,11 +1,12 @@ mod actor_id; mod byte_span; mod change; -mod change_hashes; -mod changes; mod doc; +mod index; +mod item; +mod items; mod obj; mod result; -mod result_stack; -mod strs; mod sync; + +// include!(concat!(env!("OUT_DIR"), "/enum_string_functions.rs")); diff --git a/rust/automerge-c/src/obj.rs b/rust/automerge-c/src/obj.rs index 46ff617b..3d52286c 100644 --- a/rust/automerge-c/src/obj.rs +++ b/rust/automerge-c/src/obj.rs @@ -1,12 +1,10 @@ use automerge as am; +use std::any::type_name; use std::cell::RefCell; use std::ops::Deref; use crate::actor_id::AMactorId; -pub mod item; -pub mod items; - macro_rules! to_obj_id { ($handle:expr) => {{ match $handle.as_ref() { @@ -19,12 +17,11 @@ macro_rules! to_obj_id { pub(crate) use to_obj_id; macro_rules! to_obj_type { - ($am_obj_type:expr) => {{ - match $am_obj_type { - AMobjType::Map => am::ObjType::Map, - AMobjType::List => am::ObjType::List, - AMobjType::Text => am::ObjType::Text, - AMobjType::Void => return AMresult::err("Invalid AMobjType value").into(), + ($c_obj_type:expr) => {{ + let result: Result = (&$c_obj_type).try_into(); + match result { + Ok(obj_type) => obj_type, + Err(e) => return AMresult::error(&e.to_string()).into(), } }}; } @@ -79,11 +76,11 @@ impl Deref for AMobjId { } /// \memberof AMobjId -/// \brief Gets the actor identifier of an object identifier. +/// \brief Gets the actor identifier component of an object identifier. /// /// \param[in] obj_id A pointer to an `AMobjId` struct. /// \return A pointer to an `AMactorId` struct or `NULL`. -/// \pre \p obj_id `!= NULL`. +/// \pre \p obj_id `!= NULL` /// \internal /// /// # Safety @@ -97,11 +94,11 @@ pub unsafe extern "C" fn AMobjIdActorId(obj_id: *const AMobjId) -> *const AMacto } /// \memberof AMobjId -/// \brief Gets the counter of an object identifier. +/// \brief Gets the counter component of an object identifier. /// /// \param[in] obj_id A pointer to an `AMobjId` struct. /// \return A 64-bit unsigned integer. -/// \pre \p obj_id `!= NULL`. +/// \pre \p obj_id `!= NULL` /// \internal /// /// # Safety @@ -124,8 +121,9 @@ pub unsafe extern "C" fn AMobjIdCounter(obj_id: *const AMobjId) -> u64 { /// \param[in] obj_id1 A pointer to an `AMobjId` struct. /// \param[in] obj_id2 A pointer to an `AMobjId` struct. /// \return `true` if \p obj_id1 `==` \p obj_id2 and `false` otherwise. -/// \pre \p obj_id1 `!= NULL`. -/// \pre \p obj_id2 `!= NULL`. 
+/// \pre \p obj_id1 `!= NULL` +/// \pre \p obj_id1 `!= NULL` +/// \post `!(`\p obj_id1 `&&` \p obj_id2 `) -> false` /// \internal /// /// #Safety @@ -135,26 +133,28 @@ pub unsafe extern "C" fn AMobjIdCounter(obj_id: *const AMobjId) -> u64 { pub unsafe extern "C" fn AMobjIdEqual(obj_id1: *const AMobjId, obj_id2: *const AMobjId) -> bool { match (obj_id1.as_ref(), obj_id2.as_ref()) { (Some(obj_id1), Some(obj_id2)) => obj_id1 == obj_id2, - (None, Some(_)) | (Some(_), None) | (None, None) => false, + (None, None) | (None, Some(_)) | (Some(_), None) => false, } } /// \memberof AMobjId -/// \brief Gets the index of an object identifier. +/// \brief Gets the index component of an object identifier. /// /// \param[in] obj_id A pointer to an `AMobjId` struct. /// \return A 64-bit unsigned integer. -/// \pre \p obj_id `!= NULL`. +/// \pre \p obj_id `!= NULL` /// \internal /// /// # Safety /// obj_id must be a valid pointer to an AMobjId #[no_mangle] pub unsafe extern "C" fn AMobjIdIndex(obj_id: *const AMobjId) -> usize { + use am::ObjId::*; + if let Some(obj_id) = obj_id.as_ref() { match obj_id.as_ref() { - am::ObjId::Id(_, _, index) => *index, - am::ObjId::Root => 0, + Id(_, _, index) => *index, + Root => 0, } } else { usize::MAX @@ -163,26 +163,54 @@ pub unsafe extern "C" fn AMobjIdIndex(obj_id: *const AMobjId) -> usize { /// \ingroup enumerations /// \enum AMobjType +/// \installed_headerfile /// \brief The type of an object value. +#[derive(PartialEq, Eq)] #[repr(u8)] pub enum AMobjType { - /// A void. - /// \note This tag is unalphabetized to evaluate as false. - Void = 0, + /// The default tag, not a type signifier. + Default = 0, /// A list. - List, + List = 1, /// A key-value map. Map, /// A list of Unicode graphemes. Text, } -impl From for AMobjType { - fn from(o: am::ObjType) -> Self { +impl Default for AMobjType { + fn default() -> Self { + Self::Default + } +} + +impl From<&am::ObjType> for AMobjType { + fn from(o: &am::ObjType) -> Self { + use am::ObjType::*; + match o { - am::ObjType::Map | am::ObjType::Table => AMobjType::Map, - am::ObjType::List => AMobjType::List, - am::ObjType::Text => AMobjType::Text, + List => Self::List, + Map | Table => Self::Map, + Text => Self::Text, + } + } +} + +impl TryFrom<&AMobjType> for am::ObjType { + type Error = am::AutomergeError; + + fn try_from(c_obj_type: &AMobjType) -> Result { + use am::AutomergeError::InvalidValueType; + use AMobjType::*; + + match c_obj_type { + List => Ok(Self::List), + Map => Ok(Self::Map), + Text => Ok(Self::Text), + _ => Err(InvalidValueType { + expected: type_name::().to_string(), + unexpected: type_name::().to_string(), + }), } } } diff --git a/rust/automerge-c/src/obj/item.rs b/rust/automerge-c/src/obj/item.rs deleted file mode 100644 index a2e99d06..00000000 --- a/rust/automerge-c/src/obj/item.rs +++ /dev/null @@ -1,73 +0,0 @@ -use automerge as am; - -use crate::obj::AMobjId; -use crate::result::AMvalue; - -/// \struct AMobjItem -/// \installed_headerfile -/// \brief An item in an object. -pub struct AMobjItem { - /// The object identifier of an item in an object. - obj_id: AMobjId, - /// The value of an item in an object. 
- value: am::Value<'static>, -} - -impl AMobjItem { - pub fn new(value: am::Value<'static>, obj_id: am::ObjId) -> Self { - Self { - obj_id: AMobjId::new(obj_id), - value, - } - } -} - -impl PartialEq for AMobjItem { - fn eq(&self, other: &Self) -> bool { - self.obj_id == other.obj_id && self.value == other.value - } -} - -impl From<&AMobjItem> for (am::Value<'static>, am::ObjId) { - fn from(obj_item: &AMobjItem) -> Self { - (obj_item.value.clone(), obj_item.obj_id.as_ref().clone()) - } -} - -/// \memberof AMobjItem -/// \brief Gets the object identifier of an item in an object. -/// -/// \param[in] obj_item A pointer to an `AMobjItem` struct. -/// \return A pointer to an `AMobjId` struct. -/// \pre \p obj_item `!= NULL`. -/// \internal -/// -/// # Safety -/// obj_item must be a valid pointer to an AMobjItem -#[no_mangle] -pub unsafe extern "C" fn AMobjItemObjId(obj_item: *const AMobjItem) -> *const AMobjId { - if let Some(obj_item) = obj_item.as_ref() { - &obj_item.obj_id - } else { - std::ptr::null() - } -} - -/// \memberof AMobjItem -/// \brief Gets the value of an item in an object. -/// -/// \param[in] obj_item A pointer to an `AMobjItem` struct. -/// \return An `AMvalue` struct. -/// \pre \p obj_item `!= NULL`. -/// \internal -/// -/// # Safety -/// obj_item must be a valid pointer to an AMobjItem -#[no_mangle] -pub unsafe extern "C" fn AMobjItemValue<'a>(obj_item: *const AMobjItem) -> AMvalue<'a> { - if let Some(obj_item) = obj_item.as_ref() { - (&obj_item.value).into() - } else { - AMvalue::Void - } -} diff --git a/rust/automerge-c/src/obj/items.rs b/rust/automerge-c/src/obj/items.rs deleted file mode 100644 index d6b847cf..00000000 --- a/rust/automerge-c/src/obj/items.rs +++ /dev/null @@ -1,341 +0,0 @@ -use std::ffi::c_void; -use std::mem::size_of; - -use crate::obj::item::AMobjItem; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. -pub const USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(obj_items: &[AMobjItem], offset: isize) -> Self { - Self { - len: obj_items.len(), - offset, - ptr: obj_items.as_ptr() as *const c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. - len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. 
- -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<&AMobjItem> { - if self.is_stopped() { - return None; - } - let slice: &[AMobjItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMobjItem, self.len) }; - let value = &slice[self.get_index()]; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<&AMobjItem> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[AMobjItem] = - unsafe { std::slice::from_raw_parts(self.ptr as *const AMobjItem, self.len) }; - Some(&slice[self.get_index()]) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) - .try_into() - .unwrap() - } - } -} - -/// \struct AMobjItems -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of object items. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMobjItems { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. - detail: [u8; USIZE_USIZE_USIZE_], -} - -impl AMobjItems { - pub fn new(obj_items: &[AMobjItem]) -> Self { - Self { - detail: Detail::new(obj_items, 0).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<&AMobjItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<&AMobjItem> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[AMobjItem]> for AMobjItems { - fn as_ref(&self) -> &[AMobjItem] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const AMobjItem, detail.len) } - } -} - -impl Default for AMobjItems { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMobjItems -/// \brief Advances an iterator over a sequence of object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] obj_items A pointer to an `AMobjItems` struct. 
-/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p obj_items `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsAdvance(obj_items: *mut AMobjItems, n: isize) { - if let Some(obj_items) = obj_items.as_mut() { - obj_items.advance(n); - }; -} - -/// \memberof AMobjItems -/// \brief Tests the equality of two sequences of object items underlying a -/// pair of iterators. -/// -/// \param[in] obj_items1 A pointer to an `AMobjItems` struct. -/// \param[in] obj_items2 A pointer to an `AMobjItems` struct. -/// \return `true` if \p obj_items1 `==` \p obj_items2 and `false` otherwise. -/// \pre \p obj_items1 `!= NULL`. -/// \pre \p obj_items2 `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items1 must be a valid pointer to an AMobjItems -/// obj_items2 must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsEqual( - obj_items1: *const AMobjItems, - obj_items2: *const AMobjItems, -) -> bool { - match (obj_items1.as_ref(), obj_items2.as_ref()) { - (Some(obj_items1), Some(obj_items2)) => obj_items1.as_ref() == obj_items2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} - -/// \memberof AMobjItems -/// \brief Gets the object item at the current position of an iterator over a -/// sequence of object items and then advances it by at most \p |n| -/// positions where the sign of \p n is relative to the iterator's -/// direction. -/// -/// \param[in,out] obj_items A pointer to an `AMobjItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMobjItem` struct that's `NULL` when \p obj_items -/// was previously advanced past its forward/reverse limit. -/// \pre \p obj_items `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsNext(obj_items: *mut AMobjItems, n: isize) -> *const AMobjItem { - if let Some(obj_items) = obj_items.as_mut() { - if let Some(obj_item) = obj_items.next(n) { - return obj_item; - } - } - std::ptr::null() -} - -/// \memberof AMobjItems -/// \brief Advances an iterator over a sequence of object items by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the object item at its new -/// position. -/// -/// \param[in,out] obj_items A pointer to an `AMobjItems` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMobjItem` struct that's `NULL` when \p obj_items -/// is presently advanced past its forward/reverse limit. -/// \pre \p obj_items `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsPrev(obj_items: *mut AMobjItems, n: isize) -> *const AMobjItem { - if let Some(obj_items) = obj_items.as_mut() { - if let Some(obj_item) = obj_items.prev(n) { - return obj_item; - } - } - std::ptr::null() -} - -/// \memberof AMobjItems -/// \brief Gets the size of the sequence of object items underlying an -/// iterator. -/// -/// \param[in] obj_items A pointer to an `AMobjItems` struct. -/// \return The count of values in \p obj_items. -/// \pre \p obj_items `!= NULL`. 
-/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsSize(obj_items: *const AMobjItems) -> usize { - if let Some(obj_items) = obj_items.as_ref() { - obj_items.len() - } else { - 0 - } -} - -/// \memberof AMobjItems -/// \brief Creates an iterator over the same sequence of object items as the -/// given one but with the opposite position and direction. -/// -/// \param[in] obj_items A pointer to an `AMobjItems` struct. -/// \return An `AMobjItems` struct -/// \pre \p obj_items `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsReversed(obj_items: *const AMobjItems) -> AMobjItems { - if let Some(obj_items) = obj_items.as_ref() { - obj_items.reversed() - } else { - Default::default() - } -} - -/// \memberof AMobjItems -/// \brief Creates an iterator at the starting position over the same sequence -/// of object items as the given one. -/// -/// \param[in] obj_items A pointer to an `AMobjItems` struct. -/// \return An `AMobjItems` struct -/// \pre \p obj_items `!= NULL`. -/// \internal -/// -/// #Safety -/// obj_items must be a valid pointer to an AMobjItems -#[no_mangle] -pub unsafe extern "C" fn AMobjItemsRewound(obj_items: *const AMobjItems) -> AMobjItems { - if let Some(obj_items) = obj_items.as_ref() { - obj_items.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/result.rs b/rust/automerge-c/src/result.rs index 599ada96..2975f38b 100644 --- a/rust/automerge-c/src/result.rs +++ b/rust/automerge-c/src/result.rs @@ -1,513 +1,85 @@ use automerge as am; -use smol_str::SmolStr; -use std::any::type_name; -use std::collections::BTreeMap; use std::ops::{Range, RangeFrom, RangeFull, RangeTo}; -use crate::actor_id::AMactorId; use crate::byte_span::AMbyteSpan; -use crate::change::AMchange; -use crate::change_hashes::AMchangeHashes; -use crate::changes::AMchanges; -use crate::doc::list::{item::AMlistItem, items::AMlistItems}; -use crate::doc::map::{item::AMmapItem, items::AMmapItems}; -use crate::doc::AMdoc; -use crate::obj::item::AMobjItem; -use crate::obj::items::AMobjItems; -use crate::obj::AMobjId; -use crate::strs::AMstrs; -use crate::sync::{AMsyncMessage, AMsyncState}; - -/// \struct AMvalue -/// \installed_headerfile -/// \brief A discriminated union of value type variants for a result. -/// -/// \enum AMvalueVariant -/// \brief A value type discriminant. -/// -/// \var AMvalue::actor_id -/// An actor identifier as a pointer to an `AMactorId` struct. -/// -/// \var AMvalue::boolean -/// A boolean. -/// -/// \var AMvalue::bytes -/// A sequence of bytes as an `AMbyteSpan` struct. -/// -/// \var AMvalue::change_hashes -/// A sequence of change hashes as an `AMchangeHashes` struct. -/// -/// \var AMvalue::changes -/// A sequence of changes as an `AMchanges` struct. -/// -/// \var AMvalue::counter -/// A CRDT counter. -/// -/// \var AMvalue::doc -/// A document as a pointer to an `AMdoc` struct. -/// -/// \var AMvalue::f64 -/// A 64-bit float. -/// -/// \var AMvalue::int_ -/// A 64-bit signed integer. -/// -/// \var AMvalue::list_items -/// A sequence of list object items as an `AMlistItems` struct. -/// -/// \var AMvalue::map_items -/// A sequence of map object items as an `AMmapItems` struct. -/// -/// \var AMvalue::obj_id -/// An object identifier as a pointer to an `AMobjId` struct. -/// -/// \var AMvalue::obj_items -/// A sequence of object items as an `AMobjItems` struct. 
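For context on what is being deleted in this hunk: `AMobjItems` was one of several hand-rolled bidirectional iterators whose opaque `Detail` encodes the traversal direction in the sign of its offset and clips every advance at a one-past-the-end stop in either direction. A minimal C sketch of how callers typically walked such an iterator follows; the wrapper name and include path are illustrative assumptions, and only the `AMobjItems*` calls removed above are used.

    /* Pre-patch consumption of the removed AMobjItems iterator.
       The include path of the generated automerge-c header is assumed. */
    #include <stdio.h>
    #include "automerge.h"

    static void walk_obj_items(AMobjItems items) {
        printf("%zu object items\n", AMobjItemsSize(&items));
        AMobjItem const* item = NULL;
        /* AMobjItemsNext() returns the current item and then advances by n;
           a negative n would step the reversed() view instead, as the deleted
           doxygen comments describe.  NULL marks the forward/reverse limit. */
        while ((item = AMobjItemsNext(&items, 1)) != NULL) {
            /* ... inspect *item ... */
            (void)item;
        }
    }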
-/// -/// \var AMvalue::str -/// A UTF-8 string view as an `AMbyteSpan` struct. -/// -/// \var AMvalue::strs -/// A sequence of UTF-8 strings as an `AMstrs` struct. -/// -/// \var AMvalue::sync_message -/// A synchronization message as a pointer to an `AMsyncMessage` struct. -/// -/// \var AMvalue::sync_state -/// A synchronization state as a pointer to an `AMsyncState` struct. -/// -/// \var AMvalue::tag -/// The variant discriminator. -/// -/// \var AMvalue::timestamp -/// A *nix timestamp (milliseconds). -/// -/// \var AMvalue::uint -/// A 64-bit unsigned integer. -/// -/// \var AMvalue::unknown -/// A value of unknown type as an `AMunknownValue` struct. -#[repr(u8)] -pub enum AMvalue<'a> { - /// A void variant. - /// \note This tag is unalphabetized so that a zeroed struct will have it. - Void, - /// An actor identifier variant. - ActorId(&'a AMactorId), - /// A boolean variant. - Boolean(bool), - /// A byte array variant. - Bytes(AMbyteSpan), - /// A change hashes variant. - ChangeHashes(AMchangeHashes), - /// A changes variant. - Changes(AMchanges), - /// A CRDT counter variant. - Counter(i64), - /// A document variant. - Doc(*mut AMdoc), - /// A 64-bit float variant. - F64(f64), - /// A 64-bit signed integer variant. - Int(i64), - /// A list items variant. - ListItems(AMlistItems), - /// A map items variant. - MapItems(AMmapItems), - /// A null variant. - Null, - /// An object identifier variant. - ObjId(&'a AMobjId), - /// An object items variant. - ObjItems(AMobjItems), - /// A UTF-8 string view variant. - Str(AMbyteSpan), - /// A UTF-8 string views variant. - Strs(AMstrs), - /// A synchronization message variant. - SyncMessage(&'a AMsyncMessage), - /// A synchronization state variant. - SyncState(&'a mut AMsyncState), - /// A *nix timestamp (milliseconds) variant. - Timestamp(i64), - /// A 64-bit unsigned integer variant. - Uint(u64), - /// An unknown type of scalar value variant. 
- Unknown(AMunknownValue), -} - -impl<'a> PartialEq for AMvalue<'a> { - fn eq(&self, other: &Self) -> bool { - use AMvalue::*; - - match (self, other) { - (ActorId(lhs), ActorId(rhs)) => *lhs == *rhs, - (Boolean(lhs), Boolean(rhs)) => lhs == rhs, - (Bytes(lhs), Bytes(rhs)) => lhs == rhs, - (ChangeHashes(lhs), ChangeHashes(rhs)) => lhs == rhs, - (Changes(lhs), Changes(rhs)) => lhs == rhs, - (Counter(lhs), Counter(rhs)) => lhs == rhs, - (Doc(lhs), Doc(rhs)) => *lhs == *rhs, - (F64(lhs), F64(rhs)) => lhs == rhs, - (Int(lhs), Int(rhs)) => lhs == rhs, - (ListItems(lhs), ListItems(rhs)) => lhs == rhs, - (MapItems(lhs), MapItems(rhs)) => lhs == rhs, - (ObjId(lhs), ObjId(rhs)) => *lhs == *rhs, - (ObjItems(lhs), ObjItems(rhs)) => lhs == rhs, - (Str(lhs), Str(rhs)) => lhs == rhs, - (Strs(lhs), Strs(rhs)) => lhs == rhs, - (SyncMessage(lhs), SyncMessage(rhs)) => *lhs == *rhs, - (SyncState(lhs), SyncState(rhs)) => *lhs == *rhs, - (Timestamp(lhs), Timestamp(rhs)) => lhs == rhs, - (Uint(lhs), Uint(rhs)) => lhs == rhs, - (Unknown(lhs), Unknown(rhs)) => lhs == rhs, - (Null, Null) | (Void, Void) => true, - _ => false, - } - } -} - -impl From<&am::Value<'_>> for AMvalue<'_> { - fn from(value: &am::Value<'_>) -> Self { - match value { - am::Value::Scalar(scalar) => match scalar.as_ref() { - am::ScalarValue::Boolean(flag) => AMvalue::Boolean(*flag), - am::ScalarValue::Bytes(bytes) => AMvalue::Bytes(bytes.as_slice().into()), - am::ScalarValue::Counter(counter) => AMvalue::Counter(counter.into()), - am::ScalarValue::F64(float) => AMvalue::F64(*float), - am::ScalarValue::Int(int) => AMvalue::Int(*int), - am::ScalarValue::Null => AMvalue::Null, - am::ScalarValue::Str(smol_str) => AMvalue::Str(smol_str.as_bytes().into()), - am::ScalarValue::Timestamp(timestamp) => AMvalue::Timestamp(*timestamp), - am::ScalarValue::Uint(uint) => AMvalue::Uint(*uint), - am::ScalarValue::Unknown { bytes, type_code } => AMvalue::Unknown(AMunknownValue { - bytes: bytes.as_slice().into(), - type_code: *type_code, - }), - }, - // \todo Confirm that an object variant should be ignored - // when there's no object ID variant. - am::Value::Object(_) => AMvalue::Void, - } - } -} - -impl From<&AMvalue<'_>> for u8 { - fn from(value: &AMvalue) -> Self { - use AMvalue::*; - - // \warning These numbers must correspond to the order in which the - // variants of an AMvalue are declared within it. - match value { - ActorId(_) => 1, - Boolean(_) => 2, - Bytes(_) => 3, - ChangeHashes(_) => 4, - Changes(_) => 5, - Counter(_) => 6, - Doc(_) => 7, - F64(_) => 8, - Int(_) => 9, - ListItems(_) => 10, - MapItems(_) => 11, - Null => 12, - ObjId(_) => 13, - ObjItems(_) => 14, - Str(_) => 15, - Strs(_) => 16, - SyncMessage(_) => 17, - SyncState(_) => 18, - Timestamp(_) => 19, - Uint(_) => 20, - Unknown(..) 
=> 21, - Void => 0, - } - } -} - -impl TryFrom<&AMvalue<'_>> for am::ScalarValue { - type Error = am::AutomergeError; - - fn try_from(c_value: &AMvalue) -> Result { - use am::AutomergeError::InvalidValueType; - use AMvalue::*; - - let expected = type_name::().to_string(); - match c_value { - Boolean(b) => Ok(am::ScalarValue::Boolean(*b)), - Bytes(span) => { - let slice = unsafe { std::slice::from_raw_parts(span.src, span.count) }; - Ok(am::ScalarValue::Bytes(slice.to_vec())) - } - Counter(c) => Ok(am::ScalarValue::Counter(c.into())), - F64(f) => Ok(am::ScalarValue::F64(*f)), - Int(i) => Ok(am::ScalarValue::Int(*i)), - Str(span) => { - let result: Result<&str, am::AutomergeError> = span.try_into(); - match result { - Ok(str_) => Ok(am::ScalarValue::Str(SmolStr::new(str_))), - Err(e) => Err(e), - } - } - Timestamp(t) => Ok(am::ScalarValue::Timestamp(*t)), - Uint(u) => Ok(am::ScalarValue::Uint(*u)), - Null => Ok(am::ScalarValue::Null), - Unknown(AMunknownValue { bytes, type_code }) => { - let slice = unsafe { std::slice::from_raw_parts(bytes.src, bytes.count) }; - Ok(am::ScalarValue::Unknown { - bytes: slice.to_vec(), - type_code: *type_code, - }) - } - ActorId(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - ChangeHashes(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - Changes(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - Doc(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - ListItems(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - MapItems(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - ObjId(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - ObjItems(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - Strs(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - SyncMessage(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - SyncState(_) => Err(InvalidValueType { - expected, - unexpected: type_name::().to_string(), - }), - Void => Err(InvalidValueType { - expected, - unexpected: type_name::<()>().to_string(), - }), - } - } -} - -/// \memberof AMvalue -/// \brief Tests the equality of two values. -/// -/// \param[in] value1 A pointer to an `AMvalue` struct. -/// \param[in] value2 A pointer to an `AMvalue` struct. -/// \return `true` if \p value1 `==` \p value2 and `false` otherwise. -/// \pre \p value1 `!= NULL`. -/// \pre \p value2 `!= NULL`. -/// \internal -/// -/// #Safety -/// value1 must be a valid AMvalue pointer -/// value2 must be a valid AMvalue pointer -#[no_mangle] -pub unsafe extern "C" fn AMvalueEqual(value1: *const AMvalue, value2: *const AMvalue) -> bool { - match (value1.as_ref(), value2.as_ref()) { - (Some(value1), Some(value2)) => *value1 == *value2, - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} +use crate::index::AMindex; +use crate::item::AMitem; +use crate::items::AMitems; /// \struct AMresult /// \installed_headerfile /// \brief A discriminated union of result variants. 
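The `AMvalue` union deleted above forced C callers to match a numeric `tag` against a generated discriminant before touching the corresponding member. For contrast with the item-based `AMresult` introduced below, here is a hedged sketch of that pre-patch pattern; the `AM_VALUE_STR` spelling and the include path are assumptions taken from the cbindgen-generated header, and `print_str_result` is an illustrative name.

    #include <stdio.h>
    #include "automerge.h"   /* header path assumed */

    static void print_str_result(AMresult* result) {
        /* Old surface: extract a tagged union from the result. */
        AMvalue const value = AMresultValue(result);
        if (value.tag == AM_VALUE_STR) {          /* tag spelling assumed */
            /* value.str is a UTF-8 AMbyteSpan view, not NUL-terminated. */
            printf("%.*s\n", (int)value.str.count, (char const*)value.str.src);
        }
        AMfree(result);
    }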
pub enum AMresult { - ActorId(am::ActorId, Option), - ChangeHashes(Vec), - Changes(Vec, Option>), - Doc(Box), + Items(Vec), Error(String), - ListItems(Vec), - MapItems(Vec), - ObjId(AMobjId), - ObjItems(Vec), - String(String), - Strings(Vec), - SyncMessage(AMsyncMessage), - SyncState(Box), - Value(am::Value<'static>), - Void, } impl AMresult { - pub(crate) fn err(s: &str) -> Self { - AMresult::Error(s.to_string()) + pub(crate) fn error(s: &str) -> Self { + Self::Error(s.to_string()) + } + + pub(crate) fn item(item: AMitem) -> Self { + Self::Items(vec![item]) + } + + pub(crate) fn items(items: Vec) -> Self { + Self::Items(items) + } +} + +impl Default for AMresult { + fn default() -> Self { + Self::Items(vec![]) } } impl From for AMresult { fn from(auto_commit: am::AutoCommit) -> Self { - AMresult::Doc(Box::new(AMdoc::new(auto_commit))) + Self::item(AMitem::exact(am::ROOT, auto_commit.into())) + } +} + +impl From for AMresult { + fn from(change: am::Change) -> Self { + Self::item(change.into()) } } impl From for AMresult { fn from(change_hash: am::ChangeHash) -> Self { - AMresult::ChangeHashes(vec![change_hash]) + Self::item(change_hash.into()) } } impl From> for AMresult { - fn from(c: Option) -> Self { - match c { - Some(c) => c.into(), - None => AMresult::Void, + fn from(maybe: Option) -> Self { + match maybe { + Some(change_hash) => change_hash.into(), + None => Self::item(Default::default()), } } } -impl From> for AMresult { - fn from(keys: am::Keys<'_, '_>) -> Self { - AMresult::Strings(keys.collect()) - } -} - -impl From> for AMresult { - fn from(keys: am::KeysAt<'_, '_>) -> Self { - AMresult::Strings(keys.collect()) - } -} - -impl From>> for AMresult { - fn from(list_range: am::ListRange<'static, Range>) -> Self { - AMresult::ListItems( - list_range - .map(|(i, v, o)| AMlistItem::new(i, v.clone(), o)) - .collect(), - ) - } -} - -impl From>> for AMresult { - fn from(list_range: am::ListRangeAt<'static, Range>) -> Self { - AMresult::ListItems( - list_range - .map(|(i, v, o)| AMlistItem::new(i, v.clone(), o)) - .collect(), - ) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRange<'static, Range>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRangeAt<'static, Range>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRange<'static, RangeFrom>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRangeAt<'static, RangeFrom>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From> for AMresult { - fn from(map_range: am::MapRange<'static, RangeFull>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From> for AMresult { - fn from(map_range: am::MapRangeAt<'static, RangeFull>) -> Self { - let map_items: Vec = map_range - 
.map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRange<'static, RangeTo>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) - } -} - -impl From>> for AMresult { - fn from(map_range: am::MapRangeAt<'static, RangeTo>) -> Self { - let map_items: Vec = map_range - .map(|(k, v, o): (&'_ str, am::Value<'_>, am::ObjId)| AMmapItem::new(k, v.clone(), o)) - .collect(); - AMresult::MapItems(map_items) +impl From> for AMresult { + fn from(maybe: Result) -> Self { + match maybe { + Ok(change_hash) => change_hash.into(), + Err(e) => Self::error(&e.to_string()), + } } } impl From for AMresult { fn from(state: am::sync::State) -> Self { - AMresult::SyncState(Box::new(AMsyncState::new(state))) + Self::item(state.into()) } } impl From> for AMresult { fn from(pairs: am::Values<'static>) -> Self { - AMresult::ObjItems(pairs.map(|(v, o)| AMobjItem::new(v.clone(), o)).collect()) - } -} - -impl From, am::ObjId)>, am::AutomergeError>> for AMresult { - fn from(maybe: Result, am::ObjId)>, am::AutomergeError>) -> Self { - match maybe { - Ok(pairs) => AMresult::ObjItems( - pairs - .into_iter() - .map(|(v, o)| AMobjItem::new(v, o)) - .collect(), - ), - Err(e) => AMresult::err(&e.to_string()), - } + Self::items(pairs.map(|(v, o)| AMitem::exact(o, v.into())).collect()) } } @@ -517,37 +89,150 @@ impl From for *mut AMresult { } } +impl From> for AMresult { + fn from(keys: am::Keys<'_, '_>) -> Self { + Self::items(keys.map(|s| s.into()).collect()) + } +} + +impl From> for AMresult { + fn from(keys: am::KeysAt<'_, '_>) -> Self { + Self::items(keys.map(|s| s.into()).collect()) + } +} + +impl From>> for AMresult { + fn from(list_range: am::ListRange<'static, Range>) -> Self { + Self::items( + list_range + .map(|(i, v, o)| AMitem::indexed(AMindex::Pos(i), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(list_range: am::ListRangeAt<'static, Range>) -> Self { + Self::items( + list_range + .map(|(i, v, o)| AMitem::indexed(AMindex::Pos(i), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRange<'static, Range>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRangeAt<'static, Range>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRange<'static, RangeFrom>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRangeAt<'static, RangeFrom>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From> for AMresult { + fn from(map_range: am::MapRange<'static, RangeFull>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From> for AMresult { + fn from(map_range: am::MapRangeAt<'static, RangeFull>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), 
o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRange<'static, RangeTo>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + +impl From>> for AMresult { + fn from(map_range: am::MapRangeAt<'static, RangeTo>) -> Self { + Self::items( + map_range + .map(|(k, v, o)| AMitem::indexed(AMindex::Key(k.into()), o, v.into())) + .collect(), + ) + } +} + impl From> for AMresult { fn from(maybe: Option<&am::Change>) -> Self { - match maybe { - Some(change) => AMresult::Changes(vec![change.clone()], None), - None => AMresult::Void, - } + Self::item(match maybe { + Some(change) => change.clone().into(), + None => Default::default(), + }) } } impl From> for AMresult { fn from(maybe: Option) -> Self { - match maybe { - Some(message) => AMresult::SyncMessage(AMsyncMessage::new(message)), - None => AMresult::Void, - } + Self::item(match maybe { + Some(message) => message.into(), + None => Default::default(), + }) } } impl From> for AMresult { fn from(maybe: Result<(), am::AutomergeError>) -> Self { match maybe { - Ok(()) => AMresult::Void, - Err(e) => AMresult::err(&e.to_string()), + Ok(()) => Self::item(Default::default()), + Err(e) => Self::error(&e.to_string()), } } } + impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(actor_id) => AMresult::ActorId(actor_id, None), - Err(e) => AMresult::err(&e.to_string()), + Ok(actor_id) => Self::item(actor_id.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -555,8 +240,8 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(actor_id) => AMresult::ActorId(actor_id, None), - Err(e) => AMresult::err(&e.to_string()), + Ok(actor_id) => Self::item(actor_id.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -564,8 +249,8 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(auto_commit) => AMresult::Doc(Box::new(AMdoc::new(auto_commit))), - Err(e) => AMresult::err(&e.to_string()), + Ok(auto_commit) => Self::item(auto_commit.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -573,17 +258,17 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(change) => AMresult::Changes(vec![change], None), - Err(e) => AMresult::err(&e.to_string()), + Ok(change) => Self::item(change.into()), + Err(e) => Self::error(&e.to_string()), } } } -impl From> for AMresult { - fn from(maybe: Result) -> Self { - match maybe { - Ok(obj_id) => AMresult::ObjId(AMobjId::new(obj_id)), - Err(e) => AMresult::err(&e.to_string()), +impl From<(Result, am::ObjType)> for AMresult { + fn from(tuple: (Result, am::ObjType)) -> Self { + match tuple { + (Ok(obj_id), obj_type) => Self::item((obj_id, obj_type).into()), + (Err(e), _) => Self::error(&e.to_string()), } } } @@ -591,8 +276,8 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(message) => AMresult::SyncMessage(AMsyncMessage::new(message)), - Err(e) => AMresult::err(&e.to_string()), + Ok(message) => Self::item(message.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -600,8 +285,8 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(state) => AMresult::SyncState(Box::new(AMsyncState::new(state))), - Err(e) => AMresult::err(&e.to_string()), + Ok(state) => Self::item(state.into()), + Err(e) => 
Self::error(&e.to_string()), } } } @@ -609,8 +294,8 @@ impl From> for AMresult { impl From, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::AutomergeError>) -> Self { match maybe { - Ok(value) => AMresult::Value(value), - Err(e) => AMresult::err(&e.to_string()), + Ok(value) => Self::item(value.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -618,12 +303,9 @@ impl From, am::AutomergeError>> for AMresult { impl From, am::ObjId)>, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::ObjId)>, am::AutomergeError>) -> Self { match maybe { - Ok(Some((value, obj_id))) => match value { - am::Value::Object(_) => AMresult::ObjId(AMobjId::new(obj_id)), - _ => AMresult::Value(value), - }, - Ok(None) => AMresult::Void, - Err(e) => AMresult::err(&e.to_string()), + Ok(Some((value, obj_id))) => Self::item(AMitem::exact(obj_id, value.into())), + Ok(None) => Self::item(Default::default()), + Err(e) => Self::error(&e.to_string()), } } } @@ -631,8 +313,8 @@ impl From, am::ObjId)>, am::AutomergeError>> f impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(string) => AMresult::String(string), - Err(e) => AMresult::err(&e.to_string()), + Ok(string) => Self::item(string.into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -640,8 +322,8 @@ impl From> for AMresult { impl From> for AMresult { fn from(maybe: Result) -> Self { match maybe { - Ok(size) => AMresult::Value(am::Value::uint(size as u64)), - Err(e) => AMresult::err(&e.to_string()), + Ok(size) => Self::item(am::Value::uint(size as u64).into()), + Err(e) => Self::error(&e.to_string()), } } } @@ -649,17 +331,8 @@ impl From> for AMresult { impl From, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::AutomergeError>) -> Self { match maybe { - Ok(changes) => AMresult::Changes(changes, None), - Err(e) => AMresult::err(&e.to_string()), - } - } -} - -impl From, am::LoadChangeError>> for AMresult { - fn from(maybe: Result, am::LoadChangeError>) -> Self { - match maybe { - Ok(changes) => AMresult::Changes(changes, None), - Err(e) => AMresult::err(&e.to_string()), + Ok(changes) => Self::items(changes.into_iter().map(|change| change.into()).collect()), + Err(e) => Self::error(&e.to_string()), } } } @@ -667,12 +340,22 @@ impl From, am::LoadChangeError>> for AMresult { impl From, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::AutomergeError>) -> Self { match maybe { - Ok(changes) => { - let changes: Vec = - changes.iter().map(|&change| change.clone()).collect(); - AMresult::Changes(changes, None) - } - Err(e) => AMresult::err(&e.to_string()), + Ok(changes) => Self::items( + changes + .into_iter() + .map(|change| change.clone().into()) + .collect(), + ), + Err(e) => Self::error(&e.to_string()), + } + } +} + +impl From, am::LoadChangeError>> for AMresult { + fn from(maybe: Result, am::LoadChangeError>) -> Self { + match maybe { + Ok(changes) => Self::items(changes.into_iter().map(|change| change.into()).collect()), + Err(e) => Self::error(&e.to_string()), } } } @@ -680,8 +363,13 @@ impl From, am::AutomergeError>> for AMresult { impl From, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::AutomergeError>) -> Self { match maybe { - Ok(change_hashes) => AMresult::ChangeHashes(change_hashes), - Err(e) => AMresult::err(&e.to_string()), + Ok(change_hashes) => Self::items( + change_hashes + .into_iter() + .map(|change_hash| change_hash.into()) + .collect(), + ), + Err(e) => Self::error(&e.to_string()), } } } @@ -689,8 +377,27 @@ impl From, am::AutomergeError>> for AMresult 
{ impl From, am::InvalidChangeHashSlice>> for AMresult { fn from(maybe: Result, am::InvalidChangeHashSlice>) -> Self { match maybe { - Ok(change_hashes) => AMresult::ChangeHashes(change_hashes), - Err(e) => AMresult::err(&e.to_string()), + Ok(change_hashes) => Self::items( + change_hashes + .into_iter() + .map(|change_hash| change_hash.into()) + .collect(), + ), + Err(e) => Self::error(&e.to_string()), + } + } +} + +impl From, am::ObjId)>, am::AutomergeError>> for AMresult { + fn from(maybe: Result, am::ObjId)>, am::AutomergeError>) -> Self { + match maybe { + Ok(pairs) => Self::items( + pairs + .into_iter() + .map(|(v, o)| AMitem::exact(o, v.into())) + .collect(), + ), + Err(e) => Self::error(&e.to_string()), } } } @@ -698,28 +405,66 @@ impl From, am::InvalidChangeHashSlice>> for AMresult impl From, am::AutomergeError>> for AMresult { fn from(maybe: Result, am::AutomergeError>) -> Self { match maybe { - Ok(bytes) => AMresult::Value(am::Value::bytes(bytes)), - Err(e) => AMresult::err(&e.to_string()), + Ok(bytes) => Self::item(am::Value::bytes(bytes).into()), + Err(e) => Self::error(&e.to_string()), } } } +impl From<&[am::Change]> for AMresult { + fn from(changes: &[am::Change]) -> Self { + Self::items(changes.iter().map(|change| change.clone().into()).collect()) + } +} + impl From> for AMresult { fn from(changes: Vec<&am::Change>) -> Self { - let changes: Vec = changes.iter().map(|&change| change.clone()).collect(); - AMresult::Changes(changes, None) + Self::items( + changes + .into_iter() + .map(|change| change.clone().into()) + .collect(), + ) + } +} + +impl From<&[am::ChangeHash]> for AMresult { + fn from(change_hashes: &[am::ChangeHash]) -> Self { + Self::items( + change_hashes + .iter() + .map(|change_hash| (*change_hash).into()) + .collect(), + ) + } +} + +impl From<&[am::sync::Have]> for AMresult { + fn from(haves: &[am::sync::Have]) -> Self { + Self::items(haves.iter().map(|have| have.clone().into()).collect()) } } impl From> for AMresult { fn from(change_hashes: Vec) -> Self { - AMresult::ChangeHashes(change_hashes) + Self::items( + change_hashes + .into_iter() + .map(|change_hash| change_hash.into()) + .collect(), + ) + } +} + +impl From> for AMresult { + fn from(haves: Vec) -> Self { + Self::items(haves.into_iter().map(|have| have.into()).collect()) } } impl From> for AMresult { fn from(bytes: Vec) -> Self { - AMresult::Value(am::Value::bytes(bytes)) + Self::item(am::Value::bytes(bytes).into()) } } @@ -729,8 +474,9 @@ pub fn to_result>(r: R) -> *mut AMresult { /// \ingroup enumerations /// \enum AMstatus +/// \installed_headerfile /// \brief The status of an API call. -#[derive(Debug)] +#[derive(PartialEq, Eq)] #[repr(u8)] pub enum AMstatus { /// Success. @@ -742,35 +488,80 @@ pub enum AMstatus { InvalidResult, } +/// \memberof AMresult +/// \brief Concatenates the items from two results. +/// +/// \param[in] dest A pointer to an `AMresult` struct. +/// \param[in] src A pointer to an `AMresult` struct. +/// \return A pointer to an `AMresult` struct with the items from \p dest in +/// their original order followed by the items from \p src in their +/// original order. +/// \pre \p dest `!= NULL` +/// \pre \p src `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
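With the conversions above, a call that used to surface a `ChangeHashes`/`Changes` sequence now yields one `AMitem` per element, so the element count is simply the result's size. A small sketch under the assumption that `AM_STATUS_OK` is the cbindgen-generated spelling of `AMstatus::Ok`; `count_change_hashes` is an illustrative name for code handed a result whose items are change hashes.

    #include <stddef.h>
    #include "automerge.h"   /* header path assumed */

    static size_t count_change_hashes(AMresult* result) {
        size_t count = 0;
        if (AMresultStatus(result) == AM_STATUS_OK) {
            /* One item per change hash, per the From impls added above. */
            count = AMresultSize(result);
        }
        AMresultFree(result);
        return count;
    }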
+/// \internal +/// +/// # Safety +/// dest must be a valid pointer to an AMresult +/// src must be a valid pointer to an AMresult +#[no_mangle] +pub unsafe extern "C" fn AMresultCat(dest: *const AMresult, src: *const AMresult) -> *mut AMresult { + use AMresult::*; + + match (dest.as_ref(), src.as_ref()) { + (Some(dest), Some(src)) => match (dest, src) { + (Items(dest_items), Items(src_items)) => { + return AMresult::items( + dest_items + .iter() + .cloned() + .chain(src_items.iter().cloned()) + .collect(), + ) + .into(); + } + (Error(_), Error(_)) | (Error(_), Items(_)) | (Items(_), Error(_)) => { + AMresult::error("Invalid `AMresult`").into() + } + }, + (None, None) | (None, Some(_)) | (Some(_), None) => { + AMresult::error("Invalid `AMresult*`").into() + } + } +} + /// \memberof AMresult /// \brief Gets a result's error message string. /// /// \param[in] result A pointer to an `AMresult` struct. /// \return A UTF-8 string view as an `AMbyteSpan` struct. -/// \pre \p result `!= NULL`. +/// \pre \p result `!= NULL` /// \internal /// /// # Safety /// result must be a valid pointer to an AMresult #[no_mangle] -pub unsafe extern "C" fn AMerrorMessage(result: *const AMresult) -> AMbyteSpan { - match result.as_ref() { - Some(AMresult::Error(s)) => s.as_bytes().into(), - _ => Default::default(), +pub unsafe extern "C" fn AMresultError(result: *const AMresult) -> AMbyteSpan { + use AMresult::*; + + if let Some(Error(message)) = result.as_ref() { + return message.as_bytes().into(); } + Default::default() } /// \memberof AMresult /// \brief Deallocates the storage for a result. /// -/// \param[in,out] result A pointer to an `AMresult` struct. -/// \pre \p result `!= NULL`. +/// \param[in] result A pointer to an `AMresult` struct. +/// \pre \p result `!= NULL` /// \internal /// /// # Safety /// result must be a valid pointer to an AMresult #[no_mangle] -pub unsafe extern "C" fn AMfree(result: *mut AMresult) { +pub unsafe extern "C" fn AMresultFree(result: *mut AMresult) { if !result.is_null() { let result: AMresult = *Box::from_raw(result); drop(result) @@ -778,39 +569,67 @@ pub unsafe extern "C" fn AMfree(result: *mut AMresult) { } /// \memberof AMresult -/// \brief Gets the size of a result's value. +/// \brief Gets a result's first item. /// /// \param[in] result A pointer to an `AMresult` struct. -/// \return The count of values in \p result. -/// \pre \p result `!= NULL`. +/// \return A pointer to an `AMitem` struct. +/// \pre \p result `!= NULL` +/// \internal +/// +/// # Safety +/// result must be a valid pointer to an AMresult +#[no_mangle] +pub unsafe extern "C" fn AMresultItem(result: *mut AMresult) -> *mut AMitem { + use AMresult::*; + + if let Some(Items(items)) = result.as_mut() { + if !items.is_empty() { + return &mut items[0]; + } + } + std::ptr::null_mut() +} + +/// \memberof AMresult +/// \brief Gets a result's items. +/// +/// \param[in] result A pointer to an `AMresult` struct. +/// \return An `AMitems` struct. +/// \pre \p result `!= NULL` +/// \internal +/// +/// # Safety +/// result must be a valid pointer to an AMresult +#[no_mangle] +pub unsafe extern "C" fn AMresultItems<'a>(result: *mut AMresult) -> AMitems<'a> { + use AMresult::*; + + if let Some(Items(items)) = result.as_mut() { + if !items.is_empty() { + return AMitems::new(items); + } + } + Default::default() +} + +/// \memberof AMresult +/// \brief Gets the size of a result. +/// +/// \param[in] result A pointer to an `AMresult` struct. +/// \return The count of items within \p result. 
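`AMresultCat()` clones the items of both operands into a fresh result, so all three results still own their storage and must be freed individually. A hedged usage sketch (the `AM_STATUS_OK` spelling and the `concat_results` wrapper are assumptions):

    #include <stdio.h>
    #include "automerge.h"   /* header path assumed */

    static AMresult* concat_results(AMresult* a, AMresult* b) {
        AMresult* both = AMresultCat(a, b);
        if (AMresultStatus(both) != AM_STATUS_OK) {
            AMbyteSpan const msg = AMresultError(both);
            fprintf(stderr, "AMresultCat: %.*s\n",
                    (int)msg.count, (char const*)msg.src);
        }
        AMresultFree(a);
        AMresultFree(b);
        return both;   /* caller frees the concatenation */
    }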
+/// \pre \p result `!= NULL` /// \internal /// /// # Safety /// result must be a valid pointer to an AMresult #[no_mangle] pub unsafe extern "C" fn AMresultSize(result: *const AMresult) -> usize { - if let Some(result) = result.as_ref() { - use AMresult::*; + use self::AMresult::*; - match result { - Error(_) | Void => 0, - ActorId(_, _) - | Doc(_) - | ObjId(_) - | String(_) - | SyncMessage(_) - | SyncState(_) - | Value(_) => 1, - ChangeHashes(change_hashes) => change_hashes.len(), - Changes(changes, _) => changes.len(), - ListItems(list_items) => list_items.len(), - MapItems(map_items) => map_items.len(), - ObjItems(obj_items) => obj_items.len(), - Strings(cstrings) => cstrings.len(), - } - } else { - 0 + if let Some(Items(items)) = result.as_ref() { + return items.len(); } + 0 } /// \memberof AMresult @@ -818,94 +637,24 @@ pub unsafe extern "C" fn AMresultSize(result: *const AMresult) -> usize { /// /// \param[in] result A pointer to an `AMresult` struct. /// \return An `AMstatus` enum tag. -/// \pre \p result `!= NULL`. +/// \pre \p result `!= NULL` /// \internal /// /// # Safety /// result must be a valid pointer to an AMresult #[no_mangle] pub unsafe extern "C" fn AMresultStatus(result: *const AMresult) -> AMstatus { - match result.as_ref() { - Some(AMresult::Error(_)) => AMstatus::Error, - None => AMstatus::InvalidResult, - _ => AMstatus::Ok, - } -} + use AMresult::*; -/// \memberof AMresult -/// \brief Gets a result's value. -/// -/// \param[in] result A pointer to an `AMresult` struct. -/// \return An `AMvalue` struct. -/// \pre \p result `!= NULL`. -/// \internal -/// -/// # Safety -/// result must be a valid pointer to an AMresult -#[no_mangle] -pub unsafe extern "C" fn AMresultValue<'a>(result: *mut AMresult) -> AMvalue<'a> { - let mut content = AMvalue::Void; - if let Some(result) = result.as_mut() { + if let Some(result) = result.as_ref() { match result { - AMresult::ActorId(actor_id, c_actor_id) => match c_actor_id { - None => { - content = AMvalue::ActorId(&*c_actor_id.insert(AMactorId::new(&*actor_id))); - } - Some(c_actor_id) => { - content = AMvalue::ActorId(&*c_actor_id); - } - }, - AMresult::ChangeHashes(change_hashes) => { - content = AMvalue::ChangeHashes(AMchangeHashes::new(change_hashes)); + Error(_) => { + return AMstatus::Error; } - AMresult::Changes(changes, storage) => { - content = AMvalue::Changes(AMchanges::new( - changes, - storage.get_or_insert(BTreeMap::new()), - )); + _ => { + return AMstatus::Ok; } - AMresult::Doc(doc) => content = AMvalue::Doc(&mut **doc), - AMresult::Error(_) => {} - AMresult::ListItems(list_items) => { - content = AMvalue::ListItems(AMlistItems::new(list_items)); - } - AMresult::MapItems(map_items) => { - content = AMvalue::MapItems(AMmapItems::new(map_items)); - } - AMresult::ObjId(obj_id) => { - content = AMvalue::ObjId(obj_id); - } - AMresult::ObjItems(obj_items) => { - content = AMvalue::ObjItems(AMobjItems::new(obj_items)); - } - AMresult::String(string) => content = AMvalue::Str(string.as_bytes().into()), - AMresult::Strings(strings) => { - content = AMvalue::Strs(AMstrs::new(strings)); - } - AMresult::SyncMessage(sync_message) => { - content = AMvalue::SyncMessage(sync_message); - } - AMresult::SyncState(sync_state) => { - content = AMvalue::SyncState(&mut *sync_state); - } - AMresult::Value(value) => { - content = (&*value).into(); - } - AMresult::Void => {} } - }; - content -} - -/// \struct AMunknownValue -/// \installed_headerfile -/// \brief A value (typically for a `set` operation) whose type is unknown. 
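Taken together, `AMresultStatus()`, `AMresultError()`, `AMresultItem()`/`AMresultItems()` and `AMresultFree()` replace the old `AMresultValue()`/`AMvalue` surface. A sketch of the resulting error-handling idiom; the `AM_STATUS_*` spellings are assumed to be the cbindgen-generated names, and the item accessors borrow from the result, so they must be read before `AMresultFree()`.

    #include <stdbool.h>
    #include <stdio.h>
    #include "automerge.h"   /* header path assumed */

    static bool use_result(AMresult* result) {
        bool ok = false;
        switch (AMresultStatus(result)) {
        case AM_STATUS_OK: {
            AMitem* first = AMresultItem(result);   /* first (often only) item */
            AMitems all   = AMresultItems(result);  /* or iterate all of them  */
            /* ... read first/all here, while `result` is still alive ... */
            (void)first;
            (void)all;
            ok = true;
            break;
        }
        case AM_STATUS_ERROR: {
            AMbyteSpan const msg = AMresultError(result);
            fprintf(stderr, "error: %.*s\n", (int)msg.count, (char const*)msg.src);
            break;
        }
        default:   /* AM_STATUS_INVALID_RESULT */
            break;
        }
        AMresultFree(result);   /* exactly one free per result */
        return ok;
    }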
-/// -#[derive(Eq, PartialEq)] -#[repr(C)] -pub struct AMunknownValue { - /// The value's raw bytes. - bytes: AMbyteSpan, - /// The value's encoded type identifier. - type_code: u8, + } + AMstatus::InvalidResult } diff --git a/rust/automerge-c/src/result_stack.rs b/rust/automerge-c/src/result_stack.rs deleted file mode 100644 index cfb9c7d2..00000000 --- a/rust/automerge-c/src/result_stack.rs +++ /dev/null @@ -1,156 +0,0 @@ -use crate::result::{AMfree, AMresult, AMresultStatus, AMresultValue, AMstatus, AMvalue}; - -/// \struct AMresultStack -/// \installed_headerfile -/// \brief A node in a singly-linked list of result pointers. -/// -/// \note Using this data structure is purely optional because its only purpose -/// is to make memory management tolerable for direct usage of this API -/// in C, C++ and Objective-C. -#[repr(C)] -pub struct AMresultStack { - /// A result to be deallocated. - pub result: *mut AMresult, - /// The next node in the singly-linked list or `NULL`. - pub next: *mut AMresultStack, -} - -impl AMresultStack { - pub fn new(result: *mut AMresult, next: *mut AMresultStack) -> Self { - Self { result, next } - } -} - -/// \memberof AMresultStack -/// \brief Deallocates the storage for a stack of results. -/// -/// \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. -/// \return The number of `AMresult` structs freed. -/// \pre \p stack `!= NULL`. -/// \post `*stack == NULL`. -/// \note Calling this function is purely optional because its only purpose is -/// to make memory management tolerable for direct usage of this API in -/// C, C++ and Objective-C. -/// \internal -/// -/// # Safety -/// stack must be a valid AMresultStack pointer pointer -#[no_mangle] -pub unsafe extern "C" fn AMfreeStack(stack: *mut *mut AMresultStack) -> usize { - if stack.is_null() { - return 0; - } - let mut count: usize = 0; - while !(*stack).is_null() { - AMfree(AMpop(stack)); - count += 1; - } - count -} - -/// \memberof AMresultStack -/// \brief Gets the topmost result from the stack after removing it. -/// -/// \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. -/// \return A pointer to an `AMresult` struct or `NULL`. -/// \pre \p stack `!= NULL`. -/// \post `*stack == NULL`. -/// \note Calling this function is purely optional because its only purpose is -/// to make memory management tolerable for direct usage of this API in -/// C, C++ and Objective-C. -/// \internal -/// -/// # Safety -/// stack must be a valid AMresultStack pointer pointer -#[no_mangle] -pub unsafe extern "C" fn AMpop(stack: *mut *mut AMresultStack) -> *mut AMresult { - if stack.is_null() || (*stack).is_null() { - return std::ptr::null_mut(); - } - let top = Box::from_raw(*stack); - *stack = top.next; - let result = top.result; - drop(top); - result -} - -/// \memberof AMresultStack -/// \brief The prototype of a function to be called when a value matching the -/// given discriminant cannot be extracted from the result at the top of -/// the given stack. -/// -/// \note Implementing this function is purely optional because its only purpose -/// is to make memory management tolerable for direct usage of this API -/// in C, C++ and Objective-C. -pub type AMpushCallback = - Option ()>; - -/// \memberof AMresultStack -/// \brief Pushes the given result onto the given stack and then either extracts -/// a value matching the given discriminant from that result or, -/// failing that, calls the given function and gets a void value instead. 
-/// -/// \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. -/// \param[in] result A pointer to an `AMresult` struct. -/// \param[in] discriminant An `AMvalue` variant's corresponding enum tag. -/// \param[in] callback A pointer to a function with the same signature as -/// `AMpushCallback()` or `NULL`. -/// \return An `AMvalue` struct. -/// \pre \p stack `!= NULL`. -/// \pre \p result `!= NULL`. -/// \warning If \p stack `== NULL` then \p result is deallocated in order to -/// prevent a memory leak. -/// \note Calling this function is purely optional because its only purpose is -/// to make memory management tolerable for direct usage of this API in -/// C, C++ and Objective-C. -/// \internal -/// -/// # Safety -/// stack must be a valid AMresultStack pointer pointer -/// result must be a valid AMresult pointer -#[no_mangle] -pub unsafe extern "C" fn AMpush<'a>( - stack: *mut *mut AMresultStack, - result: *mut AMresult, - discriminant: u8, - callback: AMpushCallback, -) -> AMvalue<'a> { - if stack.is_null() { - // There's no stack to push the result onto so it has to be freed in - // order to prevent a memory leak. - AMfree(result); - if let Some(callback) = callback { - callback(stack, discriminant); - } - return AMvalue::Void; - } else if result.is_null() { - if let Some(callback) = callback { - callback(stack, discriminant); - } - return AMvalue::Void; - } - // Always push the result onto the stack, even if it's wrong, so that the - // given callback can retrieve it. - let node = Box::new(AMresultStack::new(result, *stack)); - let top = Box::into_raw(node); - *stack = top; - // Test that the result contains a value. - match AMresultStatus(result) { - AMstatus::Ok => {} - _ => { - if let Some(callback) = callback { - callback(stack, discriminant); - } - return AMvalue::Void; - } - } - // Test that the result's value matches the given discriminant. - let value = AMresultValue(result); - if discriminant != u8::from(&value) { - if let Some(callback) = callback { - callback(stack, discriminant); - } - return AMvalue::Void; - } - value -} diff --git a/rust/automerge-c/src/strs.rs b/rust/automerge-c/src/strs.rs deleted file mode 100644 index a36861b7..00000000 --- a/rust/automerge-c/src/strs.rs +++ /dev/null @@ -1,359 +0,0 @@ -use std::cmp::Ordering; -use std::ffi::c_void; -use std::mem::size_of; -use std::os::raw::c_char; - -use crate::byte_span::AMbyteSpan; - -/// \brief Creates a string view from a C string. -/// -/// \param[in] c_str A UTF-8 C string. -/// \return A UTF-8 string view as an `AMbyteSpan` struct. -/// \internal -/// -/// #Safety -/// c_str must be a null-terminated array of `c_char` -#[no_mangle] -pub unsafe extern "C" fn AMstr(c_str: *const c_char) -> AMbyteSpan { - c_str.into() -} - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. 
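The `AMresultStack`/`AMpush()` helper deleted here existed purely to make result cleanup tolerable from C: every result was pushed onto a singly-linked stack and released in one sweep. For contrast with the single `AMresultFree()` call in the new idiom, a sketch of the old pattern; the `AM_VALUE_STR` discriminant spelling is assumed and the wrapper name is illustrative.

    #include <stdio.h>
    #include "automerge.h"   /* header path assumed */

    static void old_stack_pattern(AMresult* result) {
        AMresultStack* stack = NULL;
        /* AMpush() stores `result` on the stack and extracts a value whose tag
           must match the given discriminant; on mismatch it returns a void
           value (and would invoke the callback, here NULL). */
        AMvalue const value = AMpush(&stack, result, AM_VALUE_STR, NULL);
        if (value.tag == AM_VALUE_STR) {
            printf("%.*s\n", (int)value.str.count, (char const*)value.str.src);
        }
        /* Frees every AMresult that was pushed, in one call. */
        AMfreeStack(&stack);
    }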
-pub const USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new(strings: &[String], offset: isize) -> Self { - Self { - len: strings.len(), - offset, - ptr: strings.as_ptr() as *const c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. - len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option { - if self.is_stopped() { - return None; - } - let slice: &[String] = - unsafe { std::slice::from_raw_parts(self.ptr as *const String, self.len) }; - let value = slice[self.get_index()].as_bytes().into(); - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[String] = - unsafe { std::slice::from_raw_parts(self.ptr as *const String, self.len) }; - Some(slice[self.get_index()].as_bytes().into()) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) - .try_into() - .unwrap() - } - } -} - -/// \struct AMstrs -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of UTF-8 strings. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMstrs { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. 
- detail: [u8; USIZE_USIZE_USIZE_], -} - -impl AMstrs { - pub fn new(strings: &[String]) -> Self { - Self { - detail: Detail::new(strings, 0).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[String]> for AMstrs { - fn as_ref(&self) -> &[String] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const String, detail.len) } - } -} - -impl Default for AMstrs { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMstrs -/// \brief Advances an iterator over a sequence of UTF-8 strings by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] strs A pointer to an `AMstrs` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p strs `!= NULL`. -/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsAdvance(strs: *mut AMstrs, n: isize) { - if let Some(strs) = strs.as_mut() { - strs.advance(n); - }; -} - -/// \memberof AMstrs -/// \brief Compares the sequences of UTF-8 strings underlying a pair of -/// iterators. -/// -/// \param[in] strs1 A pointer to an `AMstrs` struct. -/// \param[in] strs2 A pointer to an `AMstrs` struct. -/// \return `-1` if \p strs1 `<` \p strs2, `0` if -/// \p strs1 `==` \p strs2 and `1` if -/// \p strs1 `>` \p strs2. -/// \pre \p strs1 `!= NULL`. -/// \pre \p strs2 `!= NULL`. -/// \internal -/// -/// #Safety -/// strs1 must be a valid pointer to an AMstrs -/// strs2 must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsCmp(strs1: *const AMstrs, strs2: *const AMstrs) -> isize { - match (strs1.as_ref(), strs2.as_ref()) { - (Some(strs1), Some(strs2)) => match strs1.as_ref().cmp(strs2.as_ref()) { - Ordering::Less => -1, - Ordering::Equal => 0, - Ordering::Greater => 1, - }, - (None, Some(_)) => -1, - (Some(_), None) => 1, - (None, None) => 0, - } -} - -/// \memberof AMstrs -/// \brief Gets the key at the current position of an iterator over a sequence -/// of UTF-8 strings and then advances it by at most \p |n| positions -/// where the sign of \p n is relative to the iterator's direction. -/// -/// \param[in,out] strs A pointer to an `AMstrs` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A UTF-8 string view as an `AMbyteSpan` struct that's `AMstr(NULL)` -/// when \p strs was previously advanced past its forward/reverse limit. -/// \pre \p strs `!= NULL`. 
-/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsNext(strs: *mut AMstrs, n: isize) -> AMbyteSpan { - if let Some(strs) = strs.as_mut() { - if let Some(key) = strs.next(n) { - return key; - } - } - Default::default() -} - -/// \memberof AMstrs -/// \brief Advances an iterator over a sequence of UTF-8 strings by at most -/// \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the key at its new position. -/// -/// \param[in,out] strs A pointer to an `AMstrs` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A UTF-8 string view as an `AMbyteSpan` struct that's `AMstr(NULL)` -/// when \p strs is presently advanced past its forward/reverse limit. -/// \pre \p strs `!= NULL`. -/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsPrev(strs: *mut AMstrs, n: isize) -> AMbyteSpan { - if let Some(strs) = strs.as_mut() { - if let Some(key) = strs.prev(n) { - return key; - } - } - Default::default() -} - -/// \memberof AMstrs -/// \brief Gets the size of the sequence of UTF-8 strings underlying an -/// iterator. -/// -/// \param[in] strs A pointer to an `AMstrs` struct. -/// \return The count of values in \p strs. -/// \pre \p strs `!= NULL`. -/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsSize(strs: *const AMstrs) -> usize { - if let Some(strs) = strs.as_ref() { - strs.len() - } else { - 0 - } -} - -/// \memberof AMstrs -/// \brief Creates an iterator over the same sequence of UTF-8 strings as the -/// given one but with the opposite position and direction. -/// -/// \param[in] strs A pointer to an `AMstrs` struct. -/// \return An `AMstrs` struct. -/// \pre \p strs `!= NULL`. -/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsReversed(strs: *const AMstrs) -> AMstrs { - if let Some(strs) = strs.as_ref() { - strs.reversed() - } else { - AMstrs::default() - } -} - -/// \memberof AMstrs -/// \brief Creates an iterator at the starting position over the same sequence -/// of UTF-8 strings as the given one. -/// -/// \param[in] strs A pointer to an `AMstrs` struct. -/// \return An `AMstrs` struct -/// \pre \p strs `!= NULL`. 
-/// \internal -/// -/// #Safety -/// strs must be a valid pointer to an AMstrs -#[no_mangle] -pub unsafe extern "C" fn AMstrsRewound(strs: *const AMstrs) -> AMstrs { - if let Some(strs) = strs.as_ref() { - strs.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/sync.rs b/rust/automerge-c/src/sync.rs index cfed1af5..fe0332a1 100644 --- a/rust/automerge-c/src/sync.rs +++ b/rust/automerge-c/src/sync.rs @@ -1,7 +1,7 @@ mod have; -mod haves; mod message; mod state; +pub(crate) use have::AMsyncHave; pub(crate) use message::{to_sync_message, AMsyncMessage}; pub(crate) use state::AMsyncState; diff --git a/rust/automerge-c/src/sync/have.rs b/rust/automerge-c/src/sync/have.rs index 312151e7..37d2031f 100644 --- a/rust/automerge-c/src/sync/have.rs +++ b/rust/automerge-c/src/sync/have.rs @@ -1,23 +1,23 @@ use automerge as am; -use crate::change_hashes::AMchangeHashes; +use crate::result::{to_result, AMresult}; /// \struct AMsyncHave /// \installed_headerfile /// \brief A summary of the changes that the sender of a synchronization /// message already has. #[derive(Clone, Eq, PartialEq)] -pub struct AMsyncHave(*const am::sync::Have); +pub struct AMsyncHave(am::sync::Have); impl AMsyncHave { - pub fn new(have: &am::sync::Have) -> Self { + pub fn new(have: am::sync::Have) -> Self { Self(have) } } impl AsRef for AMsyncHave { fn as_ref(&self) -> &am::sync::Have { - unsafe { &*self.0 } + &self.0 } } @@ -25,17 +25,18 @@ impl AsRef for AMsyncHave { /// \brief Gets the heads of the sender. /// /// \param[in] sync_have A pointer to an `AMsyncHave` struct. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_have `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_have `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_have must be a valid pointer to an AMsyncHave #[no_mangle] -pub unsafe extern "C" fn AMsyncHaveLastSync(sync_have: *const AMsyncHave) -> AMchangeHashes { - if let Some(sync_have) = sync_have.as_ref() { - AMchangeHashes::new(&sync_have.as_ref().last_sync) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncHaveLastSync(sync_have: *const AMsyncHave) -> *mut AMresult { + to_result(match sync_have.as_ref() { + Some(sync_have) => sync_have.as_ref().last_sync.as_slice(), + None => Default::default(), + }) } diff --git a/rust/automerge-c/src/sync/haves.rs b/rust/automerge-c/src/sync/haves.rs deleted file mode 100644 index c74b8e96..00000000 --- a/rust/automerge-c/src/sync/haves.rs +++ /dev/null @@ -1,378 +0,0 @@ -use automerge as am; -use std::collections::BTreeMap; -use std::ffi::c_void; -use std::mem::size_of; - -use crate::sync::have::AMsyncHave; - -#[repr(C)] -struct Detail { - len: usize, - offset: isize, - ptr: *const c_void, - storage: *mut c_void, -} - -/// \note cbindgen won't propagate the value of a `std::mem::size_of()` call -/// (https://github.com/eqrion/cbindgen/issues/252) but it will -/// propagate the name of a constant initialized from it so if the -/// constant's name is a symbolic representation of the value it can be -/// converted into a number by post-processing the header it generated. 
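`AMsyncHaveLastSync()` now allocates an `AMresult` carrying `AM_VAL_TYPE_CHANGE_HASH` items instead of handing back an `AMchangeHashes` view, so the caller owns the result and must free it. A sketch, assuming `AM_STATUS_OK` is the generated spelling of `AMstatus::Ok`:

    #include <stddef.h>
    #include "automerge.h"   /* header path assumed */

    static size_t last_sync_size(AMsyncHave const* have) {
        AMresult* result = AMsyncHaveLastSync(have);
        size_t count = 0;
        if (AMresultStatus(result) == AM_STATUS_OK) {
            count = AMresultSize(result);   /* number of heads in last_sync */
        }
        AMresultFree(result);
        return count;
    }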
-pub const USIZE_USIZE_USIZE_USIZE_: usize = size_of::(); - -impl Detail { - fn new( - haves: &[am::sync::Have], - offset: isize, - storage: &mut BTreeMap, - ) -> Self { - let storage: *mut BTreeMap = storage; - Self { - len: haves.len(), - offset, - ptr: haves.as_ptr() as *const c_void, - storage: storage as *mut c_void, - } - } - - pub fn advance(&mut self, n: isize) { - if n == 0 { - return; - } - let len = self.len as isize; - self.offset = if self.offset < 0 { - // It's reversed. - let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); - if unclipped >= 0 { - // Clip it to the forward stop. - len - } else { - std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) - } - } else { - let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); - if unclipped < 0 { - // Clip it to the reverse stop. - -(len + 1) - } else { - std::cmp::max(0, std::cmp::min(unclipped, len)) - } - } - } - - pub fn get_index(&self) -> usize { - (self.offset - + if self.offset < 0 { - self.len as isize - } else { - 0 - }) as usize - } - - pub fn next(&mut self, n: isize) -> Option<*const AMsyncHave> { - if self.is_stopped() { - return None; - } - let slice: &[am::sync::Have] = - unsafe { std::slice::from_raw_parts(self.ptr as *const am::sync::Have, self.len) }; - let storage = unsafe { &mut *(self.storage as *mut BTreeMap) }; - let index = self.get_index(); - let value = match storage.get_mut(&index) { - Some(value) => value, - None => { - storage.insert(index, AMsyncHave::new(&slice[index])); - storage.get_mut(&index).unwrap() - } - }; - self.advance(n); - Some(value) - } - - pub fn is_stopped(&self) -> bool { - let len = self.len as isize; - self.offset < -len || self.offset == len - } - - pub fn prev(&mut self, n: isize) -> Option<*const AMsyncHave> { - self.advance(-n); - if self.is_stopped() { - return None; - } - let slice: &[am::sync::Have] = - unsafe { std::slice::from_raw_parts(self.ptr as *const am::sync::Have, self.len) }; - let storage = unsafe { &mut *(self.storage as *mut BTreeMap) }; - let index = self.get_index(); - Some(match storage.get_mut(&index) { - Some(value) => value, - None => { - storage.insert(index, AMsyncHave::new(&slice[index])); - storage.get_mut(&index).unwrap() - } - }) - } - - pub fn reversed(&self) -> Self { - Self { - len: self.len, - offset: -(self.offset + 1), - ptr: self.ptr, - storage: self.storage, - } - } - - pub fn rewound(&self) -> Self { - Self { - len: self.len, - offset: if self.offset < 0 { -1 } else { 0 }, - ptr: self.ptr, - storage: self.storage, - } - } -} - -impl From for [u8; USIZE_USIZE_USIZE_USIZE_] { - fn from(detail: Detail) -> Self { - unsafe { - std::slice::from_raw_parts( - (&detail as *const Detail) as *const u8, - USIZE_USIZE_USIZE_USIZE_, - ) - .try_into() - .unwrap() - } - } -} - -/// \struct AMsyncHaves -/// \installed_headerfile -/// \brief A random-access iterator over a sequence of synchronization haves. -#[repr(C)] -#[derive(Eq, PartialEq)] -pub struct AMsyncHaves { - /// An implementation detail that is intentionally opaque. - /// \warning Modifying \p detail will cause undefined behavior. - /// \note The actual size of \p detail will vary by platform, this is just - /// the one for the platform this documentation was built on. 
- detail: [u8; USIZE_USIZE_USIZE_USIZE_], -} - -impl AMsyncHaves { - pub fn new(haves: &[am::sync::Have], storage: &mut BTreeMap) -> Self { - Self { - detail: Detail::new(haves, 0, storage).into(), - } - } - - pub fn advance(&mut self, n: isize) { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.advance(n); - } - - pub fn len(&self) -> usize { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - detail.len - } - - pub fn next(&mut self, n: isize) -> Option<*const AMsyncHave> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.next(n) - } - - pub fn prev(&mut self, n: isize) -> Option<*const AMsyncHave> { - let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; - detail.prev(n) - } - - pub fn reversed(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.reversed().into(), - } - } - - pub fn rewound(&self) -> Self { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - Self { - detail: detail.rewound().into(), - } - } -} - -impl AsRef<[am::sync::Have]> for AMsyncHaves { - fn as_ref(&self) -> &[am::sync::Have] { - let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; - unsafe { std::slice::from_raw_parts(detail.ptr as *const am::sync::Have, detail.len) } - } -} - -impl Default for AMsyncHaves { - fn default() -> Self { - Self { - detail: [0; USIZE_USIZE_USIZE_USIZE_], - } - } -} - -/// \memberof AMsyncHaves -/// \brief Advances an iterator over a sequence of synchronization haves by at -/// most \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] sync_haves A pointer to an `AMsyncHaves` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \pre \p sync_haves `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesAdvance(sync_haves: *mut AMsyncHaves, n: isize) { - if let Some(sync_haves) = sync_haves.as_mut() { - sync_haves.advance(n); - }; -} - -/// \memberof AMsyncHaves -/// \brief Tests the equality of two sequences of synchronization haves -/// underlying a pair of iterators. -/// -/// \param[in] sync_haves1 A pointer to an `AMsyncHaves` struct. -/// \param[in] sync_haves2 A pointer to an `AMsyncHaves` struct. -/// \return `true` if \p sync_haves1 `==` \p sync_haves2 and `false` otherwise. -/// \pre \p sync_haves1 `!= NULL`. -/// \pre \p sync_haves2 `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves1 must be a valid pointer to an AMsyncHaves -/// sync_haves2 must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesEqual( - sync_haves1: *const AMsyncHaves, - sync_haves2: *const AMsyncHaves, -) -> bool { - match (sync_haves1.as_ref(), sync_haves2.as_ref()) { - (Some(sync_haves1), Some(sync_haves2)) => sync_haves1.as_ref() == sync_haves2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, - } -} - -/// \memberof AMsyncHaves -/// \brief Gets the synchronization have at the current position of an iterator -/// over a sequence of synchronization haves and then advances it by at -/// most \p |n| positions where the sign of \p n is relative to the -/// iterator's direction. -/// -/// \param[in,out] sync_haves A pointer to an `AMsyncHaves` struct. 
-/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMsyncHave` struct that's `NULL` when -/// \p sync_haves was previously advanced past its forward/reverse -/// limit. -/// \pre \p sync_haves `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesNext( - sync_haves: *mut AMsyncHaves, - n: isize, -) -> *const AMsyncHave { - if let Some(sync_haves) = sync_haves.as_mut() { - if let Some(sync_have) = sync_haves.next(n) { - return sync_have; - } - } - std::ptr::null() -} - -/// \memberof AMsyncHaves -/// \brief Advances an iterator over a sequence of synchronization haves by at -/// most \p |n| positions where the sign of \p n is relative to the -/// iterator's direction and then gets the synchronization have at its -/// new position. -/// -/// \param[in,out] sync_haves A pointer to an `AMsyncHaves` struct. -/// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum -/// number of positions to advance. -/// \return A pointer to an `AMsyncHave` struct that's `NULL` when -/// \p sync_haves is presently advanced past its forward/reverse limit. -/// \pre \p sync_haves `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesPrev( - sync_haves: *mut AMsyncHaves, - n: isize, -) -> *const AMsyncHave { - if let Some(sync_haves) = sync_haves.as_mut() { - if let Some(sync_have) = sync_haves.prev(n) { - return sync_have; - } - } - std::ptr::null() -} - -/// \memberof AMsyncHaves -/// \brief Gets the size of the sequence of synchronization haves underlying an -/// iterator. -/// -/// \param[in] sync_haves A pointer to an `AMsyncHaves` struct. -/// \return The count of values in \p sync_haves. -/// \pre \p sync_haves `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesSize(sync_haves: *const AMsyncHaves) -> usize { - if let Some(sync_haves) = sync_haves.as_ref() { - sync_haves.len() - } else { - 0 - } -} - -/// \memberof AMsyncHaves -/// \brief Creates an iterator over the same sequence of synchronization haves -/// as the given one but with the opposite position and direction. -/// -/// \param[in] sync_haves A pointer to an `AMsyncHaves` struct. -/// \return An `AMsyncHaves` struct -/// \pre \p sync_haves `!= NULL`. -/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesReversed(sync_haves: *const AMsyncHaves) -> AMsyncHaves { - if let Some(sync_haves) = sync_haves.as_ref() { - sync_haves.reversed() - } else { - Default::default() - } -} - -/// \memberof AMsyncHaves -/// \brief Creates an iterator at the starting position over the same sequence -/// of synchronization haves as the given one. -/// -/// \param[in] sync_haves A pointer to an `AMsyncHaves` struct. -/// \return An `AMsyncHaves` struct -/// \pre \p sync_haves `!= NULL`. 
-/// \internal -/// -/// #Safety -/// sync_haves must be a valid pointer to an AMsyncHaves -#[no_mangle] -pub unsafe extern "C" fn AMsyncHavesRewound(sync_haves: *const AMsyncHaves) -> AMsyncHaves { - if let Some(sync_haves) = sync_haves.as_ref() { - sync_haves.rewound() - } else { - Default::default() - } -} diff --git a/rust/automerge-c/src/sync/message.rs b/rust/automerge-c/src/sync/message.rs index 46a6d29a..bdb1db34 100644 --- a/rust/automerge-c/src/sync/message.rs +++ b/rust/automerge-c/src/sync/message.rs @@ -3,18 +3,15 @@ use std::cell::RefCell; use std::collections::BTreeMap; use crate::change::AMchange; -use crate::change_hashes::AMchangeHashes; -use crate::changes::AMchanges; use crate::result::{to_result, AMresult}; use crate::sync::have::AMsyncHave; -use crate::sync::haves::AMsyncHaves; macro_rules! to_sync_message { ($handle:expr) => {{ let handle = $handle.as_ref(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMsyncMessage pointer").into(), + None => return AMresult::error("Invalid `AMsyncMessage*`").into(), } }}; } @@ -51,55 +48,52 @@ impl AsRef for AMsyncMessage { /// \brief Gets the changes for the recipient to apply. /// /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return An `AMchanges` struct. -/// \pre \p sync_message `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE` items. +/// \pre \p sync_message `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_message must be a valid pointer to an AMsyncMessage #[no_mangle] -pub unsafe extern "C" fn AMsyncMessageChanges(sync_message: *const AMsyncMessage) -> AMchanges { - if let Some(sync_message) = sync_message.as_ref() { - AMchanges::new( - &sync_message.body.changes, - &mut sync_message.changes_storage.borrow_mut(), - ) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncMessageChanges(sync_message: *const AMsyncMessage) -> *mut AMresult { + to_result(match sync_message.as_ref() { + Some(sync_message) => sync_message.body.changes.as_slice(), + None => Default::default(), + }) } /// \memberof AMsyncMessage -/// \brief Decodes a sequence of bytes into a synchronization message. +/// \brief Decodes an array of bytes into a synchronization message. /// /// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes in \p src to decode. -/// \return A pointer to an `AMresult` struct containing an `AMsyncMessage` -/// struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to decode from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_SYNC_MESSAGE` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
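A minimal sketch of the decode path documented above, assuming `src` and `count` describe bytes received from a peer (for example, bytes produced by `AMsyncMessageEncode()` on the other side):

AMresult* result = AMsyncMessageDecode(src, count);
if (AMresultStatus(result) == AM_STATUS_OK) {
    AMitem* const item = AMresultItem(result);
    if (AMitemValType(item) == AM_VAL_TYPE_SYNC_MESSAGE) {
        /* Convert the item into an AMsyncMessage* and apply it to a document;
         * the message remains valid only until AMresultFree(result). */
    }
}
AMresultFree(result);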
/// \internal /// /// # Safety -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] pub unsafe extern "C" fn AMsyncMessageDecode(src: *const u8, count: usize) -> *mut AMresult { - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result(am::sync::Message::decode(&data)) + let data = std::slice::from_raw_parts(src, count); + to_result(am::sync::Message::decode(data)) } /// \memberof AMsyncMessage -/// \brief Encodes a synchronization message as a sequence of bytes. +/// \brief Encodes a synchronization message as an array of bytes. /// /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return A pointer to an `AMresult` struct containing an array of bytes as -/// an `AMbyteSpan` struct. -/// \pre \p sync_message `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BYTES` item. +/// \pre \p sync_message `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -114,41 +108,40 @@ pub unsafe extern "C" fn AMsyncMessageEncode(sync_message: *const AMsyncMessage) /// \brief Gets a summary of the changes that the sender already has. /// /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return An `AMhaves` struct. -/// \pre \p sync_message `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_SYNC_HAVE` items. +/// \pre \p sync_message `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_message must be a valid pointer to an AMsyncMessage #[no_mangle] -pub unsafe extern "C" fn AMsyncMessageHaves(sync_message: *const AMsyncMessage) -> AMsyncHaves { - if let Some(sync_message) = sync_message.as_ref() { - AMsyncHaves::new( - &sync_message.as_ref().have, - &mut sync_message.haves_storage.borrow_mut(), - ) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncMessageHaves(sync_message: *const AMsyncMessage) -> *mut AMresult { + to_result(match sync_message.as_ref() { + Some(sync_message) => sync_message.as_ref().have.as_slice(), + None => Default::default(), + }) } /// \memberof AMsyncMessage /// \brief Gets the heads of the sender. /// /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_message `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_message `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
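A minimal sketch of inspecting a decoded message with the accessors above and below, assuming `message` is a valid `AMsyncMessage` pointer and that `<stdio.h>` is included; each accessor hands back its own owning `AMresult`:

AMresult* const heads = AMsyncMessageHeads(message);
AMresult* const needs = AMsyncMessageNeeds(message);
AMresult* const haves = AMsyncMessageHaves(message);
printf("sender heads: %zu, needs: %zu, haves: %zu\n",
       AMresultSize(heads), AMresultSize(needs), AMresultSize(haves));
AMresultFree(haves);
AMresultFree(needs);
AMresultFree(heads);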
/// \internal /// /// # Safety /// sync_message must be a valid pointer to an AMsyncMessage #[no_mangle] -pub unsafe extern "C" fn AMsyncMessageHeads(sync_message: *const AMsyncMessage) -> AMchangeHashes { - if let Some(sync_message) = sync_message.as_ref() { - AMchangeHashes::new(&sync_message.as_ref().heads) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncMessageHeads(sync_message: *const AMsyncMessage) -> *mut AMresult { + to_result(match sync_message.as_ref() { + Some(sync_message) => sync_message.as_ref().heads.as_slice(), + None => Default::default(), + }) } /// \memberof AMsyncMessage @@ -156,17 +149,18 @@ pub unsafe extern "C" fn AMsyncMessageHeads(sync_message: *const AMsyncMessage) /// by the recipient. /// /// \param[in] sync_message A pointer to an `AMsyncMessage` struct. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_message `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_message `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_message must be a valid pointer to an AMsyncMessage #[no_mangle] -pub unsafe extern "C" fn AMsyncMessageNeeds(sync_message: *const AMsyncMessage) -> AMchangeHashes { - if let Some(sync_message) = sync_message.as_ref() { - AMchangeHashes::new(&sync_message.as_ref().need) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncMessageNeeds(sync_message: *const AMsyncMessage) -> *mut AMresult { + to_result(match sync_message.as_ref() { + Some(sync_message) => sync_message.as_ref().need.as_slice(), + None => Default::default(), + }) } diff --git a/rust/automerge-c/src/sync/state.rs b/rust/automerge-c/src/sync/state.rs index 1c1d316f..1d85ed98 100644 --- a/rust/automerge-c/src/sync/state.rs +++ b/rust/automerge-c/src/sync/state.rs @@ -2,17 +2,15 @@ use automerge as am; use std::cell::RefCell; use std::collections::BTreeMap; -use crate::change_hashes::AMchangeHashes; use crate::result::{to_result, AMresult}; use crate::sync::have::AMsyncHave; -use crate::sync::haves::AMsyncHaves; macro_rules! to_sync_state { ($handle:expr) => {{ let handle = $handle.as_ref(); match handle { Some(b) => b, - None => return AMresult::err("Invalid AMsyncState pointer").into(), + None => return AMresult::error("Invalid `AMsyncState*`").into(), } }}; } @@ -56,36 +54,35 @@ impl From for *mut AMsyncState { } /// \memberof AMsyncState -/// \brief Decodes a sequence of bytes into a synchronization state. +/// \brief Decodes an array of bytes into a synchronization state. /// /// \param[in] src A pointer to an array of bytes. -/// \param[in] count The number of bytes in \p src to decode. -/// \return A pointer to an `AMresult` struct containing an `AMsyncState` -/// struct. -/// \pre \p src `!= NULL`. -/// \pre `0 <` \p count `<= sizeof(`\p src`)`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \param[in] count The count of bytes to decode from the array pointed to by +/// \p src. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_SYNC_STATE` item. +/// \pre \p src `!= NULL` +/// \pre `sizeof(`\p src `) > 0` +/// \pre \p count `<= sizeof(`\p src `)` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
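A minimal sketch of restoring a persisted synchronization state with the function documented above, assuming `saved` and `saved_count` hold bytes previously produced by `AMsyncStateEncode()` and that `<stdio.h>` and `<stdlib.h>` are included:

AMresult* result = AMsyncStateDecode(saved, saved_count);
if (AMresultStatus(result) != AM_STATUS_OK) {
    /* Corrupt or truncated input surfaces as an error message. */
    char* const msg = AMstrdup(AMresultError(result), NULL);
    fprintf(stderr, "sync state decode failed: %s\n", msg);
    free(msg);
} else if (AMitemValType(AMresultItem(result)) == AM_VAL_TYPE_SYNC_STATE) {
    /* Convert the item into an AMsyncState* for use with a document. */
}
AMresultFree(result);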
/// \internal /// /// # Safety -/// src must be a byte array of size `>= count` +/// src must be a byte array of length `>= count` #[no_mangle] pub unsafe extern "C" fn AMsyncStateDecode(src: *const u8, count: usize) -> *mut AMresult { - let mut data = Vec::new(); - data.extend_from_slice(std::slice::from_raw_parts(src, count)); - to_result(am::sync::State::decode(&data)) + let data = std::slice::from_raw_parts(src, count); + to_result(am::sync::State::decode(data)) } /// \memberof AMsyncState -/// \brief Encodes a synchronizaton state as a sequence of bytes. +/// \brief Encodes a synchronization state as an array of bytes. /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. -/// \return A pointer to an `AMresult` struct containing an array of bytes as -/// an `AMbyteSpan` struct. -/// \pre \p sync_state `!= NULL`. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_BYTE_SPAN` item. +/// \pre \p sync_state `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety @@ -102,8 +99,9 @@ pub unsafe extern "C" fn AMsyncStateEncode(sync_state: *const AMsyncState) -> *m /// \param[in] sync_state1 A pointer to an `AMsyncState` struct. /// \param[in] sync_state2 A pointer to an `AMsyncState` struct. /// \return `true` if \p sync_state1 `==` \p sync_state2 and `false` otherwise. -/// \pre \p sync_state1 `!= NULL`. -/// \pre \p sync_state2 `!= NULL`. +/// \pre \p sync_state1 `!= NULL` +/// \pre \p sync_state2 `!= NULL` +/// \post `!(`\p sync_state1 `&&` \p sync_state2 `) -> false` /// \internal /// /// #Safety @@ -116,18 +114,17 @@ pub unsafe extern "C" fn AMsyncStateEqual( ) -> bool { match (sync_state1.as_ref(), sync_state2.as_ref()) { (Some(sync_state1), Some(sync_state2)) => sync_state1.as_ref() == sync_state2.as_ref(), - (None, Some(_)) | (Some(_), None) | (None, None) => false, + (None, None) | (None, Some(_)) | (Some(_), None) => false, } } /// \memberof AMsyncState -/// \brief Allocates a new synchronization state and initializes it with -/// defaults. +/// \brief Allocates a new synchronization state and initializes it from +/// default values. /// -/// \return A pointer to an `AMresult` struct containing a pointer to an -/// `AMsyncState` struct. -/// \warning The returned `AMresult` struct must be deallocated with `AMfree()` -/// in order to prevent a memory leak. +/// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_SYNC_STATE` item. +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. #[no_mangle] pub extern "C" fn AMsyncStateInit() -> *mut AMresult { to_result(am::sync::State::new()) @@ -137,40 +134,36 @@ pub extern "C" fn AMsyncStateInit() -> *mut AMresult { /// \brief Gets the heads that are shared by both peers. /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_state `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_state `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. 
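A minimal sketch of allocating a fresh state with `AMsyncStateInit()`; the item accessor for `AM_VAL_TYPE_SYNC_STATE` values is assumed to follow the `AMitemToDoc()`/`AMitemToActorId()` naming pattern used in the tests and is not taken from this patch, so it only appears as a comment:

AMresult* result = AMsyncStateInit();
if (AMresultStatus(result) == AM_STATUS_OK &&
    AMitemValType(AMresultItem(result)) == AM_VAL_TYPE_SYNC_STATE) {
    /* Convert the single item into an AMsyncState* here (accessor assumed,
     * see above) and pass it to the sync functions; it stays valid until
     * AMresultFree(result) is called. */
}
AMresultFree(result);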
/// \internal /// /// # Safety /// sync_state must be a valid pointer to an AMsyncState #[no_mangle] -pub unsafe extern "C" fn AMsyncStateSharedHeads(sync_state: *const AMsyncState) -> AMchangeHashes { - if let Some(sync_state) = sync_state.as_ref() { - AMchangeHashes::new(&sync_state.as_ref().shared_heads) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncStateSharedHeads(sync_state: *const AMsyncState) -> *mut AMresult { + let sync_state = to_sync_state!(sync_state); + to_result(sync_state.as_ref().shared_heads.as_slice()) } /// \memberof AMsyncState /// \brief Gets the heads that were last sent by this peer. /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_state `!= NULL`. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_state `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_state must be a valid pointer to an AMsyncState #[no_mangle] -pub unsafe extern "C" fn AMsyncStateLastSentHeads( - sync_state: *const AMsyncState, -) -> AMchangeHashes { - if let Some(sync_state) = sync_state.as_ref() { - AMchangeHashes::new(&sync_state.as_ref().last_sent_heads) - } else { - Default::default() - } +pub unsafe extern "C" fn AMsyncStateLastSentHeads(sync_state: *const AMsyncState) -> *mut AMresult { + let sync_state = to_sync_state!(sync_state); + to_result(sync_state.as_ref().last_sent_heads.as_slice()) } /// \memberof AMsyncState @@ -178,11 +171,13 @@ pub unsafe extern "C" fn AMsyncStateLastSentHeads( /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. /// \param[out] has_value A pointer to a boolean flag that is set to `true` if -/// the returned `AMhaves` struct is relevant, `false` otherwise. -/// \return An `AMhaves` struct. -/// \pre \p sync_state `!= NULL`. -/// \pre \p has_value `!= NULL`. -/// \internal +/// the returned `AMitems` struct is relevant, `false` otherwise. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_SYNC_HAVE` items. +/// \pre \p sync_state `!= NULL` +/// \pre \p has_value `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. +//// \internal /// /// # Safety /// sync_state must be a valid pointer to an AMsyncState @@ -191,15 +186,15 @@ pub unsafe extern "C" fn AMsyncStateLastSentHeads( pub unsafe extern "C" fn AMsyncStateTheirHaves( sync_state: *const AMsyncState, has_value: *mut bool, -) -> AMsyncHaves { +) -> *mut AMresult { if let Some(sync_state) = sync_state.as_ref() { if let Some(haves) = &sync_state.as_ref().their_have { *has_value = true; - return AMsyncHaves::new(haves, &mut sync_state.their_haves_storage.borrow_mut()); - }; + return to_result(haves.as_slice()); + } }; *has_value = false; - Default::default() + to_result(Vec::::new()) } /// \memberof AMsyncState @@ -207,29 +202,31 @@ pub unsafe extern "C" fn AMsyncStateTheirHaves( /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. /// \param[out] has_value A pointer to a boolean flag that is set to `true` if -/// the returned `AMchangeHashes` struct is relevant, `false` -/// otherwise. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_state `!= NULL`. -/// \pre \p has_value `!= NULL`. +/// the returned `AMitems` struct is relevant, `false` +/// otherwise. 
+/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_state `!= NULL` +/// \pre \p has_value `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_state must be a valid pointer to an AMsyncState -/// has_value must be a valid pointer to a bool. +/// has_value must be a valid pointer to a bool #[no_mangle] pub unsafe extern "C" fn AMsyncStateTheirHeads( sync_state: *const AMsyncState, has_value: *mut bool, -) -> AMchangeHashes { +) -> *mut AMresult { if let Some(sync_state) = sync_state.as_ref() { if let Some(change_hashes) = &sync_state.as_ref().their_heads { *has_value = true; - return AMchangeHashes::new(change_hashes); + return to_result(change_hashes.as_slice()); } }; *has_value = false; - Default::default() + to_result(Vec::::new()) } /// \memberof AMsyncState @@ -237,27 +234,29 @@ pub unsafe extern "C" fn AMsyncStateTheirHeads( /// /// \param[in] sync_state A pointer to an `AMsyncState` struct. /// \param[out] has_value A pointer to a boolean flag that is set to `true` if -/// the returned `AMchangeHashes` struct is relevant, `false` -/// otherwise. -/// \return An `AMchangeHashes` struct. -/// \pre \p sync_state `!= NULL`. -/// \pre \p has_value `!= NULL`. +/// the returned `AMitems` struct is relevant, `false` +/// otherwise. +/// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items. +/// \pre \p sync_state `!= NULL` +/// \pre \p has_value `!= NULL` +/// \warning The returned `AMresult` struct pointer must be passed to +/// `AMresultFree()` in order to avoid a memory leak. /// \internal /// /// # Safety /// sync_state must be a valid pointer to an AMsyncState -/// has_value must be a valid pointer to a bool. +/// has_value must be a valid pointer to a bool #[no_mangle] pub unsafe extern "C" fn AMsyncStateTheirNeeds( sync_state: *const AMsyncState, has_value: *mut bool, -) -> AMchangeHashes { +) -> *mut AMresult { if let Some(sync_state) = sync_state.as_ref() { if let Some(change_hashes) = &sync_state.as_ref().their_need { *has_value = true; - return AMchangeHashes::new(change_hashes); + return to_result(change_hashes.as_slice()); } }; *has_value = false; - Default::default() + to_result(Vec::::new()) } diff --git a/rust/automerge-c/src/utils/result.c b/rust/automerge-c/src/utils/result.c new file mode 100644 index 00000000..f922ca31 --- /dev/null +++ b/rust/automerge-c/src/utils/result.c @@ -0,0 +1,33 @@ +#include + +#include + +AMresult* AMresultFrom(int count, ...) 
{ + AMresult* result = NULL; + bool is_ok = true; + va_list args; + va_start(args, count); + for (int i = 0; i != count; ++i) { + AMresult* src = va_arg(args, AMresult*); + AMresult* dest = result; + is_ok = (AMresultStatus(src) == AM_STATUS_OK); + if (is_ok) { + if (dest) { + result = AMresultCat(dest, src); + is_ok = (AMresultStatus(result) == AM_STATUS_OK); + AMresultFree(dest); + AMresultFree(src); + } else { + result = src; + } + } else { + AMresultFree(src); + } + } + va_end(args); + if (!is_ok) { + AMresultFree(result); + result = NULL; + } + return result; +} diff --git a/rust/automerge-c/src/utils/stack.c b/rust/automerge-c/src/utils/stack.c new file mode 100644 index 00000000..2cad7c5c --- /dev/null +++ b/rust/automerge-c/src/utils/stack.c @@ -0,0 +1,106 @@ +#include +#include + +#include +#include + +void AMstackFree(AMstack** stack) { + if (stack) { + while (*stack) { + AMresultFree(AMstackPop(stack, NULL)); + } + } +} + +AMresult* AMstackPop(AMstack** stack, const AMresult* result) { + if (!stack) { + return NULL; + } + AMstack** prev = stack; + if (result) { + while (*prev && ((*prev)->result != result)) { + *prev = (*prev)->prev; + } + } + if (!*prev) { + return NULL; + } + AMstack* target = *prev; + *prev = target->prev; + AMresult* popped = target->result; + free(target); + return popped; +} + +AMresult* AMstackResult(AMstack** stack, AMresult* result, AMstackCallback callback, void* data) { + if (!stack) { + if (callback) { + /* Create a local stack so that the callback can still examine the + * result. */ + AMstack node = {.result = result, .prev = NULL}; + AMstack* stack = &node; + callback(&stack, data); + } else { + /* \note There is no reason to call this function when both the + * stack and the callback are null. */ + fprintf(stderr, "ERROR: NULL AMstackCallback!\n"); + } + /* \note Nothing can be returned without a stack regardless of + * whether or not the callback validated the result. */ + AMresultFree(result); + return NULL; + } + /* Always push the result onto the stack, even if it's null, so that the + * callback can examine it. */ + AMstack* next = calloc(1, sizeof(AMstack)); + *next = (AMstack){.result = result, .prev = *stack}; + AMstack* top = next; + *stack = top; + if (callback) { + if (!callback(stack, data)) { + /* The result didn't pass the callback's examination. */ + return NULL; + } + } else { + /* Report an obvious error. */ + if (result) { + AMbyteSpan const err_msg = AMresultError(result); + if (err_msg.src && err_msg.count) { + /* \note The callback may be null because the result is supposed + * to be examined externally so return it despite an + * error. */ + char* const cstr = AMstrdup(err_msg, NULL); + fprintf(stderr, "WARNING: %s.\n", cstr); + free(cstr); + } + } else { + /* \note There's no reason to call this function when both the + * result and the callback are null. */ + fprintf(stderr, "ERROR: NULL AMresult*!\n"); + return NULL; + } + } + return result; +} + +AMitem* AMstackItem(AMstack** stack, AMresult* result, AMstackCallback callback, void* data) { + AMitems items = AMstackItems(stack, result, callback, data); + return AMitemsNext(&items, 1); +} + +AMitems AMstackItems(AMstack** stack, AMresult* result, AMstackCallback callback, void* data) { + return (AMstackResult(stack, result, callback, data)) ? 
AMresultItems(result) : (AMitems){0}; +} + +size_t AMstackSize(AMstack const* const stack) { + if (!stack) { + return 0; + } + size_t count = 0; + AMstack const* prev = stack; + while (prev) { + ++count; + prev = prev->prev; + } + return count; +} \ No newline at end of file diff --git a/rust/automerge-c/src/utils/stack_callback_data.c b/rust/automerge-c/src/utils/stack_callback_data.c new file mode 100644 index 00000000..f1e988d8 --- /dev/null +++ b/rust/automerge-c/src/utils/stack_callback_data.c @@ -0,0 +1,9 @@ +#include + +#include + +AMstackCallbackData* AMstackCallbackDataInit(AMvalType const bitmask, char const* const file, int const line) { + AMstackCallbackData* data = malloc(sizeof(AMstackCallbackData)); + *data = (AMstackCallbackData){.bitmask = bitmask, .file = file, .line = line}; + return data; +} diff --git a/rust/automerge-c/src/utils/string.c b/rust/automerge-c/src/utils/string.c new file mode 100644 index 00000000..a0d1ebe3 --- /dev/null +++ b/rust/automerge-c/src/utils/string.c @@ -0,0 +1,46 @@ +#include +#include + +#include + +char* AMstrdup(AMbyteSpan const str, char const* nul) { + if (!str.src) { + return NULL; + } else if (!str.count) { + return strdup(""); + } + nul = (nul) ? nul : "\\0"; + size_t const nul_len = strlen(nul); + char* dup = NULL; + size_t dup_len = 0; + char const* begin = str.src; + char const* end = begin; + for (size_t i = 0; i != str.count; ++i, ++end) { + if (!*end) { + size_t const len = end - begin; + size_t const alloc_len = dup_len + len + nul_len; + if (dup) { + dup = realloc(dup, alloc_len + 1); + } else { + dup = malloc(alloc_len + 1); + } + memcpy(dup + dup_len, begin, len); + memcpy(dup + dup_len + len, nul, nul_len); + dup[alloc_len] = '\0'; + begin = end + 1; + dup_len = alloc_len; + } + } + if (begin != end) { + size_t const len = end - begin; + size_t const alloc_len = dup_len + len; + if (dup) { + dup = realloc(dup, alloc_len + 1); + } else { + dup = malloc(alloc_len + 1); + } + memcpy(dup + dup_len, begin, len); + dup[alloc_len] = '\0'; + } + return dup; +} diff --git a/rust/automerge-c/test/CMakeLists.txt b/rust/automerge-c/test/CMakeLists.txt index 704a27da..1759f140 100644 --- a/rust/automerge-c/test/CMakeLists.txt +++ b/rust/automerge-c/test/CMakeLists.txt @@ -1,53 +1,51 @@ -cmake_minimum_required(VERSION 3.18 FATAL_ERROR) - -find_package(cmocka REQUIRED) +find_package(cmocka CONFIG REQUIRED) add_executable( - test_${LIBRARY_NAME} + ${LIBRARY_NAME}_test actor_id_tests.c + base_state.c + byte_span_tests.c + cmocka_utils.c + enum_string_tests.c + doc_state.c doc_tests.c - group_state.c + item_tests.c list_tests.c macro_utils.c main.c map_tests.c - stack_utils.c str_utils.c ported_wasm/basic_tests.c ported_wasm/suite.c ported_wasm/sync_tests.c ) -set_target_properties(test_${LIBRARY_NAME} PROPERTIES LINKER_LANGUAGE C) +set_target_properties(${LIBRARY_NAME}_test PROPERTIES LINKER_LANGUAGE C) -# \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't -# contain a non-existent path so its build-time include directory -# must be specified for all of its dependent targets instead. 
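The `AMstack` helpers defined in stack.c above centralize result ownership for the test suite; a minimal usage sketch, assuming `<stdbool.h>`, `<stddef.h>` and the installed automerge-c headers are included, and using a plain status-checking callback in place of the tests' `cmocka_cb()`/`AMexpect()` pair (the latter is not shown in this hunk):

static bool check_ok(AMstack** stack, void* data) {
    (void)data; /* No expectations beyond AM_STATUS_OK in this sketch. */
    return AMresultStatus((*stack)->result) == AM_STATUS_OK;
}

static void example(void) {
    AMstack* stack = NULL;
    AMdoc* doc = NULL;
    /* Each call pushes its AMresult onto the stack and validates it. */
    AMitem* const doc_item = AMstackItem(&stack, AMcreate(NULL), check_ok, NULL);
    if (doc_item && AMitemToDoc(doc_item, &doc)) {
        /* Passing a NULL stack frees the result right after validation, which
         * suits calls whose items are not needed afterwards. */
        AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("key"), 1), check_ok, NULL);
        AMitems keys = AMstackItems(&stack, AMkeys(doc, AM_ROOT, NULL), check_ok, NULL);
        (void)keys;
    }
    /* A single call releases every result that was pushed. */
    AMstackFree(&stack);
}

The stack lets a test fail at any assertion without leaking the results it has accumulated, since `AMstackFree()` (called from `teardown_base()` below) releases them all at once.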
-target_include_directories( - test_${LIBRARY_NAME} - PRIVATE "$" -) +if(WIN32) + set(CMOCKA "cmocka::cmocka") +else() + set(CMOCKA "cmocka") +endif() -target_link_libraries(test_${LIBRARY_NAME} PRIVATE cmocka ${LIBRARY_NAME}) +target_link_libraries(${LIBRARY_NAME}_test PRIVATE ${CMOCKA} ${LIBRARY_NAME}) -add_dependencies(test_${LIBRARY_NAME} ${LIBRARY_NAME}_artifacts) +add_dependencies(${LIBRARY_NAME}_test ${BINDINGS_NAME}_artifacts) if(BUILD_SHARED_LIBS AND WIN32) add_custom_command( - TARGET test_${LIBRARY_NAME} + TARGET ${LIBRARY_NAME}_test POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX} - ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Copying the DLL built by Cargo into the test directory..." + COMMAND ${CMAKE_COMMAND} -E copy_if_different $ $ + COMMENT "Copying the DLL into the tests directory..." VERBATIM ) endif() -add_test(NAME test_${LIBRARY_NAME} COMMAND test_${LIBRARY_NAME}) +add_test(NAME ${LIBRARY_NAME}_test COMMAND ${LIBRARY_NAME}_test) add_custom_command( - TARGET test_${LIBRARY_NAME} + TARGET ${LIBRARY_NAME}_test POST_BUILD COMMAND ${CMAKE_CTEST_COMMAND} --config $ --output-on-failure diff --git a/rust/automerge-c/test/actor_id_tests.c b/rust/automerge-c/test/actor_id_tests.c index c98f2554..918d6213 100644 --- a/rust/automerge-c/test/actor_id_tests.c +++ b/rust/automerge-c/test/actor_id_tests.c @@ -14,99 +14,126 @@ #include "cmocka_utils.h" #include "str_utils.h" +/** + * \brief State for a group of cmocka test cases. + */ typedef struct { + /** An actor ID as an array of bytes. */ uint8_t* src; - AMbyteSpan str; + /** The count of bytes in \p src. */ size_t count; -} GroupState; + /** A stack of results. */ + AMstack* stack; + /** An actor ID as a hexadecimal string. */ + AMbyteSpan str; +} DocState; static int group_setup(void** state) { - GroupState* group_state = test_calloc(1, sizeof(GroupState)); - group_state->str.src = "000102030405060708090a0b0c0d0e0f"; - group_state->str.count = strlen(group_state->str.src); - group_state->count = group_state->str.count / 2; - group_state->src = test_malloc(group_state->count); - hex_to_bytes(group_state->str.src, group_state->src, group_state->count); - *state = group_state; + DocState* doc_state = test_calloc(1, sizeof(DocState)); + doc_state->str = AMstr("000102030405060708090a0b0c0d0e0f"); + doc_state->count = doc_state->str.count / 2; + doc_state->src = test_calloc(doc_state->count, sizeof(uint8_t)); + hex_to_bytes(doc_state->str.src, doc_state->src, doc_state->count); + *state = doc_state; return 0; } static int group_teardown(void** state) { - GroupState* group_state = *state; - test_free(group_state->src); - test_free(group_state); + DocState* doc_state = *state; + test_free(doc_state->src); + AMstackFree(&doc_state->stack); + test_free(doc_state); return 0; } -static void test_AMactorIdInit() { +static void test_AMactorIdFromBytes(void** state) { + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->stack; + /* Non-empty string. 
*/ + AMresult* result = AMstackResult(stack_ptr, AMactorIdFromBytes(doc_state->src, doc_state->count), NULL, NULL); + if (AMresultStatus(result) != AM_STATUS_OK) { + fail_msg_view("%s", AMresultError(result)); + } + assert_int_equal(AMresultSize(result), 1); + AMitem* const item = AMresultItem(result); + assert_int_equal(AMitemValType(item), AM_VAL_TYPE_ACTOR_ID); + AMactorId const* actor_id; + assert_true(AMitemToActorId(item, &actor_id)); + AMbyteSpan const bytes = AMactorIdBytes(actor_id); + assert_int_equal(bytes.count, doc_state->count); + assert_memory_equal(bytes.src, doc_state->src, bytes.count); + /* Empty array. */ + /** \todo Find out if this is intentionally allowed. */ + result = AMstackResult(stack_ptr, AMactorIdFromBytes(doc_state->src, 0), NULL, NULL); + if (AMresultStatus(result) != AM_STATUS_OK) { + fail_msg_view("%s", AMresultError(result)); + } + /* NULL array. */ + result = AMstackResult(stack_ptr, AMactorIdFromBytes(NULL, doc_state->count), NULL, NULL); + if (AMresultStatus(result) == AM_STATUS_OK) { + fail_msg("AMactorId from NULL."); + } +} + +static void test_AMactorIdFromStr(void** state) { + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->stack; + AMresult* result = AMstackResult(stack_ptr, AMactorIdFromStr(doc_state->str), NULL, NULL); + if (AMresultStatus(result) != AM_STATUS_OK) { + fail_msg_view("%s", AMresultError(result)); + } + assert_int_equal(AMresultSize(result), 1); + AMitem* const item = AMresultItem(result); + assert_int_equal(AMitemValType(item), AM_VAL_TYPE_ACTOR_ID); + /* The hexadecimal string should've been decoded as identical bytes. */ + AMactorId const* actor_id; + assert_true(AMitemToActorId(item, &actor_id)); + AMbyteSpan const bytes = AMactorIdBytes(actor_id); + assert_int_equal(bytes.count, doc_state->count); + assert_memory_equal(bytes.src, doc_state->src, bytes.count); + /* The bytes should've been encoded as an identical hexadecimal string. 
*/ + assert_true(AMitemToActorId(item, &actor_id)); + AMbyteSpan const str = AMactorIdStr(actor_id); + assert_int_equal(str.count, doc_state->str.count); + assert_memory_equal(str.src, doc_state->str.src, str.count); +} + +static void test_AMactorIdInit(void** state) { + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->stack; AMresult* prior_result = NULL; AMbyteSpan prior_bytes = {NULL, 0}; AMbyteSpan prior_str = {NULL, 0}; - AMresult* result = NULL; for (size_t i = 0; i != 11; ++i) { - result = AMactorIdInit(); + AMresult* result = AMstackResult(stack_ptr, AMactorIdInit(), NULL, NULL); if (AMresultStatus(result) != AM_STATUS_OK) { - fail_msg_view("%s", AMerrorMessage(result)); + fail_msg_view("%s", AMresultError(result)); } assert_int_equal(AMresultSize(result), 1); - AMvalue const value = AMresultValue(result); - assert_int_equal(value.tag, AM_VALUE_ACTOR_ID); - AMbyteSpan const bytes = AMactorIdBytes(value.actor_id); - AMbyteSpan const str = AMactorIdStr(value.actor_id); + AMitem* const item = AMresultItem(result); + assert_int_equal(AMitemValType(item), AM_VAL_TYPE_ACTOR_ID); + AMactorId const* actor_id; + assert_true(AMitemToActorId(item, &actor_id)); + AMbyteSpan const bytes = AMactorIdBytes(actor_id); + assert_true(AMitemToActorId(item, &actor_id)); + AMbyteSpan const str = AMactorIdStr(actor_id); if (prior_result) { size_t const max_byte_count = fmax(bytes.count, prior_bytes.count); assert_memory_not_equal(bytes.src, prior_bytes.src, max_byte_count); size_t const max_char_count = fmax(str.count, prior_str.count); assert_memory_not_equal(str.src, prior_str.src, max_char_count); - AMfree(prior_result); } prior_result = result; prior_bytes = bytes; prior_str = str; } - AMfree(result); -} - -static void test_AMactorIdInitBytes(void **state) { - GroupState* group_state = *state; - AMresult* const result = AMactorIdInitBytes(group_state->src, group_state->count); - if (AMresultStatus(result) != AM_STATUS_OK) { - fail_msg_view("%s", AMerrorMessage(result)); - } - assert_int_equal(AMresultSize(result), 1); - AMvalue const value = AMresultValue(result); - assert_int_equal(value.tag, AM_VALUE_ACTOR_ID); - AMbyteSpan const bytes = AMactorIdBytes(value.actor_id); - assert_int_equal(bytes.count, group_state->count); - assert_memory_equal(bytes.src, group_state->src, bytes.count); - AMfree(result); -} - -static void test_AMactorIdInitStr(void **state) { - GroupState* group_state = *state; - AMresult* const result = AMactorIdInitStr(group_state->str); - if (AMresultStatus(result) != AM_STATUS_OK) { - fail_msg_view("%s", AMerrorMessage(result)); - } - assert_int_equal(AMresultSize(result), 1); - AMvalue const value = AMresultValue(result); - assert_int_equal(value.tag, AM_VALUE_ACTOR_ID); - /* The hexadecimal string should've been decoded as identical bytes. */ - AMbyteSpan const bytes = AMactorIdBytes(value.actor_id); - assert_int_equal(bytes.count, group_state->count); - assert_memory_equal(bytes.src, group_state->src, bytes.count); - /* The bytes should've been encoded as an identical hexadecimal string. 
*/ - AMbyteSpan const str = AMactorIdStr(value.actor_id); - assert_int_equal(str.count, group_state->str.count); - assert_memory_equal(str.src, group_state->str.src, str.count); - AMfree(result); } int run_actor_id_tests(void) { const struct CMUnitTest tests[] = { + cmocka_unit_test(test_AMactorIdFromBytes), + cmocka_unit_test(test_AMactorIdFromStr), cmocka_unit_test(test_AMactorIdInit), - cmocka_unit_test(test_AMactorIdInitBytes), - cmocka_unit_test(test_AMactorIdInitStr), }; return cmocka_run_group_tests(tests, group_setup, group_teardown); diff --git a/rust/automerge-c/test/base_state.c b/rust/automerge-c/test/base_state.c new file mode 100644 index 00000000..53325a99 --- /dev/null +++ b/rust/automerge-c/test/base_state.c @@ -0,0 +1,17 @@ +#include + +/* local */ +#include "base_state.h" + +int setup_base(void** state) { + BaseState* base_state = calloc(1, sizeof(BaseState)); + *state = base_state; + return 0; +} + +int teardown_base(void** state) { + BaseState* base_state = *state; + AMstackFree(&base_state->stack); + free(base_state); + return 0; +} diff --git a/rust/automerge-c/test/base_state.h b/rust/automerge-c/test/base_state.h new file mode 100644 index 00000000..3c4ff01b --- /dev/null +++ b/rust/automerge-c/test/base_state.h @@ -0,0 +1,39 @@ +#ifndef TESTS_BASE_STATE_H +#define TESTS_BASE_STATE_H + +#include + +/* local */ +#include +#include + +/** + * \struct BaseState + * \brief The shared state for one or more cmocka test cases. + */ +typedef struct { + /** A stack of results. */ + AMstack* stack; +} BaseState; + +/** + * \memberof BaseState + * \brief Sets up the shared state for one or more cmocka test cases. + * + * \param[in,out] state A pointer to a pointer to a `BaseState` struct. + * \pre \p state `!= NULL`. + * \warning The `BaseState` struct returned through \p state must be + * passed to `teardown_base()` in order to avoid a memory leak. + */ +int setup_base(void** state); + +/** + * \memberof BaseState + * \brief Tears down the shared state for one or more cmocka test cases. + * + * \param[in] state A pointer to a pointer to a `BaseState` struct. + * \pre \p state `!= NULL`. 
+ */ +int teardown_base(void** state); + +#endif /* TESTS_BASE_STATE_H */ diff --git a/rust/automerge-c/test/byte_span_tests.c b/rust/automerge-c/test/byte_span_tests.c new file mode 100644 index 00000000..43856f3b --- /dev/null +++ b/rust/automerge-c/test/byte_span_tests.c @@ -0,0 +1,118 @@ +#include +#include +#include +#include +#include + +/* third-party */ +#include + +/* local */ +#include +#include + +static void test_AMbytes(void** state) { + static char const DATA[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; + + AMbyteSpan bytes = AMbytes(DATA, sizeof(DATA)); + assert_int_equal(bytes.count, sizeof(DATA)); + assert_memory_equal(bytes.src, DATA, bytes.count); + assert_ptr_equal(bytes.src, DATA); + /* Empty view */ + bytes = AMbytes(DATA, 0); + assert_int_equal(bytes.count, 0); + assert_ptr_equal(bytes.src, DATA); + /* Invalid array */ + bytes = AMbytes(NULL, SIZE_MAX); + assert_int_not_equal(bytes.count, SIZE_MAX); + assert_int_equal(bytes.count, 0); + assert_ptr_equal(bytes.src, NULL); +} + +static void test_AMstr(void** state) { + AMbyteSpan str = AMstr("abcdefghijkl"); + assert_int_equal(str.count, strlen("abcdefghijkl")); + assert_memory_equal(str.src, "abcdefghijkl", str.count); + /* Empty string */ + static char const* const EMPTY = ""; + + str = AMstr(EMPTY); + assert_int_equal(str.count, 0); + assert_ptr_equal(str.src, EMPTY); + /* Invalid string */ + str = AMstr(NULL); + assert_int_equal(str.count, 0); + assert_ptr_equal(str.src, NULL); +} + +static void test_AMstrCmp(void** state) { + /* Length ordering */ + assert_int_equal(AMstrCmp(AMstr("abcdef"), AMstr("abcdefghijkl")), -1); + assert_int_equal(AMstrCmp(AMstr("abcdefghijkl"), AMstr("abcdefghijkl")), 0); + assert_int_equal(AMstrCmp(AMstr("abcdefghijkl"), AMstr("abcdef")), 1); + /* Lexicographical ordering */ + assert_int_equal(AMstrCmp(AMstr("abcdef"), AMstr("ghijkl")), -1); + assert_int_equal(AMstrCmp(AMstr("ghijkl"), AMstr("abcdef")), 1); + /* Case ordering */ + assert_int_equal(AMstrCmp(AMstr("ABCDEFGHIJKL"), AMstr("abcdefghijkl")), -1); + assert_int_equal(AMstrCmp(AMstr("ABCDEFGHIJKL"), AMstr("ABCDEFGHIJKL")), 0); + assert_int_equal(AMstrCmp(AMstr("abcdefghijkl"), AMstr("ABCDEFGHIJKL")), 1); + assert_int_equal(AMstrCmp(AMstr("ABCDEFGHIJKL"), AMstr("abcdef")), -1); + assert_int_equal(AMstrCmp(AMstr("abcdef"), AMstr("ABCDEFGHIJKL")), 1); + assert_int_equal(AMstrCmp(AMstr("GHIJKL"), AMstr("abcdef")), -1); + assert_int_equal(AMstrCmp(AMstr("abcdef"), AMstr("GHIJKL")), 1); + /* NUL character inclusion */ + static char const SRC[] = {'a', 'b', 'c', 'd', 'e', 'f', '\0', 'g', 'h', 'i', 'j', 'k', 'l'}; + static AMbyteSpan const NUL_STR = {.src = SRC, .count = 13}; + + assert_int_equal(AMstrCmp(AMstr("abcdef"), NUL_STR), -1); + assert_int_equal(AMstrCmp(NUL_STR, NUL_STR), 0); + assert_int_equal(AMstrCmp(NUL_STR, AMstr("abcdef")), 1); + /* Empty string */ + assert_int_equal(AMstrCmp(AMstr(""), AMstr("abcdefghijkl")), -1); + assert_int_equal(AMstrCmp(AMstr(""), AMstr("")), 0); + assert_int_equal(AMstrCmp(AMstr("abcdefghijkl"), AMstr("")), 1); + /* Invalid string */ + assert_int_equal(AMstrCmp(AMstr(NULL), AMstr("abcdefghijkl")), -1); + assert_int_equal(AMstrCmp(AMstr(NULL), AMstr(NULL)), 0); + assert_int_equal(AMstrCmp(AMstr("abcdefghijkl"), AMstr(NULL)), 1); +} + +static void test_AMstrdup(void** state) { + static char const SRC[] = {'a', 'b', 'c', '\0', 'd', 'e', 'f', '\0', 'g', 'h', 'i', '\0', 'j', 'k', 'l'}; + static AMbyteSpan const NUL_STR = {.src = SRC, .count = 15}; + + /* Default 
substitution ("\\0") for NUL */ + char* dup = AMstrdup(NUL_STR, NULL); + assert_int_equal(strlen(dup), 18); + assert_string_equal(dup, "abc\\0def\\0ghi\\0jkl"); + free(dup); + /* Arbitrary substitution for NUL */ + dup = AMstrdup(NUL_STR, ":-O"); + assert_int_equal(strlen(dup), 21); + assert_string_equal(dup, "abc:-Odef:-Oghi:-Ojkl"); + free(dup); + /* Empty substitution for NUL */ + dup = AMstrdup(NUL_STR, ""); + assert_int_equal(strlen(dup), 12); + assert_string_equal(dup, "abcdefghijkl"); + free(dup); + /* Empty string */ + dup = AMstrdup(AMstr(""), NULL); + assert_int_equal(strlen(dup), 0); + assert_string_equal(dup, ""); + free(dup); + /* Invalid string */ + assert_null(AMstrdup(AMstr(NULL), NULL)); +} + +int run_byte_span_tests(void) { + const struct CMUnitTest tests[] = { + cmocka_unit_test(test_AMbytes), + cmocka_unit_test(test_AMstr), + cmocka_unit_test(test_AMstrCmp), + cmocka_unit_test(test_AMstrdup), + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/rust/automerge-c/test/cmocka_utils.c b/rust/automerge-c/test/cmocka_utils.c new file mode 100644 index 00000000..37c57fb1 --- /dev/null +++ b/rust/automerge-c/test/cmocka_utils.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include + +/* third-party */ +#include +#include +#include +#include + +/* local */ +#include "cmocka_utils.h" + +/** + * \brief Assert that the given expression is true and report failure in terms + * of a line number within a file. + * + * \param[in] c An expression. + * \param[in] file A file's full path string. + * \param[in] line A line number. + */ +#define assert_true_where(c, file, line) _assert_true(cast_ptr_to_largest_integral_type(c), #c, file, line) + +/** + * \brief Assert that the given pointer is non-NULL and report failure in terms + * of a line number within a file. + * + * \param[in] c An expression. + * \param[in] file A file's full path string. + * \param[in] line A line number. + */ +#define assert_non_null_where(c, file, line) assert_true_where(c, file, line) + +/** + * \brief Forces the test to fail immediately and quit, printing the reason in + * terms of a line number within a file. + * + * \param[in] msg A message string into which \p str is interpolated. + * \param[in] str An owned string. + * \param[in] file A file's full path string. + * \param[in] line A line number. + */ +#define fail_msg_where(msg, str, file, line) \ + do { \ + print_error("ERROR: " msg "\n", str); \ + _fail(file, line); \ + } while (0) + +/** + * \brief Forces the test to fail immediately and quit, printing the reason in + * terms of a line number within a file. + * + * \param[in] msg A message string into which \p view.src is interpolated. + * \param[in] view A UTF-8 string view as an `AMbyteSpan` struct. + * \param[in] file A file's full path string. + * \param[in] line A line number. 
+ */ +#define fail_msg_view_where(msg, view, file, line) \ + do { \ + char* const str = AMstrdup(view, NULL); \ + print_error("ERROR: " msg "\n", str); \ + free(str); \ + _fail(file, line); \ + } while (0) + +bool cmocka_cb(AMstack** stack, void* data) { + assert_non_null(data); + AMstackCallbackData* const sc_data = (AMstackCallbackData*)data; + assert_non_null_where(stack, sc_data->file, sc_data->line); + assert_non_null_where(*stack, sc_data->file, sc_data->line); + assert_non_null_where((*stack)->result, sc_data->file, sc_data->line); + if (AMresultStatus((*stack)->result) != AM_STATUS_OK) { + fail_msg_view_where("%s", AMresultError((*stack)->result), sc_data->file, sc_data->line); + return false; + } + /* Test that the types of all item values are members of the mask. */ + AMitems items = AMresultItems((*stack)->result); + AMitem* item = NULL; + while ((item = AMitemsNext(&items, 1)) != NULL) { + AMvalType const tag = AMitemValType(item); + if (!(tag & sc_data->bitmask)) { + fail_msg_where("Unexpected value type `%s`.", AMvalTypeToString(tag), sc_data->file, sc_data->line); + return false; + } + } + return true; +} diff --git a/rust/automerge-c/test/cmocka_utils.h b/rust/automerge-c/test/cmocka_utils.h index 1b488362..b6611bcc 100644 --- a/rust/automerge-c/test/cmocka_utils.h +++ b/rust/automerge-c/test/cmocka_utils.h @@ -1,22 +1,42 @@ -#ifndef CMOCKA_UTILS_H -#define CMOCKA_UTILS_H +#ifndef TESTS_CMOCKA_UTILS_H +#define TESTS_CMOCKA_UTILS_H +#include #include /* third-party */ +#include #include +/* local */ +#include "base_state.h" + /** * \brief Forces the test to fail immediately and quit, printing the reason. * - * \param[in] view A string view as an `AMbyteSpan` struct. + * \param[in] msg A message string into which \p view.src is interpolated. + * \param[in] view A UTF-8 string view as an `AMbyteSpan` struct. */ -#define fail_msg_view(msg, view) do { \ - char* const c_str = test_calloc(1, view.count + 1); \ - strncpy(c_str, view.src, view.count); \ - print_error(msg, c_str); \ - test_free(c_str); \ - fail(); \ -} while (0) +#define fail_msg_view(msg, view) \ + do { \ + char* const c_str = AMstrdup(view, NULL); \ + print_error("ERROR: " msg "\n", c_str); \ + free(c_str); \ + fail(); \ + } while (0) -#endif /* CMOCKA_UTILS_H */ +/** + * \brief Validates the top result in a stack based upon the parameters + * specified within the given data structure and reports violations + * using cmocka assertions. + * + * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. + * \param[in] data A pointer to an owned `AMpushData` struct. + * \return `true` if the top `AMresult` struct in \p stack is valid, `false` + * otherwise. + * \pre \p stack `!= NULL`. + * \pre \p data `!= NULL`. 
+ */ +bool cmocka_cb(AMstack** stack, void* data); + +#endif /* TESTS_CMOCKA_UTILS_H */ diff --git a/rust/automerge-c/test/doc_state.c b/rust/automerge-c/test/doc_state.c new file mode 100644 index 00000000..3cbece50 --- /dev/null +++ b/rust/automerge-c/test/doc_state.c @@ -0,0 +1,27 @@ +#include +#include +#include + +/* third-party */ +#include + +/* local */ +#include +#include "cmocka_utils.h" +#include "doc_state.h" + +int setup_doc(void** state) { + DocState* doc_state = test_calloc(1, sizeof(DocState)); + setup_base((void**)&doc_state->base_state); + AMitemToDoc(AMstackItem(&doc_state->base_state->stack, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), + &doc_state->doc); + *state = doc_state; + return 0; +} + +int teardown_doc(void** state) { + DocState* doc_state = *state; + teardown_base((void**)&doc_state->base_state); + test_free(doc_state); + return 0; +} diff --git a/rust/automerge-c/test/doc_state.h b/rust/automerge-c/test/doc_state.h new file mode 100644 index 00000000..525a49fa --- /dev/null +++ b/rust/automerge-c/test/doc_state.h @@ -0,0 +1,17 @@ +#ifndef TESTS_DOC_STATE_H +#define TESTS_DOC_STATE_H + +/* local */ +#include +#include "base_state.h" + +typedef struct { + BaseState* base_state; + AMdoc* doc; +} DocState; + +int setup_doc(void** state); + +int teardown_doc(void** state); + +#endif /* TESTS_DOC_STATE_H */ diff --git a/rust/automerge-c/test/doc_tests.c b/rust/automerge-c/test/doc_tests.c index 217a4862..c1d21928 100644 --- a/rust/automerge-c/test/doc_tests.c +++ b/rust/automerge-c/test/doc_tests.c @@ -9,12 +9,14 @@ /* local */ #include -#include "group_state.h" -#include "stack_utils.h" +#include +#include "base_state.h" +#include "cmocka_utils.h" +#include "doc_state.h" #include "str_utils.h" typedef struct { - GroupState* group_state; + DocState* doc_state; AMbyteSpan actor_id_str; uint8_t* actor_id_bytes; size_t actor_id_size; @@ -22,7 +24,7 @@ typedef struct { static int setup(void** state) { TestState* test_state = test_calloc(1, sizeof(TestState)); - group_setup((void**)&test_state->group_state); + setup_doc((void**)&test_state->doc_state); test_state->actor_id_str.src = "000102030405060708090a0b0c0d0e0f"; test_state->actor_id_str.count = strlen(test_state->actor_id_str.src); test_state->actor_id_size = test_state->actor_id_str.count / 2; @@ -34,204 +36,195 @@ static int setup(void** state) { static int teardown(void** state) { TestState* test_state = *state; - group_teardown((void**)&test_state->group_state); + teardown_doc((void**)&test_state->doc_state); test_free(test_state->actor_id_bytes); test_free(test_state); return 0; } -static void test_AMkeys_empty() { - AMresultStack* stack = NULL; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMstrs forward = AMpush(&stack, - AMkeys(doc, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsSize(&forward), 0); - AMstrs reverse = AMstrsReversed(&forward); - assert_int_equal(AMstrsSize(&reverse), 0); - assert_null(AMstrsNext(&forward, 1).src); - assert_null(AMstrsPrev(&forward, 1).src); - assert_null(AMstrsNext(&reverse, 1).src); - assert_null(AMstrsPrev(&reverse, 1).src); - AMfreeStack(&stack); -} - -static void test_AMkeys_list() { - AMresultStack* stack = NULL; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMobjId const* const list = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMlistPutInt(doc, list, 0, true, 0)); - 
AMfree(AMlistPutInt(doc, list, 1, true, 0)); - AMfree(AMlistPutInt(doc, list, 2, true, 0)); - AMstrs forward = AMpush(&stack, - AMkeys(doc, list, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsSize(&forward), 3); - AMstrs reverse = AMstrsReversed(&forward); - assert_int_equal(AMstrsSize(&reverse), 3); - /* Forward iterator forward. */ - AMbyteSpan str = AMstrsNext(&forward, 1); - assert_ptr_equal(strstr(str.src, "2@"), str.src); - str = AMstrsNext(&forward, 1); - assert_ptr_equal(strstr(str.src, "3@"), str.src); - str = AMstrsNext(&forward, 1); - assert_ptr_equal(strstr(str.src, "4@"), str.src); - assert_null(AMstrsNext(&forward, 1).src); - // /* Forward iterator reverse. */ - str = AMstrsPrev(&forward, 1); - assert_ptr_equal(strstr(str.src, "4@"), str.src); - str = AMstrsPrev(&forward, 1); - assert_ptr_equal(strstr(str.src, "3@"), str.src); - str = AMstrsPrev(&forward, 1); - assert_ptr_equal(strstr(str.src, "2@"), str.src); - assert_null(AMstrsPrev(&forward, 1).src); - /* Reverse iterator forward. */ - str = AMstrsNext(&reverse, 1); - assert_ptr_equal(strstr(str.src, "4@"), str.src); - str = AMstrsNext(&reverse, 1); - assert_ptr_equal(strstr(str.src, "3@"), str.src); - str = AMstrsNext(&reverse, 1); - assert_ptr_equal(strstr(str.src, "2@"), str.src); - assert_null(AMstrsNext(&reverse, 1).src); - /* Reverse iterator reverse. */ - str = AMstrsPrev(&reverse, 1); - assert_ptr_equal(strstr(str.src, "2@"), str.src); - str = AMstrsPrev(&reverse, 1); - assert_ptr_equal(strstr(str.src, "3@"), str.src); - str = AMstrsPrev(&reverse, 1); - assert_ptr_equal(strstr(str.src, "4@"), str.src); - assert_null(AMstrsPrev(&reverse, 1).src); - AMfreeStack(&stack); -} - -static void test_AMkeys_map() { - AMresultStack* stack = NULL; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("one"), 1)); - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("two"), 2)); - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("three"), 3)); - AMstrs forward = AMpush(&stack, - AMkeys(doc, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsSize(&forward), 3); - AMstrs reverse = AMstrsReversed(&forward); - assert_int_equal(AMstrsSize(&reverse), 3); - /* Forward iterator forward. */ - AMbyteSpan str = AMstrsNext(&forward, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "one", str.count); - str = AMstrsNext(&forward, 1); - assert_int_equal(str.count, 5); - assert_memory_equal(str.src, "three", str.count); - str = AMstrsNext(&forward, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "two", str.count); - assert_null(AMstrsNext(&forward, 1).src); - /* Forward iterator reverse. */ - str = AMstrsPrev(&forward, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "two", str.count); - str = AMstrsPrev(&forward, 1); - assert_int_equal(str.count, 5); - assert_memory_equal(str.src, "three", str.count); - str = AMstrsPrev(&forward, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "one", str.count); - assert_null(AMstrsPrev(&forward, 1).src); - /* Reverse iterator forward. 
*/ - str = AMstrsNext(&reverse, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "two", str.count); - str = AMstrsNext(&reverse, 1); - assert_int_equal(str.count, 5); - assert_memory_equal(str.src, "three", str.count); - str = AMstrsNext(&reverse, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "one", str.count); - assert_null(AMstrsNext(&reverse, 1).src); - /* Reverse iterator reverse. */ - str = AMstrsPrev(&reverse, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "one", str.count); - str = AMstrsPrev(&reverse, 1); - assert_int_equal(str.count, 5); - assert_memory_equal(str.src, "three", str.count); - str = AMstrsPrev(&reverse, 1); - assert_int_equal(str.count, 3); - assert_memory_equal(str.src, "two", str.count); - assert_null(AMstrsPrev(&reverse, 1).src); - AMfreeStack(&stack); -} - -static void test_AMputActor_bytes(void **state) { +static void test_AMkeys_empty(void** state) { TestState* test_state = *state; - AMactorId const* actor_id = AMpush(&test_state->group_state->stack, - AMactorIdInitBytes( - test_state->actor_id_bytes, - test_state->actor_id_size), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(test_state->group_state->doc, actor_id)); - actor_id = AMpush(&test_state->group_state->stack, - AMgetActorId(test_state->group_state->doc), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMitems forward = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_int_equal(AMitemsSize(&forward), 0); + AMitems reverse = AMitemsReversed(&forward); + assert_int_equal(AMitemsSize(&reverse), 0); + assert_null(AMitemsNext(&forward, 1)); + assert_null(AMitemsPrev(&forward, 1)); + assert_null(AMitemsNext(&reverse, 1)); + assert_null(AMitemsPrev(&reverse, 1)); +} + +static void test_AMkeys_list(void** state) { + TestState* test_state = *state; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMlistPutInt(doc, list, 0, true, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutInt(doc, list, 1, true, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutInt(doc, list, 2, true, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMitems forward = AMstackItems(stack_ptr, AMkeys(doc, list, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&forward), 3); + AMitems reverse = AMitemsReversed(&forward); + assert_int_equal(AMitemsSize(&reverse), 3); + /* Forward iterator forward. */ + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "2@"), str.src); + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "3@"), str.src); + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "4@"), str.src); + assert_null(AMitemsNext(&forward, 1)); + // /* Forward iterator reverse. 
*/ + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "4@"), str.src); + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "3@"), str.src); + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_ptr_equal(strstr(str.src, "2@"), str.src); + assert_null(AMitemsPrev(&forward, 1)); + /* Reverse iterator forward. */ + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "4@"), str.src); + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "3@"), str.src); + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "2@"), str.src); + assert_null(AMitemsNext(&reverse, 1)); + /* Reverse iterator reverse. */ + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "2@"), str.src); + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "3@"), str.src); + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_ptr_equal(strstr(str.src, "4@"), str.src); + assert_null(AMitemsPrev(&reverse, 1)); +} + +static void test_AMkeys_map(void** state) { + TestState* test_state = *state; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("one"), 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("two"), 2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("three"), 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMitems forward = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&forward), 3); + AMitems reverse = AMitemsReversed(&forward); + assert_int_equal(AMitemsSize(&reverse), 3); + /* Forward iterator forward. */ + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "one", str.count); + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_int_equal(str.count, 5); + assert_memory_equal(str.src, "three", str.count); + assert_true(AMitemToStr(AMitemsNext(&forward, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "two", str.count); + assert_null(AMitemsNext(&forward, 1)); + /* Forward iterator reverse. */ + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "two", str.count); + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_int_equal(str.count, 5); + assert_memory_equal(str.src, "three", str.count); + assert_true(AMitemToStr(AMitemsPrev(&forward, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "one", str.count); + assert_null(AMitemsPrev(&forward, 1)); + /* Reverse iterator forward. 
*/ + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "two", str.count); + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_int_equal(str.count, 5); + assert_memory_equal(str.src, "three", str.count); + assert_true(AMitemToStr(AMitemsNext(&reverse, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "one", str.count); + assert_null(AMitemsNext(&reverse, 1)); + /* Reverse iterator reverse. */ + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "one", str.count); + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_int_equal(str.count, 5); + assert_memory_equal(str.src, "three", str.count); + assert_true(AMitemToStr(AMitemsPrev(&reverse, 1), &str)); + assert_int_equal(str.count, 3); + assert_memory_equal(str.src, "two", str.count); + assert_null(AMitemsPrev(&reverse, 1)); +} + +static void test_AMputActor_bytes(void** state) { + TestState* test_state = *state; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromBytes(test_state->actor_id_bytes, test_state->actor_id_size), cmocka_cb, + AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->doc_state->doc, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMgetActorId(test_state->doc_state->doc), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); AMbyteSpan const bytes = AMactorIdBytes(actor_id); assert_int_equal(bytes.count, test_state->actor_id_size); assert_memory_equal(bytes.src, test_state->actor_id_bytes, bytes.count); } -static void test_AMputActor_str(void **state) { +static void test_AMputActor_str(void** state) { TestState* test_state = *state; - AMactorId const* actor_id = AMpush(&test_state->group_state->stack, - AMactorIdInitStr(test_state->actor_id_str), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(test_state->group_state->doc, actor_id)); - actor_id = AMpush(&test_state->group_state->stack, - AMgetActorId(test_state->group_state->doc), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(test_state->actor_id_str), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->doc_state->doc, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMgetActorId(test_state->doc_state->doc), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); AMbyteSpan const str = AMactorIdStr(actor_id); assert_int_equal(str.count, test_state->actor_id_str.count); assert_memory_equal(str.src, test_state->actor_id_str.src, str.count); } -static void test_AMspliceText() { - AMresultStack* stack = NULL; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMobjId const* const text = AMpush(&stack, - AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMspliceText(doc, text, 0, 0, AMstr("one + "))); - AMfree(AMspliceText(doc, text, 4, 2, AMstr("two = "))); - AMfree(AMspliceText(doc, text, 8, 2, AMstr("three"))); - AMbyteSpan const str = AMpush(&stack, - AMtext(doc, text, 
NULL), - AM_VALUE_STR, - cmocka_cb).str; - static char const* const STR_VALUE = "one two three"; - assert_int_equal(str.count, strlen(STR_VALUE)); - assert_memory_equal(str.src, STR_VALUE, str.count); - AMfreeStack(&stack); +static void test_AMspliceText(void** state) { + TestState* test_state = *state; + AMstack** stack_ptr = &test_state->doc_state->base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMobjId const* const text = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMspliceText(doc, text, 0, 0, AMstr("one + ")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMspliceText(doc, text, 4, 2, AMstr("two = ")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMspliceText(doc, text, 8, 2, AMstr("three")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMbyteSpan str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMtext(doc, text, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); + assert_int_equal(str.count, strlen("one two three")); + assert_memory_equal(str.src, "one two three", str.count); } int run_doc_tests(void) { const struct CMUnitTest tests[] = { - cmocka_unit_test(test_AMkeys_empty), - cmocka_unit_test(test_AMkeys_list), - cmocka_unit_test(test_AMkeys_map), + cmocka_unit_test_setup_teardown(test_AMkeys_empty, setup, teardown), + cmocka_unit_test_setup_teardown(test_AMkeys_list, setup, teardown), + cmocka_unit_test_setup_teardown(test_AMkeys_map, setup, teardown), cmocka_unit_test_setup_teardown(test_AMputActor_bytes, setup, teardown), cmocka_unit_test_setup_teardown(test_AMputActor_str, setup, teardown), - cmocka_unit_test(test_AMspliceText), + cmocka_unit_test_setup_teardown(test_AMspliceText, setup, teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); diff --git a/rust/automerge-c/test/enum_string_tests.c b/rust/automerge-c/test/enum_string_tests.c new file mode 100644 index 00000000..11131e43 --- /dev/null +++ b/rust/automerge-c/test/enum_string_tests.c @@ -0,0 +1,148 @@ +#include +#include +#include +#include +#include + +/* third-party */ +#include + +/* local */ +#include +#include + +#define assert_to_string(function, tag) assert_string_equal(function(tag), #tag) + +#define assert_from_string(function, type, tag) \ + do { \ + type out; \ + assert_true(function(&out, #tag)); \ + assert_int_equal(out, tag); \ + } while (0) + +static void test_AMidxTypeToString(void** state) { + assert_to_string(AMidxTypeToString, AM_IDX_TYPE_DEFAULT); + assert_to_string(AMidxTypeToString, AM_IDX_TYPE_KEY); + assert_to_string(AMidxTypeToString, AM_IDX_TYPE_POS); + /* Zero tag */ + assert_string_equal(AMidxTypeToString(0), "AM_IDX_TYPE_DEFAULT"); + /* Invalid tag */ + assert_string_equal(AMidxTypeToString(-1), "???"); +} + +static void test_AMidxTypeFromString(void** state) { + assert_from_string(AMidxTypeFromString, AMidxType, AM_IDX_TYPE_DEFAULT); + assert_from_string(AMidxTypeFromString, AMidxType, AM_IDX_TYPE_KEY); + assert_from_string(AMidxTypeFromString, AMidxType, AM_IDX_TYPE_POS); + /* Invalid tag */ + AMidxType out = -1; + assert_false(AMidxTypeFromString(&out, "???")); + assert_int_equal(out, (AMidxType)-1); +} + +static void test_AMobjTypeToString(void** state) { + assert_to_string(AMobjTypeToString, AM_OBJ_TYPE_DEFAULT); + assert_to_string(AMobjTypeToString, AM_OBJ_TYPE_LIST); + assert_to_string(AMobjTypeToString, 
AM_OBJ_TYPE_MAP); + assert_to_string(AMobjTypeToString, AM_OBJ_TYPE_TEXT); + /* Zero tag */ + assert_string_equal(AMobjTypeToString(0), "AM_OBJ_TYPE_DEFAULT"); + /* Invalid tag */ + assert_string_equal(AMobjTypeToString(-1), "???"); +} + +static void test_AMobjTypeFromString(void** state) { + assert_from_string(AMobjTypeFromString, AMobjType, AM_OBJ_TYPE_DEFAULT); + assert_from_string(AMobjTypeFromString, AMobjType, AM_OBJ_TYPE_LIST); + assert_from_string(AMobjTypeFromString, AMobjType, AM_OBJ_TYPE_MAP); + assert_from_string(AMobjTypeFromString, AMobjType, AM_OBJ_TYPE_TEXT); + /* Invalid tag */ + AMobjType out = -1; + assert_false(AMobjTypeFromString(&out, "???")); + assert_int_equal(out, (AMobjType)-1); +} + +static void test_AMstatusToString(void** state) { + assert_to_string(AMstatusToString, AM_STATUS_ERROR); + assert_to_string(AMstatusToString, AM_STATUS_INVALID_RESULT); + assert_to_string(AMstatusToString, AM_STATUS_OK); + /* Zero tag */ + assert_string_equal(AMstatusToString(0), "AM_STATUS_OK"); + /* Invalid tag */ + assert_string_equal(AMstatusToString(-1), "???"); +} + +static void test_AMstatusFromString(void** state) { + assert_from_string(AMstatusFromString, AMstatus, AM_STATUS_ERROR); + assert_from_string(AMstatusFromString, AMstatus, AM_STATUS_INVALID_RESULT); + assert_from_string(AMstatusFromString, AMstatus, AM_STATUS_OK); + /* Invalid tag */ + AMstatus out = -1; + assert_false(AMstatusFromString(&out, "???")); + assert_int_equal(out, (AMstatus)-1); +} + +static void test_AMvalTypeToString(void** state) { + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_ACTOR_ID); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_BOOL); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_BYTES); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_CHANGE); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_CHANGE_HASH); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_COUNTER); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_DEFAULT); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_DOC); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_F64); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_INT); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_NULL); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_OBJ_TYPE); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_STR); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_SYNC_HAVE); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_SYNC_MESSAGE); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_SYNC_STATE); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_TIMESTAMP); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_UINT); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_UNKNOWN); + assert_to_string(AMvalTypeToString, AM_VAL_TYPE_VOID); + /* Zero tag */ + assert_string_equal(AMvalTypeToString(0), "AM_VAL_TYPE_DEFAULT"); + /* Invalid tag */ + assert_string_equal(AMvalTypeToString(-1), "???"); +} + +static void test_AMvalTypeFromString(void** state) { + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_ACTOR_ID); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_BOOL); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_BYTES); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_CHANGE); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_CHANGE_HASH); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_COUNTER); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_DEFAULT); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_DOC); + 
assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_F64); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_INT); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_NULL); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_OBJ_TYPE); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_STR); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_SYNC_HAVE); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_SYNC_MESSAGE); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_SYNC_STATE); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_TIMESTAMP); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_UINT); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_UNKNOWN); + assert_from_string(AMvalTypeFromString, AMvalType, AM_VAL_TYPE_VOID); + /* Invalid tag */ + AMvalType out = -1; + assert_false(AMvalTypeFromString(&out, "???")); + assert_int_equal(out, (AMvalType)-1); +} + +int run_enum_string_tests(void) { + const struct CMUnitTest tests[] = { + cmocka_unit_test(test_AMidxTypeToString), cmocka_unit_test(test_AMidxTypeFromString), + cmocka_unit_test(test_AMobjTypeToString), cmocka_unit_test(test_AMobjTypeFromString), + cmocka_unit_test(test_AMstatusToString), cmocka_unit_test(test_AMstatusFromString), + cmocka_unit_test(test_AMvalTypeToString), cmocka_unit_test(test_AMvalTypeFromString), + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/rust/automerge-c/test/group_state.c b/rust/automerge-c/test/group_state.c deleted file mode 100644 index 0ee14317..00000000 --- a/rust/automerge-c/test/group_state.c +++ /dev/null @@ -1,27 +0,0 @@ -#include -#include -#include - -/* third-party */ -#include - -/* local */ -#include "group_state.h" -#include "stack_utils.h" - -int group_setup(void** state) { - GroupState* group_state = test_calloc(1, sizeof(GroupState)); - group_state->doc = AMpush(&group_state->stack, - AMcreate(NULL), - AM_VALUE_DOC, - cmocka_cb).doc; - *state = group_state; - return 0; -} - -int group_teardown(void** state) { - GroupState* group_state = *state; - AMfreeStack(&group_state->stack); - test_free(group_state); - return 0; -} diff --git a/rust/automerge-c/test/group_state.h b/rust/automerge-c/test/group_state.h deleted file mode 100644 index a71d9dc9..00000000 --- a/rust/automerge-c/test/group_state.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef GROUP_STATE_H -#define GROUP_STATE_H - -/* local */ -#include - -typedef struct { - AMresultStack* stack; - AMdoc* doc; -} GroupState; - -int group_setup(void** state); - -int group_teardown(void** state); - -#endif /* GROUP_STATE_H */ diff --git a/rust/automerge-c/test/item_tests.c b/rust/automerge-c/test/item_tests.c new file mode 100644 index 00000000..a30b0556 --- /dev/null +++ b/rust/automerge-c/test/item_tests.c @@ -0,0 +1,94 @@ +#include +#include +#include +#include +#include + +/* third-party */ +#include + +/* local */ +#include +#include +#include "cmocka_utils.h" +#include "doc_state.h" + +static void test_AMitemResult(void** state) { + enum { ITEM_COUNT = 1000 }; + + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + /* Append the strings to a list so that they'll be in numerical order. 
*/ + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + for (size_t pos = 0; pos != ITEM_COUNT; ++pos) { + size_t const count = snprintf(NULL, 0, "%zu", pos); + char* const src = test_calloc(count + 1, sizeof(char)); + assert_int_equal(sprintf(src, "%zu", pos), count); + AMstackItem(NULL, AMlistPutStr(doc_state->doc, list, pos, true, AMbytes(src, count)), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + test_free(src); + } + /* Get an item iterator. */ + AMitems items = AMstackItems(stack_ptr, AMlistRange(doc_state->doc, list, 0, SIZE_MAX, NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + /* Get the item iterator's result so that it can be freed later. */ + AMresult const* const items_result = (*stack_ptr)->result; + /* Iterate over all of the items and copy their pointers into an array. */ + AMitem* item_ptrs[ITEM_COUNT] = {NULL}; + AMitem* item = NULL; + for (size_t pos = 0; (item = AMitemsNext(&items, 1)) != NULL; ++pos) { + /* The item's reference count should be 1. */ + assert_int_equal(AMitemRefCount(item), 1); + if (pos & 1) { + /* Create a redundant result for an odd item. */ + AMitem* const new_item = AMstackItem(stack_ptr, AMitemResult(item), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + /* The item's old and new pointers will never match. */ + assert_ptr_not_equal(new_item, item); + /* The item's reference count will have been incremented. */ + assert_int_equal(AMitemRefCount(item), 2); + assert_int_equal(AMitemRefCount(new_item), 2); + /* The item's old and new indices should match. */ + assert_int_equal(AMitemIdxType(item), AMitemIdxType(new_item)); + assert_int_equal(AMitemIdxType(item), AM_IDX_TYPE_POS); + size_t pos, new_pos; + assert_true(AMitemPos(item, &pos)); + assert_true(AMitemPos(new_item, &new_pos)); + assert_int_equal(pos, new_pos); + /* The item's old and new object IDs should match. */ + AMobjId const* const obj_id = AMitemObjId(item); + AMobjId const* const new_obj_id = AMitemObjId(new_item); + assert_true(AMobjIdEqual(obj_id, new_obj_id)); + /* The item's old and new value types should match. */ + assert_int_equal(AMitemValType(item), AMitemValType(new_item)); + /* The item's old and new string values should match. */ + AMbyteSpan str; + assert_true(AMitemToStr(item, &str)); + AMbyteSpan new_str; + assert_true(AMitemToStr(new_item, &new_str)); + assert_int_equal(str.count, new_str.count); + assert_memory_equal(str.src, new_str.src, new_str.count); + /* The item's old and new object IDs are one and the same. */ + assert_ptr_equal(obj_id, new_obj_id); + /* The item's old and new string values are one and the same. */ + assert_ptr_equal(str.src, new_str.src); + /* Save the item's new pointer. */ + item_ptrs[pos] = new_item; + } + } + /* Free the item iterator's result. */ + AMresultFree(AMstackPop(stack_ptr, items_result)); + /* An odd item's reference count should be 1 again. 
*/ + for (size_t pos = 1; pos < ITEM_COUNT; pos += 2) { + assert_int_equal(AMitemRefCount(item_ptrs[pos]), 1); + } +} + +int run_item_tests(void) { + const struct CMUnitTest tests[] = { + cmocka_unit_test(test_AMitemResult), + }; + + return cmocka_run_group_tests(tests, setup_doc, teardown_doc); +} diff --git a/rust/automerge-c/test/list_tests.c b/rust/automerge-c/test/list_tests.c index f9bbb340..723dd038 100644 --- a/rust/automerge-c/test/list_tests.c +++ b/rust/automerge-c/test/list_tests.c @@ -11,367 +11,417 @@ /* local */ #include +#include +#include "base_state.h" #include "cmocka_utils.h" -#include "group_state.h" +#include "doc_state.h" #include "macro_utils.h" -#include "stack_utils.h" static void test_AMlistIncrement(void** state) { - GroupState* group_state = *state; - AMobjId const* const list = AMpush( - &group_state->stack, - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMlistPutCounter(group_state->doc, list, 0, true, 0)); - assert_int_equal(AMpush(&group_state->stack, - AMlistGet(group_state->doc, list, 0, NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 0); - AMfree(AMpop(&group_state->stack)); - AMfree(AMlistIncrement(group_state->doc, list, 0, 3)); - assert_int_equal(AMpush(&group_state->stack, - AMlistGet(group_state->doc, list, 0, NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 3); - AMfree(AMpop(&group_state->stack)); + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMlistPutCounter(doc_state->doc, list, 0, true, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + int64_t counter; + assert_true(AMitemToCounter( + AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 0); + AMresultFree(AMstackPop(stack_ptr, NULL)); + AMstackItem(NULL, AMlistIncrement(doc_state->doc, list, 0, 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToCounter( + AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 3); + AMresultFree(AMstackPop(stack_ptr, NULL)); } -#define test_AMlistPut(suffix, mode) test_AMlistPut ## suffix ## _ ## mode +#define test_AMlistPut(suffix, mode) test_AMlistPut##suffix##_##mode -#define static_void_test_AMlistPut(suffix, mode, member, scalar_value) \ -static void test_AMlistPut ## suffix ## _ ## mode(void **state) { \ - GroupState* group_state = *state; \ - AMobjId const* const list = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),\ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - AMfree(AMlistPut ## suffix(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"), \ - scalar_value)); \ - assert_true(AMpush( \ - &group_state->stack, \ - AMlistGet(group_state->doc, list, 0, NULL), \ - AMvalue_discriminant(#suffix), \ - cmocka_cb).member == scalar_value); \ - AMfree(AMpop(&group_state->stack)); \ -} +#define static_void_test_AMlistPut(suffix, mode, type, scalar_value) \ + static void test_AMlistPut##suffix##_##mode(void** state) { \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjId const* const list = AMitemObjId( \ + 
AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + AMstackItem(NULL, AMlistPut##suffix(doc_state->doc, list, 0, !strcmp(#mode, "insert"), scalar_value), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); \ + type value; \ + assert_true(AMitemTo##suffix(AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb, \ + AMexpect(suffix_to_val_type(#suffix))), \ + &value)); \ + assert_true(value == scalar_value); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -#define test_AMlistPutBytes(mode) test_AMlistPutBytes ## _ ## mode +#define test_AMlistPutBytes(mode) test_AMlistPutBytes##_##mode -#define static_void_test_AMlistPutBytes(mode, bytes_value) \ -static void test_AMlistPutBytes_ ## mode(void **state) { \ - static size_t const BYTES_SIZE = sizeof(bytes_value) / sizeof(uint8_t); \ - \ - GroupState* group_state = *state; \ - AMobjId const* const list = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),\ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - AMfree(AMlistPutBytes(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"), \ - AMbytes(bytes_value, BYTES_SIZE))); \ - AMbyteSpan const bytes = AMpush( \ - &group_state->stack, \ - AMlistGet(group_state->doc, list, 0, NULL), \ - AM_VALUE_BYTES, \ - cmocka_cb).bytes; \ - assert_int_equal(bytes.count, BYTES_SIZE); \ - assert_memory_equal(bytes.src, bytes_value, BYTES_SIZE); \ - AMfree(AMpop(&group_state->stack)); \ -} +#define static_void_test_AMlistPutBytes(mode, bytes_value) \ + static void test_AMlistPutBytes_##mode(void** state) { \ + static size_t const BYTES_SIZE = sizeof(bytes_value) / sizeof(uint8_t); \ + \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjId const* const list = AMitemObjId( \ + AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + AMstackItem( \ + NULL, AMlistPutBytes(doc_state->doc, list, 0, !strcmp(#mode, "insert"), AMbytes(bytes_value, BYTES_SIZE)), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); \ + AMbyteSpan bytes; \ + assert_true(AMitemToBytes( \ + AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), \ + &bytes)); \ + assert_int_equal(bytes.count, BYTES_SIZE); \ + assert_memory_equal(bytes.src, bytes_value, BYTES_SIZE); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -#define test_AMlistPutNull(mode) test_AMlistPutNull_ ## mode +#define test_AMlistPutNull(mode) test_AMlistPutNull_##mode -#define static_void_test_AMlistPutNull(mode) \ -static void test_AMlistPutNull_ ## mode(void **state) { \ - GroupState* group_state = *state; \ - AMobjId const* const list = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),\ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - AMfree(AMlistPutNull(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"))); \ - AMresult* const result = AMlistGet(group_state->doc, list, 0, NULL); \ - if (AMresultStatus(result) != AM_STATUS_OK) { \ - fail_msg_view("%s", AMerrorMessage(result)); \ - } \ - assert_int_equal(AMresultSize(result), 1); \ - assert_int_equal(AMresultValue(result).tag, AM_VALUE_NULL); \ - AMfree(result); \ -} +#define static_void_test_AMlistPutNull(mode) \ + static void test_AMlistPutNull_##mode(void** state) { \ + DocState* doc_state = 
*state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjId const* const list = AMitemObjId( \ + AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + AMstackItem(NULL, AMlistPutNull(doc_state->doc, list, 0, !strcmp(#mode, "insert")), cmocka_cb, \ + AMexpect(AM_VAL_TYPE_VOID)); \ + AMresult* result = AMstackResult(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), NULL, NULL); \ + if (AMresultStatus(result) != AM_STATUS_OK) { \ + fail_msg_view("%s", AMresultError(result)); \ + } \ + assert_int_equal(AMresultSize(result), 1); \ + assert_int_equal(AMitemValType(AMresultItem(result)), AM_VAL_TYPE_NULL); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -#define test_AMlistPutObject(label, mode) test_AMlistPutObject_ ## label ## _ ## mode +#define test_AMlistPutObject(label, mode) test_AMlistPutObject_##label##_##mode -#define static_void_test_AMlistPutObject(label, mode) \ -static void test_AMlistPutObject_ ## label ## _ ## mode(void **state) { \ - GroupState* group_state = *state; \ - AMobjId const* const list = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),\ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - AMobjType const obj_type = AMobjType_tag(#label); \ - if (obj_type != AM_OBJ_TYPE_VOID) { \ - AMobjId const* const obj_id = AMpush( \ - &group_state->stack, \ - AMlistPutObject(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"), \ - obj_type), \ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - assert_non_null(obj_id); \ - assert_int_equal(AMobjObjType(group_state->doc, obj_id), obj_type); \ - assert_int_equal(AMobjSize(group_state->doc, obj_id, NULL), 0); \ - } \ - else { \ - AMpush(&group_state->stack, \ - AMlistPutObject(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"), \ - obj_type), \ - AM_VALUE_VOID, \ - NULL); \ - assert_int_not_equal(AMresultStatus(group_state->stack->result), \ - AM_STATUS_OK); \ - } \ - AMfree(AMpop(&group_state->stack)); \ -} +#define static_void_test_AMlistPutObject(label, mode) \ + static void test_AMlistPutObject_##label##_##mode(void** state) { \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjId const* const list = AMitemObjId( \ + AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + AMobjType const obj_type = suffix_to_obj_type(#label); \ + AMobjId const* const obj_id = AMitemObjId( \ + AMstackItem(stack_ptr, AMlistPutObject(doc_state->doc, list, 0, !strcmp(#mode, "insert"), obj_type), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + assert_non_null(obj_id); \ + assert_int_equal(AMobjObjType(doc_state->doc, obj_id), obj_type); \ + assert_int_equal(AMobjSize(doc_state->doc, obj_id, NULL), 0); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -#define test_AMlistPutStr(mode) test_AMlistPutStr ## _ ## mode +#define test_AMlistPutStr(mode) test_AMlistPutStr##_##mode -#define static_void_test_AMlistPutStr(mode, str_value) \ -static void test_AMlistPutStr_ ## mode(void **state) { \ - GroupState* group_state = *state; \ - AMobjId const* const list = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),\ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - AMfree(AMlistPutStr(group_state->doc, \ - list, \ - 0, \ - !strcmp(#mode, "insert"), \ - 
AMstr(str_value))); \ - AMbyteSpan const str = AMpush( \ - &group_state->stack, \ - AMlistGet(group_state->doc, list, 0, NULL), \ - AM_VALUE_STR, \ - cmocka_cb).str; \ - assert_int_equal(str.count, strlen(str_value)); \ - assert_memory_equal(str.src, str_value, str.count); \ - AMfree(AMpop(&group_state->stack)); \ -} +#define static_void_test_AMlistPutStr(mode, str_value) \ + static void test_AMlistPutStr_##mode(void** state) { \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjId const* const list = AMitemObjId( \ + AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + AMstackItem(NULL, AMlistPutStr(doc_state->doc, list, 0, !strcmp(#mode, "insert"), AMstr(str_value)), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); \ + AMbyteSpan str; \ + assert_true(AMitemToStr( \ + AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), \ + &str)); \ + assert_int_equal(str.count, strlen(str_value)); \ + assert_memory_equal(str.src, str_value, str.count); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -static_void_test_AMlistPut(Bool, insert, boolean, true) +static_void_test_AMlistPut(Bool, insert, bool, true); -static_void_test_AMlistPut(Bool, update, boolean, true) +static_void_test_AMlistPut(Bool, update, bool, true); static uint8_t const BYTES_VALUE[] = {INT8_MIN, INT8_MAX / 2, INT8_MAX}; -static_void_test_AMlistPutBytes(insert, BYTES_VALUE) +static_void_test_AMlistPutBytes(insert, BYTES_VALUE); -static_void_test_AMlistPutBytes(update, BYTES_VALUE) +static_void_test_AMlistPutBytes(update, BYTES_VALUE); -static_void_test_AMlistPut(Counter, insert, counter, INT64_MAX) +static_void_test_AMlistPut(Counter, insert, int64_t, INT64_MAX); -static_void_test_AMlistPut(Counter, update, counter, INT64_MAX) +static_void_test_AMlistPut(Counter, update, int64_t, INT64_MAX); -static_void_test_AMlistPut(F64, insert, f64, DBL_MAX) +static_void_test_AMlistPut(F64, insert, double, DBL_MAX); -static_void_test_AMlistPut(F64, update, f64, DBL_MAX) +static_void_test_AMlistPut(F64, update, double, DBL_MAX); -static_void_test_AMlistPut(Int, insert, int_, INT64_MAX) +static_void_test_AMlistPut(Int, insert, int64_t, INT64_MAX); -static_void_test_AMlistPut(Int, update, int_, INT64_MAX) +static_void_test_AMlistPut(Int, update, int64_t, INT64_MAX); -static_void_test_AMlistPutNull(insert) +static_void_test_AMlistPutNull(insert); -static_void_test_AMlistPutNull(update) +static_void_test_AMlistPutNull(update); -static_void_test_AMlistPutObject(List, insert) +static_void_test_AMlistPutObject(List, insert); -static_void_test_AMlistPutObject(List, update) +static_void_test_AMlistPutObject(List, update); -static_void_test_AMlistPutObject(Map, insert) +static_void_test_AMlistPutObject(Map, insert); -static_void_test_AMlistPutObject(Map, update) +static_void_test_AMlistPutObject(Map, update); -static_void_test_AMlistPutObject(Text, insert) +static_void_test_AMlistPutObject(Text, insert); -static_void_test_AMlistPutObject(Text, update) +static_void_test_AMlistPutObject(Text, update); -static_void_test_AMlistPutObject(Void, insert) +static_void_test_AMlistPutStr(insert, + "Hello, " + "world!"); -static_void_test_AMlistPutObject(Void, update) +static_void_test_AMlistPutStr(update, + "Hello," + " world" + "!"); -static_void_test_AMlistPutStr(insert, "Hello, world!") +static_void_test_AMlistPut(Timestamp, insert, int64_t, INT64_MAX); 
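For reference, the `static_void_test_AMlistPut(Timestamp, insert, int64_t, INT64_MAX)` instantiation directly above expands to roughly the test body sketched below under the new `AMstack`/`AMitem` API. This is an approximate, hand-expanded illustration only; the authoritative text is the `static_void_test_AMlistPut` macro defined earlier in this hunk, and every identifier used here comes from that macro and from macro_utils.c.

static void test_AMlistPutTimestamp_insert(void** state) {
    DocState* doc_state = *state;
    AMstack** stack_ptr = &doc_state->base_state->stack;
    /* Create a list object under the root map and keep its object ID. */
    AMobjId const* const list = AMitemObjId(
        AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST),
                    cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE)));
    /* !strcmp("insert", "insert") evaluates to true, so the value is inserted at index 0. */
    AMstackItem(NULL, AMlistPutTimestamp(doc_state->doc, list, 0, !strcmp("insert", "insert"), INT64_MAX),
                cmocka_cb, AMexpect(AM_VAL_TYPE_VOID));
    /* Read the element back; suffix_to_val_type("Timestamp") resolves to AM_VAL_TYPE_TIMESTAMP. */
    int64_t value;
    assert_true(AMitemToTimestamp(AMstackItem(stack_ptr, AMlistGet(doc_state->doc, list, 0, NULL), cmocka_cb,
                                              AMexpect(suffix_to_val_type("Timestamp"))),
                                  &value));
    assert_true(value == INT64_MAX);
    /* Pop from the results stack and free what was popped. */
    AMresultFree(AMstackPop(stack_ptr, NULL));
}

The same pattern covers every scalar suffix: the suffix selects the putter (`AMlistPut##suffix`), the converter (`AMitemTo##suffix`), and, via `suffix_to_val_type()`, the expected value-type tag.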
-static_void_test_AMlistPutStr(update, "Hello, world!") +static_void_test_AMlistPut(Timestamp, update, int64_t, INT64_MAX); -static_void_test_AMlistPut(Timestamp, insert, timestamp, INT64_MAX) +static_void_test_AMlistPut(Uint, insert, uint64_t, UINT64_MAX); -static_void_test_AMlistPut(Timestamp, update, timestamp, INT64_MAX) +static_void_test_AMlistPut(Uint, update, uint64_t, UINT64_MAX); -static_void_test_AMlistPut(Uint, insert, uint, UINT64_MAX) - -static_void_test_AMlistPut(Uint, update, uint, UINT64_MAX) - -static void test_get_list_values(void** state) { - AMresultStack* stack = *state; - AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMobjId const* const list = AMpush( - &stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; +static void test_get_range_values(void** state) { + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc1, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* Insert elements. */ - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("First"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Second"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Third"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Fourth"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Fifth"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Sixth"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Seventh"))); - AMfree(AMlistPutStr(doc1, list, 0, true, AMstr("Eighth"))); - AMfree(AMcommit(doc1, AMstr(NULL), NULL)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("First")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Second")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Third")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Fourth")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Fifth")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Sixth")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Seventh")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 0, true, AMstr("Eighth")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMchangeHashes const v1 = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMdoc* const doc2 = AMpush(&stack, - AMfork(doc1, NULL), - AM_VALUE_DOC, - cmocka_cb).doc; + AMitems const v1 = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMfork(doc1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); - AMfree(AMlistPutStr(doc1, list, 2, false, AMstr("Third V2"))); - AMfree(AMcommit(doc1, AMstr(NULL), NULL)); + AMstackItem(NULL, AMlistPutStr(doc1, list, 2, false, AMstr("Third V2")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr(NULL), 
NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMlistPutStr(doc2, list, 2, false, AMstr("Third V3"))); - AMfree(AMcommit(doc2, AMstr(NULL), NULL)); + AMstackItem(NULL, AMlistPutStr(doc2, list, 2, false, AMstr("Third V3")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc2, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMmerge(doc1, doc2)); + AMstackItem(NULL, AMmerge(doc1, doc2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMlistItems range = AMpush(&stack, - AMlistRange(doc1, list, 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - assert_int_equal(AMlistItemsSize(&range), 8); + /* Forward vs. reverse: complete current list range. */ + AMitems range = + AMstackItems(stack_ptr, AMlistRange(doc1, list, 0, SIZE_MAX, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + size_t size = AMitemsSize(&range); + assert_int_equal(size, 8); + AMitems range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + size_t pos; + assert_true(AMitemPos(AMitemsNext(&range, 1), &pos)); + assert_int_equal(pos, 0); + assert_true(AMitemPos(AMitemsNext(&range_back, 1), &pos)); + assert_int_equal(pos, 7); - AMlistItem const* list_item = NULL; - while ((list_item = AMlistItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMresult* result = AMlistGet(doc1, list, AMlistItemIndex(list_item), NULL); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMlistItemObjId(list_item)); - AMfree(result); + AMitem *item1, *item_back1; + size_t count, middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + size_t pos1, pos_back1; + assert_true(AMitemPos(item1, &pos1)); + assert_true(AMitemPos(item_back1, &pos_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(pos1, pos_back1); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(pos1, pos_back1); + } + AMitem* item2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos1, NULL), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos_back1, NULL), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMlistRange(doc1, list, 3, 6, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMlistItems range_back = AMlistItemsReversed(&range); - assert_int_equal(AMlistItemsSize(&range), 3); - assert_int_equal(AMlistItemIndex(AMlistItemsNext(&range, 1)), 3); - assert_int_equal(AMlistItemIndex(AMlistItemsNext(&range_back, 1)), 5); + /* Forward vs. reverse: partial current list range. 
*/ + range = AMstackItems(stack_ptr, AMlistRange(doc1, list, 1, 6, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 5); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemPos(AMitemsNext(&range, 1), &pos)); + assert_int_equal(pos, 1); + assert_true(AMitemPos(AMitemsNext(&range_back, 1), &pos)); + assert_int_equal(pos, 5); - range = AMlistItemsRewound(&range); - while ((list_item = AMlistItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMresult* result = AMlistGet(doc1, list, AMlistItemIndex(list_item), NULL); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMlistItemObjId(list_item)); - AMfree(result); + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + size_t pos1, pos_back1; + assert_true(AMitemPos(item1, &pos1)); + assert_true(AMitemPos(item_back1, &pos_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(pos1, pos_back1); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(pos1, pos_back1); + } + AMitem* item2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos1, NULL), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos_back1, NULL), NULL, NULL); + /** \note An item returned from an `AMlistGet()` call doesn't include + the index used to retrieve it. */ + assert_int_equal(AMitemIdxType(item2), 0); + assert_int_equal(AMitemIdxType(item_back2), 0); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMlistRange(doc1, list, 0, SIZE_MAX, &v1), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - assert_int_equal(AMlistItemsSize(&range), 8); - while ((list_item = AMlistItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMresult* result = AMlistGet(doc1, list, AMlistItemIndex(list_item), &v1); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMlistItemObjId(list_item)); - AMfree(result); + /* Forward vs. reverse: complete historical map range. 
*/ + range = AMstackItems(stack_ptr, AMlistRange(doc1, list, 0, SIZE_MAX, &v1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 8); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemPos(AMitemsNext(&range, 1), &pos)); + assert_int_equal(pos, 0); + assert_true(AMitemPos(AMitemsNext(&range_back, 1), &pos)); + assert_int_equal(pos, 7); + + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + size_t pos1, pos_back1; + assert_true(AMitemPos(item1, &pos1)); + assert_true(AMitemPos(item_back1, &pos_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(pos1, pos_back1); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(pos1, pos_back1); + } + AMitem* item2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos1, &v1), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos_back1, &v1), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMlistRange(doc1, list, 3, 6, &v1), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - range_back = AMlistItemsReversed(&range); - assert_int_equal(AMlistItemsSize(&range), 3); - assert_int_equal(AMlistItemIndex(AMlistItemsNext(&range, 1)), 3); - assert_int_equal(AMlistItemIndex(AMlistItemsNext(&range_back, 1)), 5); + /* Forward vs. reverse: partial historical map range. */ + range = AMstackItems(stack_ptr, AMlistRange(doc1, list, 2, 7, &v1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 5); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemPos(AMitemsNext(&range, 1), &pos)); + assert_int_equal(pos, 2); + assert_true(AMitemPos(AMitemsNext(&range_back, 1), &pos)); + assert_int_equal(pos, 6); - range = AMlistItemsRewound(&range); - while ((list_item = AMlistItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMresult* result = AMlistGet(doc1, list, AMlistItemIndex(list_item), &v1); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMlistItemObjId(list_item)); - AMfree(result); + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + size_t pos1, pos_back1; + assert_true(AMitemPos(item1, &pos1)); + assert_true(AMitemPos(item_back1, &pos_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. 
*/ + assert_int_equal(pos1, pos_back1); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(pos1, pos_back1); + } + AMitem* item2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos1, &v1), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMlistGet(doc1, list, pos_back1, &v1), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMlistRange(doc1, list, 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMobjItems values = AMpush(&stack, - AMobjValues(doc1, list, NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; - assert_int_equal(AMlistItemsSize(&range), AMobjItemsSize(&values)); - AMobjItem const* value = NULL; - while ((list_item = AMlistItemsNext(&range, 1)) != NULL && - (value = AMobjItemsNext(&values, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMvalue const val2 = AMobjItemValue(value); - assert_true(AMvalueEqual(&val1, &val2)); - assert_true(AMobjIdEqual(AMlistItemObjId(list_item), AMobjItemObjId(value))); + /* List range vs. object range: complete current. */ + range = AMstackItems(stack_ptr, AMlistRange(doc1, list, 0, SIZE_MAX, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMitems obj_items = AMstackItems(stack_ptr, AMobjItems(doc1, list, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&range), AMitemsSize(&obj_items)); + + AMitem *item, *obj_item; + for (item = NULL, obj_item = NULL; item && obj_item; + item = AMitemsNext(&range, 1), obj_item = AMitemsNext(&obj_items, 1)) { + /** \note Object iteration doesn't yield any item indices. */ + assert_true(AMitemIdxType(item)); + assert_false(AMitemIdxType(obj_item)); + assert_true(AMitemEqual(item, obj_item)); + assert_true(AMobjIdEqual(AMitemObjId(item), AMitemObjId(obj_item))); } - range = AMpush(&stack, - AMlistRange(doc1, list, 0, SIZE_MAX, &v1), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - values = AMpush(&stack, - AMobjValues(doc1, list, &v1), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; - assert_int_equal(AMlistItemsSize(&range), AMobjItemsSize(&values)); - while ((list_item = AMlistItemsNext(&range, 1)) != NULL && - (value = AMobjItemsNext(&values, 1)) != NULL) { - AMvalue const val1 = AMlistItemValue(list_item); - AMvalue const val2 = AMobjItemValue(value); - assert_true(AMvalueEqual(&val1, &val2)); - assert_true(AMobjIdEqual(AMlistItemObjId(list_item), AMobjItemObjId(value))); + /* List range vs. object range: complete historical. */ + range = AMstackItems(stack_ptr, AMlistRange(doc1, list, 0, SIZE_MAX, &v1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + obj_items = AMstackItems(stack_ptr, AMobjItems(doc1, list, &v1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&range), AMitemsSize(&obj_items)); + + for (item = NULL, obj_item = NULL; item && obj_item; + item = AMitemsNext(&range, 1), obj_item = AMitemsNext(&obj_items, 1)) { + /** \note Object iteration doesn't yield any item indices. 
*/ + assert_true(AMitemIdxType(item)); + assert_false(AMitemIdxType(obj_item)); + assert_true(AMitemEqual(item, obj_item)); + assert_true(AMobjIdEqual(AMitemObjId(item), AMitemObjId(obj_item))); } } -/** \brief A JavaScript application can introduce NUL (`\0`) characters into a - * list object's string value which will truncate it in a C application. +/** + * \brief A JavaScript application can introduce NUL (`\0`) characters into a + * list object's string value which will truncate it in a C application. */ static void test_get_NUL_string_value(void** state) { /* @@ -381,60 +431,52 @@ static void test_get_NUL_string_value(void** state) { doc[0] = 'o\0ps'; }); const bytes = Automerge.save(doc); - console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], bytes).join(", ") + "};"); + console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], + bytes).join(", ") + "};"); */ static uint8_t const OOPS_VALUE[] = {'o', '\0', 'p', 's'}; static size_t const OOPS_SIZE = sizeof(OOPS_VALUE) / sizeof(uint8_t); static uint8_t const SAVED_DOC[] = { - 133, 111, 74, 131, 224, 28, 197, 17, 0, 113, 1, 16, 246, 137, 63, 193, - 255, 181, 76, 79, 129, 213, 133, 29, 214, 158, 164, 15, 1, 207, 184, - 14, 57, 1, 194, 79, 247, 82, 160, 134, 227, 144, 5, 241, 136, 205, - 238, 250, 251, 54, 34, 250, 210, 96, 204, 132, 153, 203, 110, 109, 6, - 6, 1, 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 3, 33, 2, 35, 2, 52, - 1, 66, 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, - 127, 0, 127, 7, 127, 1, 48, 127, 0, 127, 1, 1, 127, 1, 127, 70, 111, - 0, 112, 115, 127, 0, 0}; + 133, 111, 74, 131, 224, 28, 197, 17, 0, 113, 1, 16, 246, 137, 63, 193, 255, 181, 76, 79, 129, + 213, 133, 29, 214, 158, 164, 15, 1, 207, 184, 14, 57, 1, 194, 79, 247, 82, 160, 134, 227, 144, + 5, 241, 136, 205, 238, 250, 251, 54, 34, 250, 210, 96, 204, 132, 153, 203, 110, 109, 6, 6, 1, + 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 3, 33, 2, 35, 2, 52, 1, 66, + 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, 127, 0, 127, 7, 127, + 1, 48, 127, 0, 127, 1, 1, 127, 1, 127, 70, 111, 0, 112, 115, 127, 0, 0}; static size_t const SAVED_DOC_SIZE = sizeof(SAVED_DOC) / sizeof(uint8_t); - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, - AMload(SAVED_DOC, SAVED_DOC_SIZE), - AM_VALUE_DOC, - cmocka_cb).doc; - AMbyteSpan const str = AMpush(&stack, - AMlistGet(doc, AM_ROOT, 0, NULL), - AM_VALUE_STR, - cmocka_cb).str; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(SAVED_DOC, SAVED_DOC_SIZE), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, AM_ROOT, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_not_equal(str.count, strlen(OOPS_VALUE)); assert_int_equal(str.count, OOPS_SIZE); assert_memory_equal(str.src, OOPS_VALUE, str.count); } static void test_insert_at_index(void** state) { - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - - AMobjId const* const list = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMobjId const* const list = + 
AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* Insert both at the same index. */ - AMfree(AMlistPutUint(doc, list, 0, true, 0)); - AMfree(AMlistPutUint(doc, list, 0, true, 1)); + AMstackItem(NULL, AMlistPutUint(doc, list, 0, true, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutUint(doc, list, 0, true, 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); assert_int_equal(AMobjSize(doc, list, NULL), 2); - AMstrs const keys = AMpush(&stack, - AMkeys(doc, list, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsSize(&keys), 2); - AMlistItems const range = AMpush(&stack, - AMlistRange(doc, list, 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - assert_int_equal(AMlistItemsSize(&range), 2); + AMitems const keys = AMstackItems(stack_ptr, AMkeys(doc, list, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&keys), 2); + AMitems const range = + AMstackItems(stack_ptr, AMlistRange(doc, list, 0, SIZE_MAX, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_UINT)); + assert_int_equal(AMitemsSize(&range), 2); } int run_list_tests(void) { @@ -458,18 +500,16 @@ int run_list_tests(void) { cmocka_unit_test(test_AMlistPutObject(Map, update)), cmocka_unit_test(test_AMlistPutObject(Text, insert)), cmocka_unit_test(test_AMlistPutObject(Text, update)), - cmocka_unit_test(test_AMlistPutObject(Void, insert)), - cmocka_unit_test(test_AMlistPutObject(Void, update)), cmocka_unit_test(test_AMlistPutStr(insert)), cmocka_unit_test(test_AMlistPutStr(update)), cmocka_unit_test(test_AMlistPut(Timestamp, insert)), cmocka_unit_test(test_AMlistPut(Timestamp, update)), cmocka_unit_test(test_AMlistPut(Uint, insert)), cmocka_unit_test(test_AMlistPut(Uint, update)), - cmocka_unit_test_setup_teardown(test_get_list_values, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_get_NUL_string_value, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_insert_at_index, setup_stack, teardown_stack), + cmocka_unit_test_setup_teardown(test_get_range_values, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_get_NUL_string_value, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_insert_at_index, setup_base, teardown_base), }; - return cmocka_run_group_tests(tests, group_setup, group_teardown); + return cmocka_run_group_tests(tests, setup_doc, teardown_doc); } diff --git a/rust/automerge-c/test/macro_utils.c b/rust/automerge-c/test/macro_utils.c index 6d7578b6..3a546eb5 100644 --- a/rust/automerge-c/test/macro_utils.c +++ b/rust/automerge-c/test/macro_utils.c @@ -3,23 +3,36 @@ /* local */ #include "macro_utils.h" -AMvalueVariant AMvalue_discriminant(char const* suffix) { - if (!strcmp(suffix, "Bool")) return AM_VALUE_BOOLEAN; - else if (!strcmp(suffix, "Bytes")) return AM_VALUE_BYTES; - else if (!strcmp(suffix, "Counter")) return AM_VALUE_COUNTER; - else if (!strcmp(suffix, "F64")) return AM_VALUE_F64; - else if (!strcmp(suffix, "Int")) return AM_VALUE_INT; - else if (!strcmp(suffix, "Null")) return AM_VALUE_NULL; - else if (!strcmp(suffix, "Str")) return AM_VALUE_STR; - else if (!strcmp(suffix, "Timestamp")) return AM_VALUE_TIMESTAMP; - else if (!strcmp(suffix, "Uint")) return AM_VALUE_UINT; - else return AM_VALUE_VOID; +AMobjType suffix_to_obj_type(char const* obj_type_label) { + if (!strcmp(obj_type_label, "List")) + return AM_OBJ_TYPE_LIST; + else if (!strcmp(obj_type_label, "Map")) + return 
AM_OBJ_TYPE_MAP; + else if (!strcmp(obj_type_label, "Text")) + return AM_OBJ_TYPE_TEXT; + else + return AM_OBJ_TYPE_DEFAULT; } -AMobjType AMobjType_tag(char const* obj_type_label) { - if (!strcmp(obj_type_label, "List")) return AM_OBJ_TYPE_LIST; - else if (!strcmp(obj_type_label, "Map")) return AM_OBJ_TYPE_MAP; - else if (!strcmp(obj_type_label, "Text")) return AM_OBJ_TYPE_TEXT; - else if (!strcmp(obj_type_label, "Void")) return AM_OBJ_TYPE_VOID; - else return 0; +AMvalType suffix_to_val_type(char const* suffix) { + if (!strcmp(suffix, "Bool")) + return AM_VAL_TYPE_BOOL; + else if (!strcmp(suffix, "Bytes")) + return AM_VAL_TYPE_BYTES; + else if (!strcmp(suffix, "Counter")) + return AM_VAL_TYPE_COUNTER; + else if (!strcmp(suffix, "F64")) + return AM_VAL_TYPE_F64; + else if (!strcmp(suffix, "Int")) + return AM_VAL_TYPE_INT; + else if (!strcmp(suffix, "Null")) + return AM_VAL_TYPE_NULL; + else if (!strcmp(suffix, "Str")) + return AM_VAL_TYPE_STR; + else if (!strcmp(suffix, "Timestamp")) + return AM_VAL_TYPE_TIMESTAMP; + else if (!strcmp(suffix, "Uint")) + return AM_VAL_TYPE_UINT; + else + return AM_VAL_TYPE_DEFAULT; } diff --git a/rust/automerge-c/test/macro_utils.h b/rust/automerge-c/test/macro_utils.h index 62e262ce..e4c2c5b9 100644 --- a/rust/automerge-c/test/macro_utils.h +++ b/rust/automerge-c/test/macro_utils.h @@ -1,24 +1,23 @@ -#ifndef MACRO_UTILS_H -#define MACRO_UTILS_H +#ifndef TESTS_MACRO_UTILS_H +#define TESTS_MACRO_UTILS_H /* local */ #include /** - * \brief Gets the result value discriminant corresponding to a function name - * suffix. + * \brief Gets the object type tag corresponding to an object type suffix. * - * \param[in] suffix A string. - * \return An `AMvalue` struct discriminant. - */ -AMvalueVariant AMvalue_discriminant(char const* suffix); - -/** - * \brief Gets the object type tag corresponding to an object type label. - * - * \param[in] obj_type_label A string. + * \param[in] suffix An object type suffix string. * \return An `AMobjType` enum tag. */ -AMobjType AMobjType_tag(char const* obj_type_label); +AMobjType suffix_to_obj_type(char const* suffix); -#endif /* MACRO_UTILS_H */ +/** + * \brief Gets the value type tag corresponding to a value type suffix. + * + * \param[in] suffix A value type suffix string. + * \return An `AMvalType` enum tag. 
+ */ +AMvalType suffix_to_val_type(char const* suffix); + +#endif /* TESTS_MACRO_UTILS_H */ diff --git a/rust/automerge-c/test/main.c b/rust/automerge-c/test/main.c index 09b71bd5..2996c9b3 100644 --- a/rust/automerge-c/test/main.c +++ b/rust/automerge-c/test/main.c @@ -1,6 +1,6 @@ +#include #include #include -#include #include /* third-party */ @@ -8,8 +8,14 @@ extern int run_actor_id_tests(void); +extern int run_byte_span_tests(void); + extern int run_doc_tests(void); +extern int run_enum_string_tests(void); + +extern int run_item_tests(void); + extern int run_list_tests(void); extern int run_map_tests(void); @@ -17,11 +23,6 @@ extern int run_map_tests(void); extern int run_ported_wasm_suite(void); int main(void) { - return ( - run_actor_id_tests() + - run_doc_tests() + - run_list_tests() + - run_map_tests() + - run_ported_wasm_suite() - ); + return (run_actor_id_tests() + run_byte_span_tests() + run_doc_tests() + run_enum_string_tests() + + run_item_tests() + run_list_tests() + run_map_tests() + run_ported_wasm_suite()); } diff --git a/rust/automerge-c/test/map_tests.c b/rust/automerge-c/test/map_tests.c index 194da2e8..2ee2e69a 100644 --- a/rust/automerge-c/test/map_tests.c +++ b/rust/automerge-c/test/map_tests.c @@ -11,144 +11,133 @@ /* local */ #include +#include +#include +#include "base_state.h" #include "cmocka_utils.h" -#include "group_state.h" +#include "doc_state.h" #include "macro_utils.h" -#include "stack_utils.h" static void test_AMmapIncrement(void** state) { - GroupState* group_state = *state; - AMfree(AMmapPutCounter(group_state->doc, AM_ROOT, AMstr("Counter"), 0)); - assert_int_equal(AMpush(&group_state->stack, - AMmapGet(group_state->doc, AM_ROOT, AMstr("Counter"), NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 0); - AMfree(AMpop(&group_state->stack)); - AMfree(AMmapIncrement(group_state->doc, AM_ROOT, AMstr("Counter"), 3)); - assert_int_equal(AMpush(&group_state->stack, - AMmapGet(group_state->doc, AM_ROOT, AMstr("Counter"), NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 3); - AMfree(AMpop(&group_state->stack)); + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + AMstackItem(NULL, AMmapPutCounter(doc_state->doc, AM_ROOT, AMstr("Counter"), 0), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + int64_t counter; + assert_true(AMitemToCounter(AMstackItem(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, AMstr("Counter"), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 0); + AMresultFree(AMstackPop(stack_ptr, NULL)); + AMstackItem(NULL, AMmapIncrement(doc_state->doc, AM_ROOT, AMstr("Counter"), 3), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToCounter(AMstackItem(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, AMstr("Counter"), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 3); + AMresultFree(AMstackPop(stack_ptr, NULL)); } -#define test_AMmapPut(suffix) test_AMmapPut ## suffix +#define test_AMmapPut(suffix) test_AMmapPut##suffix -#define static_void_test_AMmapPut(suffix, member, scalar_value) \ -static void test_AMmapPut ## suffix(void **state) { \ - GroupState* group_state = *state; \ - AMfree(AMmapPut ## suffix(group_state->doc, \ - AM_ROOT, \ - AMstr(#suffix), \ - scalar_value)); \ - assert_true(AMpush( \ - &group_state->stack, \ - AMmapGet(group_state->doc, AM_ROOT, AMstr(#suffix), NULL), \ - AMvalue_discriminant(#suffix), \ - cmocka_cb).member == scalar_value); \ - AMfree(AMpop(&group_state->stack)); \ -} +#define 
static_void_test_AMmapPut(suffix, type, scalar_value) \ + static void test_AMmapPut##suffix(void** state) { \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMstackItem(NULL, AMmapPut##suffix(doc_state->doc, AM_ROOT, AMstr(#suffix), scalar_value), cmocka_cb, \ + AMexpect(AM_VAL_TYPE_VOID)); \ + type value; \ + assert_true(AMitemTo##suffix(AMstackItem(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, AMstr(#suffix), NULL), \ + cmocka_cb, AMexpect(suffix_to_val_type(#suffix))), \ + &value)); \ + assert_true(value == scalar_value); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -static void test_AMmapPutBytes(void **state) { +static void test_AMmapPutBytes(void** state) { static AMbyteSpan const KEY = {"Bytes", 5}; static uint8_t const BYTES_VALUE[] = {INT8_MIN, INT8_MAX / 2, INT8_MAX}; static size_t const BYTES_SIZE = sizeof(BYTES_VALUE) / sizeof(uint8_t); - GroupState* group_state = *state; - AMfree(AMmapPutBytes(group_state->doc, - AM_ROOT, - KEY, - AMbytes(BYTES_VALUE, BYTES_SIZE))); - AMbyteSpan const bytes = AMpush(&group_state->stack, - AMmapGet(group_state->doc, AM_ROOT, KEY, NULL), - AM_VALUE_BYTES, - cmocka_cb).bytes; + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + AMstackItem(NULL, AMmapPutBytes(doc_state->doc, AM_ROOT, KEY, AMbytes(BYTES_VALUE, BYTES_SIZE)), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMbyteSpan bytes; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, KEY, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), + &bytes)); assert_int_equal(bytes.count, BYTES_SIZE); assert_memory_equal(bytes.src, BYTES_VALUE, BYTES_SIZE); - AMfree(AMpop(&group_state->stack)); + AMresultFree(AMstackPop(stack_ptr, NULL)); } -static void test_AMmapPutNull(void **state) { +static void test_AMmapPutNull(void** state) { static AMbyteSpan const KEY = {"Null", 4}; - GroupState* group_state = *state; - AMfree(AMmapPutNull(group_state->doc, AM_ROOT, KEY)); - AMresult* const result = AMmapGet(group_state->doc, AM_ROOT, KEY, NULL); + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + AMstackItem(NULL, AMmapPutNull(doc_state->doc, AM_ROOT, KEY), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMresult* result = AMstackResult(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, KEY, NULL), NULL, NULL); if (AMresultStatus(result) != AM_STATUS_OK) { - fail_msg_view("%s", AMerrorMessage(result)); + fail_msg_view("%s", AMresultError(result)); } assert_int_equal(AMresultSize(result), 1); - assert_int_equal(AMresultValue(result).tag, AM_VALUE_NULL); - AMfree(result); + AMitem* item = AMresultItem(result); + assert_int_equal(AMitemValType(item), AM_VAL_TYPE_NULL); } -#define test_AMmapPutObject(label) test_AMmapPutObject_ ## label +#define test_AMmapPutObject(label) test_AMmapPutObject_##label -#define static_void_test_AMmapPutObject(label) \ -static void test_AMmapPutObject_ ## label(void **state) { \ - GroupState* group_state = *state; \ - AMobjType const obj_type = AMobjType_tag(#label); \ - if (obj_type != AM_OBJ_TYPE_VOID) { \ - AMobjId const* const obj_id = AMpush( \ - &group_state->stack, \ - AMmapPutObject(group_state->doc, \ - AM_ROOT, \ - AMstr(#label), \ - obj_type), \ - AM_VALUE_OBJ_ID, \ - cmocka_cb).obj_id; \ - assert_non_null(obj_id); \ - assert_int_equal(AMobjObjType(group_state->doc, obj_id), obj_type); \ - assert_int_equal(AMobjSize(group_state->doc, obj_id, NULL), 0); \ - } \ - else { \ - AMpush(&group_state->stack, \ - 
AMmapPutObject(group_state->doc, \ - AM_ROOT, \ - AMstr(#label), \ - obj_type), \ - AM_VALUE_VOID, \ - NULL); \ - assert_int_not_equal(AMresultStatus(group_state->stack->result), \ - AM_STATUS_OK); \ - } \ - AMfree(AMpop(&group_state->stack)); \ -} +#define static_void_test_AMmapPutObject(label) \ + static void test_AMmapPutObject_##label(void** state) { \ + DocState* doc_state = *state; \ + AMstack** stack_ptr = &doc_state->base_state->stack; \ + AMobjType const obj_type = suffix_to_obj_type(#label); \ + AMobjId const* const obj_id = \ + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc_state->doc, AM_ROOT, AMstr(#label), obj_type), \ + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); \ + assert_non_null(obj_id); \ + assert_int_equal(AMobjObjType(doc_state->doc, obj_id), obj_type); \ + assert_int_equal(AMobjSize(doc_state->doc, obj_id, NULL), 0); \ + AMresultFree(AMstackPop(stack_ptr, NULL)); \ + } -static void test_AMmapPutStr(void **state) { - GroupState* group_state = *state; - AMfree(AMmapPutStr(group_state->doc, AM_ROOT, AMstr("Str"), AMstr("Hello, world!"))); - AMbyteSpan const str = AMpush(&group_state->stack, - AMmapGet(group_state->doc, AM_ROOT, AMstr("Str"), NULL), - AM_VALUE_STR, - cmocka_cb).str; +static void test_AMmapPutStr(void** state) { + DocState* doc_state = *state; + AMstack** stack_ptr = &doc_state->base_state->stack; + AMstackItem(NULL, AMmapPutStr(doc_state->doc, AM_ROOT, AMstr("Str"), AMstr("Hello, world!")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMbyteSpan str; + assert_true(AMitemToStr(AMstackItem(stack_ptr, AMmapGet(doc_state->doc, AM_ROOT, AMstr("Str"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)), + &str)); assert_int_equal(str.count, strlen("Hello, world!")); assert_memory_equal(str.src, "Hello, world!", str.count); - AMfree(AMpop(&group_state->stack)); + AMresultFree(AMstackPop(stack_ptr, NULL)); } -static_void_test_AMmapPut(Bool, boolean, true) +static_void_test_AMmapPut(Bool, bool, true); -static_void_test_AMmapPut(Counter, counter, INT64_MAX) +static_void_test_AMmapPut(Counter, int64_t, INT64_MAX); -static_void_test_AMmapPut(F64, f64, DBL_MAX) +static_void_test_AMmapPut(F64, double, DBL_MAX); -static_void_test_AMmapPut(Int, int_, INT64_MAX) +static_void_test_AMmapPut(Int, int64_t, INT64_MAX); -static_void_test_AMmapPutObject(List) +static_void_test_AMmapPutObject(List); -static_void_test_AMmapPutObject(Map) +static_void_test_AMmapPutObject(Map); -static_void_test_AMmapPutObject(Text) +static_void_test_AMmapPutObject(Text); -static_void_test_AMmapPutObject(Void) +static_void_test_AMmapPut(Timestamp, int64_t, INT64_MAX); -static_void_test_AMmapPut(Timestamp, timestamp, INT64_MAX) +static_void_test_AMmapPut(Uint, int64_t, UINT64_MAX); -static_void_test_AMmapPut(Uint, uint, UINT64_MAX) - -/** \brief A JavaScript application can introduce NUL (`\0`) characters into a - * map object's key which will truncate it in a C application. +/** + * \brief A JavaScript application can introduce NUL (`\0`) characters into + * a map object's key which will truncate it in a C application. 
*/ static void test_get_NUL_key(void** state) { /* @@ -158,39 +147,37 @@ static void test_get_NUL_key(void** state) { doc['o\0ps'] = 'oops'; }); const bytes = Automerge.save(doc); - console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], bytes).join(", ") + "};"); + console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], + bytes).join(", ") + "};"); */ static uint8_t const OOPS_SRC[] = {'o', '\0', 'p', 's'}; static AMbyteSpan const OOPS_KEY = {.src = OOPS_SRC, .count = sizeof(OOPS_SRC) / sizeof(uint8_t)}; static uint8_t const SAVED_DOC[] = { - 133, 111, 74, 131, 233, 150, 60, 244, 0, 116, 1, 16, 223, 253, 146, - 193, 58, 122, 66, 134, 151, 225, 210, 51, 58, 86, 247, 8, 1, 49, 118, - 234, 228, 42, 116, 171, 13, 164, 99, 244, 27, 19, 150, 44, 201, 136, - 222, 219, 90, 246, 226, 123, 77, 120, 157, 155, 55, 182, 2, 178, 64, 6, - 1, 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 6, 33, 2, 35, 2, 52, 1, - 66, 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, 127, 0, - 127, 7, 127, 4, 111, 0, 112, 115, 127, 0, 127, 1, 1, 127, 1, 127, 70, - 111, 111, 112, 115, 127, 0, 0 - }; + 133, 111, 74, 131, 233, 150, 60, 244, 0, 116, 1, 16, 223, 253, 146, 193, 58, 122, 66, 134, 151, + 225, 210, 51, 58, 86, 247, 8, 1, 49, 118, 234, 228, 42, 116, 171, 13, 164, 99, 244, 27, 19, + 150, 44, 201, 136, 222, 219, 90, 246, 226, 123, 77, 120, 157, 155, 55, 182, 2, 178, 64, 6, 1, + 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 6, 33, 2, 35, 2, 52, 1, 66, + 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, 127, 0, 127, 7, 127, + 4, 111, 0, 112, 115, 127, 0, 127, 1, 1, 127, 1, 127, 70, 111, 111, 112, 115, 127, 0, 0}; static size_t const SAVED_DOC_SIZE = sizeof(SAVED_DOC) / sizeof(uint8_t); - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, - AMload(SAVED_DOC, SAVED_DOC_SIZE), - AM_VALUE_DOC, - cmocka_cb).doc; - AMbyteSpan const str = AMpush(&stack, - AMmapGet(doc, AM_ROOT, OOPS_KEY, NULL), - AM_VALUE_STR, - cmocka_cb).str; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(SAVED_DOC, SAVED_DOC_SIZE), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, OOPS_KEY, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_not_equal(OOPS_KEY.count, strlen(OOPS_KEY.src)); assert_int_equal(str.count, strlen("oops")); assert_memory_equal(str.src, "oops", str.count); } -/** \brief A JavaScript application can introduce NUL (`\0`) characters into a - * map object's string value which will truncate it in a C application. +/** + * \brief A JavaScript application can introduce NUL (`\0`) characters into a + * map object's string value which will truncate it in a C application. 
*/ static void test_get_NUL_string_value(void** state) { /* @@ -200,1209 +187,1369 @@ static void test_get_NUL_string_value(void** state) { doc.oops = 'o\0ps'; }); const bytes = Automerge.save(doc); - console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], bytes).join(", ") + "};"); + console.log("static uint8_t const SAVED_DOC[] = {" + Array.apply([], + bytes).join(", ") + "};"); */ static uint8_t const OOPS_VALUE[] = {'o', '\0', 'p', 's'}; static size_t const OOPS_SIZE = sizeof(OOPS_VALUE) / sizeof(uint8_t); static uint8_t const SAVED_DOC[] = { - 133, 111, 74, 131, 63, 94, 151, 29, 0, 116, 1, 16, 156, 159, 189, 12, - 125, 55, 71, 154, 136, 104, 237, 186, 45, 224, 32, 22, 1, 36, 163, - 164, 222, 81, 42, 1, 247, 231, 156, 54, 222, 76, 6, 109, 18, 172, 75, - 36, 118, 120, 68, 73, 87, 186, 230, 127, 68, 19, 81, 149, 185, 6, 1, - 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 6, 33, 2, 35, 2, 52, 1, - 66, 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, 127, - 0, 127, 7, 127, 4, 111, 111, 112, 115, 127, 0, 127, 1, 1, 127, 1, 127, - 70, 111, 0, 112, 115, 127, 0, 0 - }; + 133, 111, 74, 131, 63, 94, 151, 29, 0, 116, 1, 16, 156, 159, 189, 12, 125, 55, 71, 154, 136, + 104, 237, 186, 45, 224, 32, 22, 1, 36, 163, 164, 222, 81, 42, 1, 247, 231, 156, 54, 222, 76, + 6, 109, 18, 172, 75, 36, 118, 120, 68, 73, 87, 186, 230, 127, 68, 19, 81, 149, 185, 6, 1, + 2, 3, 2, 19, 2, 35, 2, 64, 2, 86, 2, 8, 21, 6, 33, 2, 35, 2, 52, 1, 66, + 2, 86, 2, 87, 4, 128, 1, 2, 127, 0, 127, 1, 127, 1, 127, 0, 127, 0, 127, 7, 127, + 4, 111, 111, 112, 115, 127, 0, 127, 1, 1, 127, 1, 127, 70, 111, 0, 112, 115, 127, 0, 0}; static size_t const SAVED_DOC_SIZE = sizeof(SAVED_DOC) / sizeof(uint8_t); - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, - AMload(SAVED_DOC, SAVED_DOC_SIZE), - AM_VALUE_DOC, - cmocka_cb).doc; - AMbyteSpan const str = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("oops"), NULL), - AM_VALUE_STR, - cmocka_cb).str; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(SAVED_DOC, SAVED_DOC_SIZE), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("oops"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), + &str)); assert_int_not_equal(str.count, strlen(OOPS_VALUE)); assert_int_equal(str.count, OOPS_SIZE); assert_memory_equal(str.src, OOPS_VALUE, str.count); } static void test_range_iter_map(void** state) { - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("a"), 3)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("b"), 4)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("c"), 5)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("d"), 6)); - AMfree(AMcommit(doc, AMstr(NULL), NULL)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("a"), 7)); - AMfree(AMcommit(doc, AMstr(NULL), NULL)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("a"), 8)); - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("d"), 9)); - AMfree(AMcommit(doc, AMstr(NULL), NULL)); - AMactorId const* const actor_id = AMpush(&stack, - AMgetActorId(doc), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMmapItems map_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - assert_int_equal(AMmapItemsSize(&map_items), 4); + BaseState* base_state = *state; + AMstack** stack_ptr = 
&base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("a"), 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("b"), 4), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("c"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("d"), 6), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("a"), 7), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("a"), 8), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("d"), 9), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMactorId const* actor_id; + assert_true(AMitemToActorId(AMstackItem(stack_ptr, AMgetActorId(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMitems map_items = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)); + assert_int_equal(AMitemsSize(&map_items), 4); /* ["b"-"d") */ - AMmapItems range = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr("b"), AMstr("d"), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + AMitems range = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr("b"), AMstr("d"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)); /* First */ - AMmapItem const* next = AMmapItemsNext(&range, 1); + AMitem* next = AMitemsNext(&range, 1); assert_non_null(next); - AMbyteSpan key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "b", key.count); - AMvalue next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 4); - AMobjId const* next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + uint64_t uint; + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 4); + AMobjId const* next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "c", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 5); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 5); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* 
Third */ - assert_null(AMmapItemsNext(&range, 1)); + assert_null(AMitemsNext(&range, 1)); /* ["b"-) */ - range = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr("b"), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + range = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr("b"), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)); /* First */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "b", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 4); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 4); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "c", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 5); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 5); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "d", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 9); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 9); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 7); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Fourth */ - assert_null(AMmapItemsNext(&range, 1)); + assert_null(AMitemsNext(&range, 1)); /* [-"d") */ - range = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr("d"), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + range = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr("d"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)); /* First */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "a", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, 
AM_VALUE_UINT); - assert_int_equal(next_value.uint, 8); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 8); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 6); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "b", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 4); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 4); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "c", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 5); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 5); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Fourth */ - assert_null(AMmapItemsNext(&range, 1)); + assert_null(AMitemsNext(&range, 1)); /* ["a"-) */ - range = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr("a"), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + range = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr("a"), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)); /* First */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "a", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 8); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 8); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 6); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "b", key.count); - next_value = 
AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 4); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 4); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "c", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 5); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 5); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Fourth */ - next = AMmapItemsNext(&range, 1); + next = AMitemsNext(&range, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "d", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_UINT); - assert_int_equal(next_value.uint, 9); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_UINT); + assert_true(AMitemToUint(next, &uint)); + assert_int_equal(uint, 9); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 7); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Fifth */ - assert_null(AMmapItemsNext(&range, 1)); + assert_null(AMitemsNext(&range, 1)); } static void test_map_range_back_and_forth_single(void** state) { - AMresultStack* stack = *state; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id = AMpush(&stack, - AMgetActorId(doc), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMactorId const* actor_id; + assert_true(AMitemToActorId(AMstackItem(stack_ptr, AMgetActorId(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("1"), AMstr("a"))); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("2"), AMstr("b"))); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("3"), AMstr("c"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("1"), AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("2"), AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("3"), AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* Forward, back, back. 
*/ - AMmapItems range_all = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + AMitems range_all = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); /* First */ - AMmapItem const* next = AMmapItemsNext(&range_all, 1); + AMitem* next = AMitemsNext(&range_all, 1); assert_non_null(next); - AMbyteSpan key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - AMvalue next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - AMobjId const* next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + AMbyteSpan str; + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + AMobjId const* next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - AMmapItems range_back_all = AMmapItemsReversed(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); - AMmapItem const* next_back = AMmapItemsNext(&range_back_all, 1); + AMitems range_back_all = AMitemsReversed(&range_all); + range_back_all = AMitemsRewound(&range_back_all); + AMitem* next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - AMvalue next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - AMobjId const* next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + AMbyteSpan str_back; + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + AMobjId const* next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "b", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, 
"b", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Forward, back, forward. */ - range_all = AMmapItemsRewound(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); + range_all = AMitemsRewound(&range_all); + range_back_all = AMitemsRewound(&range_back_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "b", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "b", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Forward, forward, forward. 
*/ - range_all = AMmapItemsRewound(&range_all); + range_all = AMitemsRewound(&range_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "b", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "b", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "c", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "c", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Forward stop */ - assert_null(AMmapItemsNext(&range_all, 1)); + assert_null(AMitemsNext(&range_all, 1)); /* Back, back, back. 
*/ - range_back_all = AMmapItemsRewound(&range_back_all); + range_back_all = AMitemsRewound(&range_back_all); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "b", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "b", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* First */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "a", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "a", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Back stop */ - assert_null(AMmapItemsNext(&range_back_all, 1)); + assert_null(AMitemsNext(&range_back_all, 1)); } static void test_map_range_back_and_forth_double(void** state) { - AMresultStack* stack = *state; 
- AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id1= AMpush(&stack, - AMactorIdInitBytes("\0", 1), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(doc1, actor_id1)); + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); + AMactorId const* actor_id1; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromBytes("\0", 1), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id1)); + AMstackItem(NULL, AMsetActorId(doc1, actor_id1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("1"), AMstr("a"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("2"), AMstr("b"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("3"), AMstr("c"))); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("1"), AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("2"), AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("3"), AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* The second actor should win all conflicts here. */ - AMdoc* const doc2 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id2 = AMpush(&stack, - AMactorIdInitBytes("\1", 1), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(doc2, actor_id2)); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("1"), AMstr("aa"))); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("2"), AMstr("bb"))); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("3"), AMstr("cc"))); + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); + AMactorId const* actor_id2; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromBytes("\1", 1), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id2)); + AMstackItem(NULL, AMsetActorId(doc2, actor_id2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("1"), AMstr("aa")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("2"), AMstr("bb")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("3"), AMstr("cc")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); - AMfree(AMmerge(doc1, doc2)); + AMstackItem(NULL, AMmerge(doc1, doc2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* Forward, back, back. 
*/ - AMmapItems range_all = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + AMitems range_all = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); /* First */ - AMmapItem const* next = AMmapItemsNext(&range_all, 1); + AMitem* next = AMitemsNext(&range_all, 1); assert_non_null(next); - AMbyteSpan key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - AMvalue next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - AMobjId const* next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + AMbyteSpan str; + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + AMobjId const* next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - AMmapItems range_back_all = AMmapItemsReversed(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); - AMmapItem const* next_back = AMmapItemsNext(&range_back_all, 1); + AMitems range_back_all = AMitemsReversed(&range_all); + range_back_all = AMitemsRewound(&range_back_all); + AMitem* next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - AMvalue next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - AMobjId const* next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + AMbyteSpan str_back; + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + AMobjId const* next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "bb", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + 
assert_memory_equal(str_back.src, "bb", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Forward, back, forward. */ - range_all = AMmapItemsRewound(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); + range_all = AMitemsRewound(&range_all); + range_back_all = AMitemsRewound(&range_back_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "bb", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "bb", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Forward, forward, forward. 
*/ - range_all = AMmapItemsRewound(&range_all); + range_all = AMitemsRewound(&range_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "bb", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "bb", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "cc", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "cc", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Forward stop */ - assert_null(AMmapItemsNext(&range_all, 1)); + assert_null(AMitemsNext(&range_all, 1)); /* Back, back, back. 
*/ - range_back_all = AMmapItemsRewound(&range_back_all); + range_back_all = AMitemsRewound(&range_back_all); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "bb", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "bb", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* First */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "aa", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "aa", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Back stop */ - assert_null(AMmapItemsNext(&range_back_all, 1)); + assert_null(AMitemsNext(&range_back_all, 1)); } static void test_map_range_at_back_and_forth_single(void** state) { - AMresultStack* 
stack = *state; - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id = AMpush(&stack, - AMgetActorId(doc), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); + AMactorId const* actor_id; + assert_true(AMitemToActorId(AMstackItem(stack_ptr, AMgetActorId(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("1"), AMstr("a"))); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("2"), AMstr("b"))); - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("3"), AMstr("c"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("1"), AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("2"), AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("3"), AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); - AMchangeHashes const heads = AMpush(&stack, - AMgetHeads(doc), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads = AMstackItems(stack_ptr, AMgetHeads(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* Forward, back, back. */ - AMmapItems range_all = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + AMitems range_all = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); /* First */ - AMmapItem const* next = AMmapItemsNext(&range_all, 1); + AMitem* next = AMitemsNext(&range_all, 1); assert_non_null(next); - AMbyteSpan key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - AMvalue next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - AMobjId const* next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + AMbyteSpan str; + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + AMobjId const* next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - AMmapItems range_back_all = AMmapItemsReversed(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); - AMmapItem const* next_back = AMmapItemsNext(&range_back_all, 1); + AMitems range_back_all = AMitemsReversed(&range_all); + range_back_all = AMitemsRewound(&range_back_all); + AMitem* next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - AMvalue next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - AMobjId 
const* next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + AMbyteSpan str_back; + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + AMobjId const* next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "b", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "b", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Forward, back, forward. */ - range_all = AMmapItemsRewound(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); + range_all = AMitemsRewound(&range_all); + range_back_all = AMitemsRewound(&range_back_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + 
assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "b", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "b", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Forward, forward, forward. */ - range_all = AMmapItemsRewound(&range_all); + range_all = AMitemsRewound(&range_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "a", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "a", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "b", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "b", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Third */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); 
assert_memory_equal(key.src, "3", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 1); - assert_memory_equal(next_value.str.src, "c", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 1); + assert_memory_equal(str.src, "c", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 0); /* Forward stop */ - assert_null(AMmapItemsNext(&range_all, 1)); + assert_null(AMitemsNext(&range_all, 1)); /* Back, back, back. */ - range_back_all = AMmapItemsRewound(&range_back_all); + range_back_all = AMitemsRewound(&range_back_all); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "c", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "c", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "b", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "b", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* First */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - 
assert_int_equal(next_back_value.str.count, 1); - assert_memory_equal(next_back_value.str.src, "a", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 1); + assert_memory_equal(str_back.src, "a", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 0); /* Back stop */ - assert_null(AMmapItemsNext(&range_back_all, 1)); + assert_null(AMitemsNext(&range_back_all, 1)); } static void test_map_range_at_back_and_forth_double(void** state) { - AMresultStack* stack = *state; - AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id1= AMpush(&stack, - AMactorIdInitBytes("\0", 1), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(doc1, actor_id1)); + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); + AMactorId const* actor_id1; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromBytes("\0", 1), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id1)); + AMstackItem(NULL, AMsetActorId(doc1, actor_id1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("1"), AMstr("a"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("2"), AMstr("b"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("3"), AMstr("c"))); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("1"), AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("2"), AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("3"), AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* The second actor should win all conflicts here. 
*/ - AMdoc* const doc2 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMactorId const* const actor_id2= AMpush(&stack, - AMactorIdInitBytes("\1", 1), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id; - AMfree(AMsetActorId(doc2, actor_id2)); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("1"), AMstr("aa"))); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("2"), AMstr("bb"))); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("3"), AMstr("cc"))); + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); + AMactorId const* actor_id2; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromBytes("\1", 1), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id2)); + AMstackItem(NULL, AMsetActorId(doc2, actor_id2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("1"), AMstr("aa")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("2"), AMstr("bb")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("3"), AMstr("cc")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); - AMfree(AMmerge(doc1, doc2)); - AMchangeHashes const heads = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMstackItem(NULL, AMmerge(doc1, doc2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems const heads = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* Forward, back, back. */ - AMmapItems range_all = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; + AMitems range_all = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); /* First */ - AMmapItem const* next = AMmapItemsNext(&range_all, 1); + AMitem* next = AMitemsNext(&range_all, 1); assert_non_null(next); - AMbyteSpan key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - AMvalue next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - AMobjId const* next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + AMbyteSpan str; + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + AMobjId const* next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - AMmapItems range_back_all = AMmapItemsReversed(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); - AMmapItem const* next_back = AMmapItemsNext(&range_back_all, 1); + AMitems range_back_all = AMitemsReversed(&range_all); + range_back_all = AMitemsRewound(&range_back_all); + AMitem* next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - AMvalue next_back_value = 
AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - AMobjId const* next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + AMbyteSpan str_back; + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + AMobjId const* next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "bb", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "bb", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Forward, back, forward. 
*/ - range_all = AMmapItemsRewound(&range_all); - range_back_all = AMmapItemsRewound(&range_back_all); + range_all = AMitemsRewound(&range_all); + range_back_all = AMitemsRewound(&range_back_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "bb", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "bb", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Forward, forward, forward. 
*/ - range_all = AMmapItemsRewound(&range_all); + range_all = AMitemsRewound(&range_all); /* First */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "aa", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "aa", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Second */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "bb", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "bb", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Third */ - next = AMmapItemsNext(&range_all, 1); + next = AMitemsNext(&range_all, 1); assert_non_null(next); - key = AMmapItemKey(next); + assert_int_equal(AMitemIdxType(next), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_value = AMmapItemValue(next); - assert_int_equal(next_value.tag, AM_VALUE_STR); - assert_int_equal(next_value.str.count, 2); - assert_memory_equal(next_value.str.src, "cc", next_value.str.count); - next_obj_id = AMmapItemObjId(next); + assert_int_equal(AMitemValType(next), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next, &str)); + assert_int_equal(str.count, 2); + assert_memory_equal(str.src, "cc", str.count); + next_obj_id = AMitemObjId(next); assert_int_equal(AMobjIdCounter(next_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_obj_id), 1); /* Forward stop */ - assert_null(AMmapItemsNext(&range_all, 1)); + assert_null(AMitemsNext(&range_all, 1)); /* Back, back, back. 
*/ - range_back_all = AMmapItemsRewound(&range_back_all); + range_back_all = AMitemsRewound(&range_back_all); /* Third */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "3", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "cc", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "cc", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 3); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Second */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "2", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "bb", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "bb", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 2); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* First */ - next_back = AMmapItemsNext(&range_back_all, 1); + next_back = AMitemsNext(&range_back_all, 1); assert_non_null(next_back); - key = AMmapItemKey(next_back); + assert_int_equal(AMitemIdxType(next_back), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(next_back, &key)); assert_int_equal(key.count, 1); assert_memory_equal(key.src, "1", key.count); - next_back_value = AMmapItemValue(next_back); - assert_int_equal(next_back_value.tag, AM_VALUE_STR); - assert_int_equal(next_back_value.str.count, 2); - assert_memory_equal(next_back_value.str.src, "aa", next_back_value.str.count); - next_back_obj_id = AMmapItemObjId(next_back); + assert_int_equal(AMitemValType(next_back), AM_VAL_TYPE_STR); + assert_true(AMitemToStr(next_back, &str_back)); + assert_int_equal(str_back.count, 2); + assert_memory_equal(str_back.src, "aa", str_back.count); + next_back_obj_id = AMitemObjId(next_back); assert_int_equal(AMobjIdCounter(next_back_obj_id), 1); assert_int_equal(AMactorIdCmp(AMobjIdActorId(next_back_obj_id), actor_id2), 0); assert_int_equal(AMobjIdIndex(next_back_obj_id), 1); /* Back stop */ - assert_null(AMmapItemsNext(&range_back_all, 1)); + assert_null(AMitemsNext(&range_back_all, 1)); } static void test_get_range_values(void** state) { - AMresultStack* stack = *state; - 
AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("aa"), AMstr("aaa"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("bb"), AMstr("bbb"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("cc"), AMstr("ccc"))); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("dd"), AMstr("ddd"))); - AMfree(AMcommit(doc1, AMstr(NULL), NULL)); + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("aa"), AMstr("aaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("bb"), AMstr("bbb")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("cc"), AMstr("ccc")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("dd"), AMstr("ddd")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMchangeHashes const v1 = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMdoc* const doc2 = AMpush(&stack, AMfork(doc1, NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMitems const v1 = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMfork(doc1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("cc"), AMstr("ccc V2"))); - AMfree(AMcommit(doc1, AMstr(NULL), NULL)); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("cc"), AMstr("ccc V2")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc1, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("cc"), AMstr("ccc V3"))); - AMfree(AMcommit(doc2, AMstr(NULL), NULL)); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("cc"), AMstr("ccc V3")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(doc2, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMfree(AMmerge(doc1, doc2)); + AMstackItem(NULL, AMmerge(doc1, doc2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); - AMmapItems range = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr("b"), AMstr("d"), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItems range_back = AMmapItemsReversed(&range); - assert_int_equal(AMmapItemsSize(&range), 2); + /* Forward vs. reverse: complete current map range. 
*/ + AMitems range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + size_t size = AMitemsSize(&range); + assert_int_equal(size, 4); + AMitems range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + AMbyteSpan key; + assert_true(AMitemKey(AMitemsNext(&range, 1), &key)); + assert_memory_equal(key.src, "aa", key.count); + assert_true(AMitemKey(AMitemsNext(&range_back, 1), &key)); + assert_memory_equal(key.src, "dd", key.count); - AMmapItem const* map_item = NULL; - while ((map_item = AMmapItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMresult* result = AMmapGet(doc1, AM_ROOT, AMmapItemKey(map_item), NULL); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMmapItemObjId(map_item)); - AMfree(result); + AMitem *item1, *item_back1; + size_t count, middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + AMbyteSpan key1, key_back1; + assert_true(AMitemKey(item1, &key1)); + assert_true(AMitemKey(item_back1, &key_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(AMstrCmp(key1, key_back1), 0); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(AMstrCmp(key1, key_back1), 0); + } + AMitem* item2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key1, NULL), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key_back1, NULL), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - assert_int_equal(AMmapItemsSize(&range_back), 2); + /* Forward vs. reverse: partial current map range. 
*/ + range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr("aa"), AMstr("dd"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 3); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemKey(AMitemsNext(&range, 1), &key)); + assert_memory_equal(key.src, "aa", key.count); + assert_true(AMitemKey(AMitemsNext(&range_back, 1), &key)); + assert_memory_equal(key.src, "cc", key.count); - while ((map_item = AMmapItemsNext(&range_back, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMresult* result = AMmapGet(doc1, AM_ROOT, AMmapItemKey(map_item), NULL); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMmapItemObjId(map_item)); - AMfree(result); + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + AMbyteSpan key1, key_back1; + assert_true(AMitemKey(item1, &key1)); + assert_true(AMitemKey(item_back1, &key_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(AMstrCmp(key1, key_back1), 0); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(AMstrCmp(key1, key_back1), 0); + } + AMitem* item2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key1, NULL), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key_back1, NULL), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr("b"), AMstr("d"), &v1), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - range_back = AMmapItemsReversed(&range); - assert_int_equal(AMmapItemsSize(&range), 2); + /* Forward vs. reverse: complete historical map range. 
*/ + range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), &v1), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 4); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemKey(AMitemsNext(&range, 1), &key)); + assert_memory_equal(key.src, "aa", key.count); + assert_true(AMitemKey(AMitemsNext(&range_back, 1), &key)); + assert_memory_equal(key.src, "dd", key.count); - while ((map_item = AMmapItemsNext(&range, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMresult* result = AMmapGet(doc1, AM_ROOT, AMmapItemKey(map_item), &v1); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMmapItemObjId(map_item)); - AMfree(result); + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + AMbyteSpan key1, key_back1; + assert_true(AMitemKey(item1, &key1)); + assert_true(AMitemKey(item_back1, &key_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(AMstrCmp(key1, key_back1), 0); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(AMstrCmp(key1, key_back1), 0); + } + AMitem* item2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key1, &v1), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key_back1, &v1), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - assert_int_equal(AMmapItemsSize(&range_back), 2); + /* Forward vs. reverse: partial historical map range. 
*/ + range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr("bb"), AMstr(NULL), &v1), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + size = AMitemsSize(&range); + assert_int_equal(size, 3); + range_back = AMitemsReversed(&range); + assert_int_equal(AMitemsSize(&range_back), size); + assert_true(AMitemKey(AMitemsNext(&range, 1), &key)); + assert_memory_equal(key.src, "bb", key.count); + assert_true(AMitemKey(AMitemsNext(&range_back, 1), &key)); + assert_memory_equal(key.src, "dd", key.count); - while ((map_item = AMmapItemsNext(&range_back, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMresult* result = AMmapGet(doc1, AM_ROOT, AMmapItemKey(map_item), &v1); - AMvalue const val2 = AMresultValue(result); - assert_true(AMvalueEqual(&val1, &val2)); - assert_non_null(AMmapItemObjId(map_item)); - AMfree(result); + middle = size / 2; + range = AMitemsRewound(&range); + range_back = AMitemsRewound(&range_back); + for (item1 = NULL, item_back1 = NULL, count = 0; item1 && item_back1; + item1 = AMitemsNext(&range, 1), item_back1 = AMitemsNext(&range_back, 1), ++count) { + AMbyteSpan key1, key_back1; + assert_true(AMitemKey(item1, &key1)); + assert_true(AMitemKey(item_back1, &key_back1)); + if ((count == middle) && (middle & 1)) { + /* The iterators are crossing in the middle. */ + assert_int_equal(AMstrCmp(key1, key_back1), 0); + assert_true(AMitemEqual(item1, item_back1)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item_back1))); + } else { + assert_int_not_equal(AMstrCmp(key1, key_back1), 0); + } + AMitem* item2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key1, &v1), NULL, NULL); + AMitem* item_back2 = AMstackItem(stack_ptr, AMmapGet(doc1, AM_ROOT, key_back1, &v1), NULL, NULL); + /** \note An item returned from an `AM...Get()` call doesn't include the + index used to retrieve it. */ + assert_false(AMitemIdxType(item2)); + assert_false(AMitemIdxType(item_back2)); + assert_true(AMitemEqual(item1, item2)); + assert_true(AMobjIdEqual(AMitemObjId(item1), AMitemObjId(item2))); + assert_true(AMitemEqual(item_back1, item_back2)); + assert_true(AMobjIdEqual(AMitemObjId(item_back1), AMitemObjId(item_back2))); + AMresultFree(AMstackPop(stack_ptr, NULL)); } - range = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMobjItems values = AMpush(&stack, - AMobjValues(doc1, AM_ROOT, NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; - assert_int_equal(AMmapItemsSize(&range), AMobjItemsSize(&values)); - AMobjItem const* value = NULL; - while ((map_item = AMmapItemsNext(&range, 1)) != NULL && - (value = AMobjItemsNext(&values, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMvalue const val2 = AMobjItemValue(value); - assert_true(AMvalueEqual(&val1, &val2)); - assert_true(AMobjIdEqual(AMmapItemObjId(map_item), AMobjItemObjId(value))); + /* Map range vs. object range: complete current. */ + range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + AMitems obj_items = AMstackItems(stack_ptr, AMobjItems(doc1, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&range), AMitemsSize(&obj_items)); + + AMitem *item, *obj_item; + for (item = NULL, obj_item = NULL; item && obj_item; + item = AMitemsNext(&range, 1), obj_item = AMitemsNext(&obj_items, 1)) { + /** \note Object iteration doesn't yield any item indices. 
*/ + assert_true(AMitemIdxType(item)); + assert_false(AMitemIdxType(obj_item)); + assert_true(AMitemEqual(item, obj_item)); + assert_true(AMobjIdEqual(AMitemObjId(item), AMitemObjId(obj_item))); } - range = AMpush(&stack, - AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), &v1), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - values = AMpush(&stack, - AMobjValues(doc1, AM_ROOT, &v1), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; - assert_int_equal(AMmapItemsSize(&range), AMobjItemsSize(&values)); - while ((map_item = AMmapItemsNext(&range, 1)) != NULL && - (value = AMobjItemsNext(&values, 1)) != NULL) { - AMvalue const val1 = AMmapItemValue(map_item); - AMvalue const val2 = AMobjItemValue(value); - assert_true(AMvalueEqual(&val1, &val2)); - assert_true(AMobjIdEqual(AMmapItemObjId(map_item), AMobjItemObjId(value))); + /* Map range vs. object range: complete historical. */ + range = AMstackItems(stack_ptr, AMmapRange(doc1, AM_ROOT, AMstr(NULL), AMstr(NULL), &v1), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + obj_items = AMstackItems(stack_ptr, AMobjItems(doc1, AM_ROOT, &v1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&range), AMitemsSize(&obj_items)); + + for (item = NULL, obj_item = NULL; item && obj_item; + item = AMitemsNext(&range, 1), obj_item = AMitemsNext(&obj_items, 1)) { + /** \note Object iteration doesn't yield any item indices. */ + assert_true(AMitemIdxType(item)); + assert_false(AMitemIdxType(obj_item)); + assert_true(AMitemEqual(item, obj_item)); + assert_true(AMobjIdEqual(AMitemObjId(item), AMitemObjId(obj_item))); } } @@ -1418,19 +1565,18 @@ int run_map_tests(void) { cmocka_unit_test(test_AMmapPutObject(List)), cmocka_unit_test(test_AMmapPutObject(Map)), cmocka_unit_test(test_AMmapPutObject(Text)), - cmocka_unit_test(test_AMmapPutObject(Void)), cmocka_unit_test(test_AMmapPutStr), cmocka_unit_test(test_AMmapPut(Timestamp)), cmocka_unit_test(test_AMmapPut(Uint)), - cmocka_unit_test_setup_teardown(test_get_NUL_key, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_get_NUL_string_value, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_range_iter_map, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_map_range_back_and_forth_single, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_map_range_back_and_forth_double, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_map_range_at_back_and_forth_single, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_map_range_at_back_and_forth_double, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_get_range_values, setup_stack, teardown_stack), + cmocka_unit_test_setup_teardown(test_get_NUL_key, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_get_NUL_string_value, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_range_iter_map, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_map_range_back_and_forth_single, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_map_range_back_and_forth_double, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_map_range_at_back_and_forth_single, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_map_range_at_back_and_forth_double, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_get_range_values, setup_base, teardown_base), }; - return cmocka_run_group_tests(tests, group_setup, group_teardown); + return cmocka_run_group_tests(tests, 
setup_doc, teardown_doc); } diff --git a/rust/automerge-c/test/ported_wasm/basic_tests.c b/rust/automerge-c/test/ported_wasm/basic_tests.c index e2659d62..b83ff132 100644 --- a/rust/automerge-c/test/ported_wasm/basic_tests.c +++ b/rust/automerge-c/test/ported_wasm/basic_tests.c @@ -11,7 +11,10 @@ /* local */ #include -#include "../stack_utils.h" +#include +#include +#include "../base_state.h" +#include "../cmocka_utils.h" /** * \brief default import init() should return a promise @@ -22,163 +25,171 @@ static void test_default_import_init_should_return_a_promise(void** state); * \brief should create, clone and free */ static void test_create_clone_and_free(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc1 = create() */ - AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); /* const doc2 = doc1.clone() */ - AMdoc* const doc2 = AMpush(&stack, AMclone(doc1), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMclone(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); } /** * \brief should be able to start and commit */ static void test_start_and_commit(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* doc.commit() */ - AMpush(&stack, AMemptyChange(doc, AMstr(NULL), NULL), AM_VALUE_CHANGE_HASHES, cmocka_cb); + AMstackItems(stack_ptr, AMemptyChange(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); } /** * \brief getting a nonexistent prop does not throw an error */ static void test_getting_a_nonexistent_prop_does_not_throw_an_error(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* const result = doc.getWithType(root, "hello") */ /* assert.deepEqual(result, undefined) */ - AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("hello"), NULL), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("hello"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } /** * \brief should be able to set and get a simple value */ static void test_should_be_able_to_set_and_get_a_simple_value(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc: Automerge = create("aabbcc") */ - AMdoc* const doc = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aabbcc")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aabbcc")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); 
/* const root = "_root" */ /* let result */ /* */ /* doc.put(root, "hello", "world") */ - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("hello"), AMstr("world"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("hello"), AMstr("world")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "number1", 5, "uint") */ - AMfree(AMmapPutUint(doc, AM_ROOT, AMstr("number1"), 5)); + AMstackItem(NULL, AMmapPutUint(doc, AM_ROOT, AMstr("number1"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "number2", 5) */ - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("number2"), 5)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("number2"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "number3", 5.5) */ - AMfree(AMmapPutF64(doc, AM_ROOT, AMstr("number3"), 5.5)); + AMstackItem(NULL, AMmapPutF64(doc, AM_ROOT, AMstr("number3"), 5.5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "number4", 5.5, "f64") */ - AMfree(AMmapPutF64(doc, AM_ROOT, AMstr("number4"), 5.5)); + AMstackItem(NULL, AMmapPutF64(doc, AM_ROOT, AMstr("number4"), 5.5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "number5", 5.5, "int") */ - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("number5"), 5.5)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("number5"), 5.5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "bool", true) */ - AMfree(AMmapPutBool(doc, AM_ROOT, AMstr("bool"), true)); + AMstackItem(NULL, AMmapPutBool(doc, AM_ROOT, AMstr("bool"), true), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "time1", 1000, "timestamp") */ - AMfree(AMmapPutTimestamp(doc, AM_ROOT, AMstr("time1"), 1000)); + AMstackItem(NULL, AMmapPutTimestamp(doc, AM_ROOT, AMstr("time1"), 1000), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put(root, "time2", new Date(1001)) */ - AMfree(AMmapPutTimestamp(doc, AM_ROOT, AMstr("time2"), 1001)); + AMstackItem(NULL, AMmapPutTimestamp(doc, AM_ROOT, AMstr("time2"), 1001), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.putObject(root, "list", []); */ - AMfree(AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST)); + AMstackItem(NULL, AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); /* doc.put(root, "null", null) */ - AMfree(AMmapPutNull(doc, AM_ROOT, AMstr("null"))); + AMstackItem(NULL, AMmapPutNull(doc, AM_ROOT, AMstr("null")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* result = doc.getWithType(root, "hello") */ /* assert.deepEqual(result, ["str", "world"]) */ /* assert.deepEqual(doc.get("/", "hello"), "world") */ - AMbyteSpan str = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("hello"), NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("hello"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), + &str)); assert_int_equal(str.count, strlen("world")); assert_memory_equal(str.src, "world", str.count); /* assert.deepEqual(doc.get("/", "hello"), "world") */ /* */ /* result = doc.getWithType(root, "number1") */ /* assert.deepEqual(result, ["uint", 5]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("number1"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 5); + uint64_t uint; + assert_true(AMitemToUint( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("number1"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 5); /* assert.deepEqual(doc.get("/", "number1"), 5) */ /* */ /* result = doc.getWithType(root, "number2") */ /* assert.deepEqual(result, ["int", 
5]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("number2"), NULL), - AM_VALUE_INT, - cmocka_cb).int_, 5); + int64_t int_; + assert_true(AMitemToInt( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("number2"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_INT)), + &int_)); + assert_int_equal(int_, 5); /* */ /* result = doc.getWithType(root, "number3") */ /* assert.deepEqual(result, ["f64", 5.5]) */ - assert_float_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("number3"), NULL), - AM_VALUE_F64, - cmocka_cb).f64, 5.5, DBL_EPSILON); + double f64; + assert_true(AMitemToF64( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("number3"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_F64)), + &f64)); + assert_float_equal(f64, 5.5, DBL_EPSILON); /* */ /* result = doc.getWithType(root, "number4") */ /* assert.deepEqual(result, ["f64", 5.5]) */ - assert_float_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("number4"), NULL), - AM_VALUE_F64, - cmocka_cb).f64, 5.5, DBL_EPSILON); + assert_true(AMitemToF64( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("number4"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_F64)), + &f64)); + assert_float_equal(f64, 5.5, DBL_EPSILON); /* */ /* result = doc.getWithType(root, "number5") */ /* assert.deepEqual(result, ["int", 5]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("number5"), NULL), - AM_VALUE_INT, - cmocka_cb).int_, 5); + assert_true(AMitemToInt( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("number5"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_INT)), + &int_)); + assert_int_equal(int_, 5); /* */ /* result = doc.getWithType(root, "bool") */ /* assert.deepEqual(result, ["boolean", true]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("bool"), NULL), - AM_VALUE_BOOLEAN, - cmocka_cb).boolean, true); + bool boolean; + assert_true(AMitemToBool( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("bool"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BOOL)), + &boolean)); + assert_true(boolean); /* */ /* doc.put(root, "bool", false, "boolean") */ - AMfree(AMmapPutBool(doc, AM_ROOT, AMstr("bool"), false)); + AMstackItem(NULL, AMmapPutBool(doc, AM_ROOT, AMstr("bool"), false), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* result = doc.getWithType(root, "bool") */ /* assert.deepEqual(result, ["boolean", false]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("bool"), NULL), - AM_VALUE_BOOLEAN, - cmocka_cb).boolean, false); + assert_true(AMitemToBool( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("bool"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BOOL)), + &boolean)); + assert_false(boolean); /* */ /* result = doc.getWithType(root, "time1") */ /* assert.deepEqual(result, ["timestamp", new Date(1000)]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("time1"), NULL), - AM_VALUE_TIMESTAMP, - cmocka_cb).timestamp, 1000); + int64_t timestamp; + assert_true(AMitemToTimestamp(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("time1"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_TIMESTAMP)), + ×tamp)); + assert_int_equal(timestamp, 1000); /* */ /* result = doc.getWithType(root, "time2") */ /* assert.deepEqual(result, ["timestamp", new Date(1001)]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("time2"), NULL), - AM_VALUE_TIMESTAMP, - cmocka_cb).timestamp, 1001); + assert_true(AMitemToTimestamp(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("time2"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_TIMESTAMP)), + ×tamp)); + 
assert_int_equal(timestamp, 1001); /* */ /* result = doc.getWithType(root, "list") */ /* assert.deepEqual(result, ["list", "10@aabbcc"]); */ - AMobjId const* const list = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("list"), NULL), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const list = AMitemObjId( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("list"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); assert_int_equal(AMobjIdCounter(list), 10); str = AMactorIdStr(AMobjIdActorId(list)); assert_int_equal(str.count, strlen("aabbcc")); @@ -186,38 +197,39 @@ static void test_should_be_able_to_set_and_get_a_simple_value(void** state) { /* */ /* result = doc.getWithType(root, "null") */ /* assert.deepEqual(result, ["null", null]); */ - AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("null"), NULL), - AM_VALUE_NULL, - cmocka_cb); + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("null"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_NULL)); } /** * \brief should be able to use bytes */ static void test_should_be_able_to_use_bytes(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* doc.put("_root", "data1", new Uint8Array([10, 11, 12])); */ static uint8_t const DATA1[] = {10, 11, 12}; - AMfree(AMmapPutBytes(doc, AM_ROOT, AMstr("data1"), AMbytes(DATA1, sizeof(DATA1)))); + AMstackItem(NULL, AMmapPutBytes(doc, AM_ROOT, AMstr("data1"), AMbytes(DATA1, sizeof(DATA1))), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* doc.put("_root", "data2", new Uint8Array([13, 14, 15]), "bytes"); */ static uint8_t const DATA2[] = {13, 14, 15}; - AMfree(AMmapPutBytes(doc, AM_ROOT, AMstr("data2"), AMbytes(DATA2, sizeof(DATA2)))); + AMstackItem(NULL, AMmapPutBytes(doc, AM_ROOT, AMstr("data2"), AMbytes(DATA2, sizeof(DATA2))), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* const value1 = doc.getWithType("_root", "data1") */ - AMbyteSpan const value1 = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("data1"), NULL), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan value1; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("data1"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), + &value1)); /* assert.deepEqual(value1, ["bytes", new Uint8Array([10, 11, 12])]); */ assert_int_equal(value1.count, sizeof(DATA1)); assert_memory_equal(value1.src, DATA1, sizeof(DATA1)); /* const value2 = doc.getWithType("_root", "data2") */ - AMbyteSpan const value2 = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("data2"), NULL), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan value2; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("data2"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), + &value2)); /* assert.deepEqual(value2, ["bytes", new Uint8Array([13, 14, 15])]); */ assert_int_equal(value2.count, sizeof(DATA2)); assert_memory_equal(value2.src, DATA2, sizeof(DATA2)); @@ -227,103 +239,92 @@ static void test_should_be_able_to_use_bytes(void** state) { * \brief should be able to make subobjects */ static void test_should_be_able_to_make_subobjects(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), 
AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* let result */ /* */ /* const submap = doc.putObject(root, "submap", {}) */ - AMobjId const* const submap = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("submap"), AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const submap = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("submap"), AM_OBJ_TYPE_MAP), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc.put(submap, "number", 6, "uint") */ - AMfree(AMmapPutUint(doc, submap, AMstr("number"), 6)); + AMstackItem(NULL, AMmapPutUint(doc, submap, AMstr("number"), 6), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.strictEqual(doc.pendingOps(), 2) */ assert_int_equal(AMpendingOps(doc), 2); /* */ /* result = doc.getWithType(root, "submap") */ /* assert.deepEqual(result, ["map", submap]) */ - assert_true(AMobjIdEqual(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("submap"), NULL), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id, + assert_true(AMobjIdEqual(AMitemObjId(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("submap"), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))), submap)); /* */ /* result = doc.getWithType(submap, "number") */ /* assert.deepEqual(result, ["uint", 6]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, submap, AMstr("number"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, - 6); + uint64_t uint; + assert_true(AMitemToUint( + AMstackItem(stack_ptr, AMmapGet(doc, submap, AMstr("number"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 6); } /** * \brief should be able to make lists */ static void test_should_be_able_to_make_lists(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* */ /* const sublist = doc.putObject(root, "numbers", []) */ - AMobjId const* const sublist = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("numbers"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const sublist = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("numbers"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc.insert(sublist, 0, "a"); */ - AMfree(AMlistPutStr(doc, sublist, 0, true, AMstr("a"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 0, true, AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.insert(sublist, 1, "b"); */ - AMfree(AMlistPutStr(doc, sublist, 1, true, AMstr("b"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 1, true, AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.insert(sublist, 2, "c"); */ - AMfree(AMlistPutStr(doc, sublist, 2, true, AMstr("c"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 2, true, AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.insert(sublist, 0, "z"); */ - AMfree(AMlistPutStr(doc, sublist, 0, true, AMstr("z"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 0, true, AMstr("z")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* assert.deepEqual(doc.getWithType(sublist, 0), ["str", "z"]) */ - AMbyteSpan str = AMpush(&stack, - AMlistGet(doc, sublist, 0, NULL), - 
AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, sublist, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "z", str.count); /* assert.deepEqual(doc.getWithType(sublist, 1), ["str", "a"]) */ - str = AMpush(&stack, - AMlistGet(doc, sublist, 1, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, sublist, 1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); /* assert.deepEqual(doc.getWithType(sublist, 2), ["str", "b"]) */ - str = AMpush(&stack, - AMlistGet(doc, sublist, 2, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, sublist, 2, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); /* assert.deepEqual(doc.getWithType(sublist, 3), ["str", "c"]) */ - str = AMpush(&stack, - AMlistGet(doc, sublist, 3, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, sublist, 3, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); /* assert.deepEqual(doc.length(sublist), 4) */ assert_int_equal(AMobjSize(doc, sublist, NULL), 4); /* */ /* doc.put(sublist, 2, "b v2"); */ - AMfree(AMlistPutStr(doc, sublist, 2, false, AMstr("b v2"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 2, false, AMstr("b v2")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* assert.deepEqual(doc.getWithType(sublist, 2), ["str", "b v2"]) */ - str = AMpush(&stack, - AMlistGet(doc, sublist, 2, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, sublist, 2, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "b v2", str.count); /* assert.deepEqual(doc.length(sublist), 4) */ @@ -334,233 +335,217 @@ static void test_should_be_able_to_make_lists(void** state) { * \brief lists have insert, set, splice, and push ops */ static void test_lists_have_insert_set_splice_and_push_ops(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* */ /* const sublist = doc.putObject(root, "letters", []) */ - AMobjId const* const sublist = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("letters"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const sublist = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("letters"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc.insert(sublist, 0, "a"); */ - AMfree(AMlistPutStr(doc, sublist, 0, true, AMstr("a"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 0, true, AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.insert(sublist, 0, "b"); */ - AMfree(AMlistPutStr(doc, sublist, 0, true, AMstr("b"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 0, true, AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* 
assert.deepEqual(doc.materialize(), { letters: ["b", "a"] }) */ - AMmapItems doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* doc_item = AMmapItemsNext(&doc_items, 1); - AMbyteSpan key = AMmapItemKey(doc_item); + AMitem* doc_item = AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&list_items), 2); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - assert_null(AMlistItemsNext(&list_items, 1)); + assert_null(AMitemsNext(&list_items, 1)); } /* doc.push(sublist, "c"); */ - AMfree(AMlistPutStr(doc, sublist, SIZE_MAX, true, AMstr("c"))); + AMstackItem(NULL, AMlistPutStr(doc, sublist, SIZE_MAX, true, AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const heads = doc.getHeads() */ - AMchangeHashes const heads = AMpush(&stack, - AMgetHeads(doc), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads = AMstackItems(stack_ptr, AMgetHeads(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* assert.deepEqual(doc.materialize(), { letters: ["b", "a", "c"] }) */ - doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + doc_item = AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&list_items), 3); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + 
assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_null(AMlistItemsNext(&list_items, 1)); + assert_null(AMitemsNext(&list_items, 1)); } /* doc.push(sublist, 3, "timestamp"); */ - AMfree(AMlistPutTimestamp(doc, sublist, SIZE_MAX, true, 3)); - /* assert.deepEqual(doc.materialize(), { letters: ["b", "a", "c", new Date(3)] } */ - doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + AMstackItem(NULL, AMlistPutTimestamp(doc, sublist, SIZE_MAX, true, 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + /* assert.deepEqual(doc.materialize(), { letters: ["b", "a", "c", new + * Date(3)] } */ + doc_item = AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR | AM_VAL_TYPE_TIMESTAMP)); + assert_int_equal(AMitemsSize(&list_items), 4); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).timestamp, - 3); - assert_null(AMlistItemsNext(&list_items, 1)); + int64_t timestamp; + assert_true(AMitemToTimestamp(AMitemsNext(&list_items, 1), ×tamp)); + assert_int_equal(timestamp, 3); + assert_null(AMitemsNext(&list_items, 1)); } /* doc.splice(sublist, 1, 1, ["d", "e", "f"]); */ - static AMvalue const DATA[] = {{.str_tag = AM_VALUE_STR, .str = {.src = "d", .count = 1}}, - {.str_tag = AM_VALUE_STR, .str = {.src = "e", .count = 1}}, - {.str_tag = AM_VALUE_STR, .str = {.src = "f", .count = 1}}}; - AMfree(AMsplice(doc, sublist, 1, 1, DATA, sizeof(DATA)/sizeof(AMvalue))); - /* assert.deepEqual(doc.materialize(), { letters: ["b", "d", "e", "f", "c", new Date(3)] } */ - doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + AMresult* data = AMstackResult( + stack_ptr, AMresultFrom(3, AMitemFromStr(AMstr("d")), AMitemFromStr(AMstr("e")), AMitemFromStr(AMstr("f"))), + NULL, NULL); + AMstackItem(NULL, AMsplice(doc, sublist, 1, 1, AMresultItems(data)), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + /* assert.deepEqual(doc.materialize(), { letters: ["b", "d", "e", "f", "c", + * new Date(3)] } */ + doc_item = 
AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR | AM_VAL_TYPE_TIMESTAMP)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "d", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "e", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "f", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).timestamp, - 3); - assert_null(AMlistItemsNext(&list_items, 1)); + int64_t timestamp; + assert_true(AMitemToTimestamp(AMitemsNext(&list_items, 1), ×tamp)); + assert_int_equal(timestamp, 3); + assert_null(AMitemsNext(&list_items, 1)); } /* doc.put(sublist, 0, "z"); */ - AMfree(AMlistPutStr(doc, sublist, 0, false, AMstr("z"))); - /* assert.deepEqual(doc.materialize(), { letters: ["z", "d", "e", "f", "c", new Date(3)] } */ - doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + AMstackItem(NULL, AMlistPutStr(doc, sublist, 0, false, AMstr("z")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + /* assert.deepEqual(doc.materialize(), { letters: ["z", "d", "e", "f", "c", + * new Date(3)] } */ + doc_item = AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR | AM_VAL_TYPE_TIMESTAMP)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "z", str.count); - str = 
AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "d", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "e", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "f", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).timestamp, - 3); - assert_null(AMlistItemsNext(&list_items, 1)); + int64_t timestamp; + assert_true(AMitemToTimestamp(AMitemsNext(&list_items, 1), ×tamp)); + assert_int_equal(timestamp, 3); + assert_null(AMitemsNext(&list_items, 1)); } - /* assert.deepEqual(doc.materialize(sublist), ["z", "d", "e", "f", "c", new Date(3)] */ - AMlistItems sublist_items = AMpush( - &stack, - AMlistRange(doc, sublist, 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).str; + /* assert.deepEqual(doc.materialize(sublist), ["z", "d", "e", "f", "c", new + * Date(3)] */ + AMitems sublist_items = AMstackItems(stack_ptr, AMlistRange(doc, sublist, 0, SIZE_MAX, NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR | AM_VAL_TYPE_TIMESTAMP)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&sublist_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "z", str.count); - str = AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&sublist_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "d", str.count); - str = AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&sublist_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "e", str.count); - str = AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&sublist_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "f", str.count); - str = AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&sublist_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&sublist_items, 1)).timestamp, - 3); - assert_null(AMlistItemsNext(&sublist_items, 1)); + int64_t timestamp; + assert_true(AMitemToTimestamp(AMitemsNext(&sublist_items, 1), ×tamp)); + assert_int_equal(timestamp, 3); + assert_null(AMitemsNext(&sublist_items, 1)); /* assert.deepEqual(doc.length(sublist), 6) */ assert_int_equal(AMobjSize(doc, sublist, NULL), 6); - /* assert.deepEqual(doc.materialize("/", heads), { letters: ["b", "a", "c"] } */ - doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + /* assert.deepEqual(doc.materialize("/", heads), { letters: ["b", "a", "c"] + * } */ + doc_item = AMstackItem(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), 
&heads), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("letters")); assert_memory_equal(key.src, "letters", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, &heads), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, &heads), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "c", str.count); - assert_null(AMlistItemsNext(&list_items, 1)); + assert_null(AMitemsNext(&list_items, 1)); } } @@ -568,67 +553,54 @@ static void test_lists_have_insert_set_splice_and_push_ops(void** state) { * \brief should be able to delete non-existent props */ static void test_should_be_able_to_delete_non_existent_props(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* */ /* doc.put("_root", "foo", "bar") */ - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("foo"), AMstr("bar"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("foo"), AMstr("bar")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put("_root", "bip", "bap") */ - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("bip"), AMstr("bap"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("bip"), AMstr("bap")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const hash1 = doc.commit() */ - AMchangeHashes const hash1 = AMpush(&stack, - AMcommit(doc, AMstr(NULL), NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const hash1 = + AMstackItems(stack_ptr, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* assert.deepEqual(doc.keys("_root"), ["bip", "foo"]) */ - AMstrs keys = AMpush(&stack, - AMkeys(doc, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - AMbyteSpan str = AMstrsNext(&keys, 1); + AMitems keys = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bip", str.count); - str = AMstrsNext(&keys, 1); + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "foo", str.count); /* */ /* doc.delete("_root", "foo") */ - AMfree(AMmapDelete(doc, AM_ROOT, AMstr("foo"))); + AMstackItem(NULL, AMmapDelete(doc, AM_ROOT, AMstr("foo")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.delete("_root", "baz") */ - AMfree(AMmapDelete(doc, AM_ROOT, AMstr("baz"))); + AMstackItem(NULL, 
AMmapDelete(doc, AM_ROOT, AMstr("baz")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const hash2 = doc.commit() */ - AMchangeHashes const hash2 = AMpush(&stack, - AMcommit(doc, AMstr(NULL), NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const hash2 = + AMstackItems(stack_ptr, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* assert.deepEqual(doc.keys("_root"), ["bip"]) */ - keys = AMpush(&stack, - AMkeys(doc, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - str = AMstrsNext(&keys, 1); + keys = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bip", str.count); /* assert.deepEqual(doc.keys("_root", [hash1]), ["bip", "foo"]) */ - keys = AMpush(&stack, - AMkeys(doc, AM_ROOT, &hash1), - AM_VALUE_STRS, - cmocka_cb).strs; - str = AMstrsNext(&keys, 1); + keys = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, &hash1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bip", str.count); - str = AMstrsNext(&keys, 1); + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "foo", str.count); /* assert.deepEqual(doc.keys("_root", [hash2]), ["bip"]) */ - keys = AMpush(&stack, - AMkeys(doc, AM_ROOT, &hash2), - AM_VALUE_STRS, - cmocka_cb).strs; - str = AMstrsNext(&keys, 1); + keys = AMstackItems(stack_ptr, AMkeys(doc, AM_ROOT, &hash2), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bip", str.count); } @@ -636,123 +608,114 @@ static void test_should_be_able_to_delete_non_existent_props(void** state) { /** * \brief should be able to del */ -static void test_should_be_able_to_del(void **state) { - AMresultStack* stack = *state; +static void test_should_be_able_to_del(void** state) { + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* */ /* doc.put(root, "xxx", "xxx"); */ - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("xxx"), AMstr("xxx"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("xxx"), AMstr("xxx")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(root, "xxx"), ["str", "xxx"]) */ - AMbyteSpan const str = AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("xxx"), NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("xxx"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), + &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "xxx", str.count); /* doc.delete(root, "xxx"); */ - AMfree(AMmapDelete(doc, AM_ROOT, AMstr("xxx"))); + AMstackItem(NULL, AMmapDelete(doc, AM_ROOT, AMstr("xxx")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(root, "xxx"), undefined) */ - AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("xxx"), NULL), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMmapGet(doc, AM_ROOT, AMstr("xxx"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } /** * \brief 
should be able to use counters */ static void test_should_be_able_to_use_counters(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root" */ /* */ /* doc.put(root, "counter", 10, "counter"); */ - AMfree(AMmapPutCounter(doc, AM_ROOT, AMstr("counter"), 10)); + AMstackItem(NULL, AMmapPutCounter(doc, AM_ROOT, AMstr("counter"), 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(root, "counter"), ["counter", 10]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 10); + int64_t counter; + assert_true(AMitemToCounter(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 10); /* doc.increment(root, "counter", 10); */ - AMfree(AMmapIncrement(doc, AM_ROOT, AMstr("counter"), 10)); + AMstackItem(NULL, AMmapIncrement(doc, AM_ROOT, AMstr("counter"), 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(root, "counter"), ["counter", 20]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 20); + assert_true(AMitemToCounter(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 20); /* doc.increment(root, "counter", -5); */ - AMfree(AMmapIncrement(doc, AM_ROOT, AMstr("counter"), -5)); + AMstackItem(NULL, AMmapIncrement(doc, AM_ROOT, AMstr("counter"), -5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(root, "counter"), ["counter", 15]) */ - assert_int_equal(AMpush(&stack, - AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), - AM_VALUE_COUNTER, - cmocka_cb).counter, 15); + assert_true(AMitemToCounter(AMstackItem(stack_ptr, AMmapGet(doc, AM_ROOT, AMstr("counter"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER)), + &counter)); + assert_int_equal(counter, 15); } /** * \brief should be able to splice text */ static void test_should_be_able_to_splice_text(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const root = "_root"; */ /* */ /* const text = doc.putObject(root, "text", ""); */ - AMobjId const* const text = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const text = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc.splice(text, 0, 0, "hello ") */ - AMfree(AMspliceText(doc, text, 0, 0, AMstr("hello "))); + AMstackItem(NULL, AMspliceText(doc, text, 0, 0, AMstr("hello ")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.splice(text, 6, 0, "world") */ - AMfree(AMspliceText(doc, text, 6, 0, AMstr("world"))); + AMstackItem(NULL, 
AMspliceText(doc, text, 6, 0, AMstr("world")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.splice(text, 11, 0, "!?") */ - AMfree(AMspliceText(doc, text, 11, 0, AMstr("!?"))); + AMstackItem(NULL, AMspliceText(doc, text, 11, 0, AMstr("!?")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.getWithType(text, 0), ["str", "h"]) */ - AMbyteSpan str = AMpush(&stack, - AMlistGet(doc, text, 0, NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMlistGet(doc, text, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "h", str.count); /* assert.deepEqual(doc.getWithType(text, 1), ["str", "e"]) */ - str = AMpush(&stack, - AMlistGet(doc, text, 1, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMlistGet(doc, text, 1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "e", str.count); /* assert.deepEqual(doc.getWithType(text, 9), ["str", "l"]) */ - str = AMpush(&stack, - AMlistGet(doc, text, 9, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMlistGet(doc, text, 9, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "l", str.count); /* assert.deepEqual(doc.getWithType(text, 10), ["str", "d"]) */ - str = AMpush(&stack, - AMlistGet(doc, text, 10, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, text, 10, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "d", str.count); /* assert.deepEqual(doc.getWithType(text, 11), ["str", "!"]) */ - str = AMpush(&stack, - AMlistGet(doc, text, 11, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, text, 11, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "!", str.count); /* assert.deepEqual(doc.getWithType(text, 12), ["str", "?"]) */ - str = AMpush(&stack, - AMlistGet(doc, text, 12, NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMlistGet(doc, text, 12, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "?", str.count); } @@ -761,52 +724,45 @@ static void test_should_be_able_to_splice_text(void** state) { * \brief should be able to save all or incrementally */ static void test_should_be_able_to_save_all_or_incrementally(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* */ /* doc.put("_root", "foo", 1) */ - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("foo"), 1)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("foo"), 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* const save1 = doc.save() */ - AMbyteSpan const save1 = AMpush(&stack, - AMsave(doc), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan save1; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save1)); /* */ /* doc.put("_root", "bar", 2) 
*/ - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("bar"), 2)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("bar"), 2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* const saveMidway = doc.clone().save(); */ - AMbyteSpan const saveMidway = AMpush(&stack, - AMsave( - AMpush(&stack, - AMclone(doc), - AM_VALUE_DOC, - cmocka_cb).doc), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMdoc* doc_clone; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMclone(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc_clone)); + AMbyteSpan saveMidway; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc_clone), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &saveMidway)); /* */ /* const save2 = doc.saveIncremental(); */ - AMbyteSpan const save2 = AMpush(&stack, - AMsaveIncremental(doc), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan save2; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsaveIncremental(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save2)); /* */ /* doc.put("_root", "baz", 3); */ - AMfree(AMmapPutInt(doc, AM_ROOT, AMstr("baz"), 3)); + AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("baz"), 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* const save3 = doc.saveIncremental(); */ - AMbyteSpan const save3 = AMpush(&stack, - AMsaveIncremental(doc), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan save3; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsaveIncremental(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save3)); /* */ /* const saveA = doc.save(); */ - AMbyteSpan const saveA = AMpush(&stack, - AMsave(doc), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan saveA; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &saveA)); /* const saveB = new Uint8Array([...save1, ...save2, ...save3]); */ size_t const saveB_count = save1.count + save2.count + save3.count; uint8_t* const saveB_src = test_malloc(saveB_count); @@ -818,104 +774,83 @@ static void test_should_be_able_to_save_all_or_incrementally(void** state) { assert_memory_not_equal(saveA.src, saveB_src, saveA.count); /* */ /* const docA = load(saveA); */ - AMdoc* const docA = AMpush(&stack, - AMload(saveA.src, saveA.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* docA; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(saveA.src, saveA.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &docA)); /* const docB = load(saveB); */ - AMdoc* const docB = AMpush(&stack, - AMload(saveB_src, saveB_count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* docB; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(saveB_src, saveB_count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &docB)); test_free(saveB_src); /* const docC = load(saveMidway) */ - AMdoc* const docC = AMpush(&stack, - AMload(saveMidway.src, saveMidway.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* docC; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(saveMidway.src, saveMidway.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &docC)); /* docC.loadIncremental(save3) */ - AMfree(AMloadIncremental(docC, save3.src, save3.count)); + AMstackItem(NULL, AMloadIncremental(docC, save3.src, save3.count), cmocka_cb, AMexpect(AM_VAL_TYPE_UINT)); /* */ /* assert.deepEqual(docA.keys("_root"), docB.keys("_root")); */ - AMstrs const keysA = AMpush(&stack, - AMkeys(docA, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - AMstrs const keysB = AMpush(&stack, - AMkeys(docB, AM_ROOT, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsCmp(&keysA, &keysB), 0); + AMitems const keysA = 
AMstackItems(stack_ptr, AMkeys(docA, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMitems const keysB = AMstackItems(stack_ptr, AMkeys(docB, AM_ROOT, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemsEqual(&keysA, &keysB)); /* assert.deepEqual(docA.save(), docB.save()); */ - AMbyteSpan const save = AMpush(&stack, - AMsave(docA), - AM_VALUE_BYTES, - cmocka_cb).bytes; - assert_memory_equal(save.src, - AMpush(&stack, - AMsave(docB), - AM_VALUE_BYTES, - cmocka_cb).bytes.src, - save.count); + AMbyteSpan docA_save; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(docA), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &docA_save)); + AMbyteSpan docB_save; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(docB), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &docB_save)); + assert_int_equal(docA_save.count, docB_save.count); + assert_memory_equal(docA_save.src, docB_save.src, docA_save.count); /* assert.deepEqual(docA.save(), docC.save()); */ - assert_memory_equal(save.src, - AMpush(&stack, - AMsave(docC), - AM_VALUE_BYTES, - cmocka_cb).bytes.src, - save.count); + AMbyteSpan docC_save; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(docC), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &docC_save)); + assert_int_equal(docA_save.count, docC_save.count); + assert_memory_equal(docA_save.src, docC_save.src, docA_save.count); } /** * \brief should be able to splice text #2 */ static void test_should_be_able_to_splice_text_2(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create() */ - AMdoc* const doc = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const text = doc.putObject("_root", "text", ""); */ - AMobjId const* const text = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const text = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc.splice(text, 0, 0, "hello world"); */ - AMfree(AMspliceText(doc, text, 0, 0, AMstr("hello world"))); + AMstackItem(NULL, AMspliceText(doc, text, 0, 0, AMstr("hello world")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const hash1 = doc.commit(); */ - AMchangeHashes const hash1 = AMpush(&stack, - AMcommit(doc, AMstr(NULL), NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const hash1 = + AMstackItems(stack_ptr, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* doc.splice(text, 6, 0, "big bad "); */ - AMfree(AMspliceText(doc, text, 6, 0, AMstr("big bad "))); + AMstackItem(NULL, AMspliceText(doc, text, 6, 0, AMstr("big bad ")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const hash2 = doc.commit(); */ - AMchangeHashes const hash2 = AMpush(&stack, - AMcommit(doc, AMstr(NULL), NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const hash2 = + AMstackItems(stack_ptr, AMcommit(doc, AMstr(NULL), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* assert.strictEqual(doc.text(text), "hello big bad world") */ - AMbyteSpan str = AMpush(&stack, - AMtext(doc, text, NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMtext(doc, text, NULL), cmocka_cb, 
AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello big bad world")); assert_memory_equal(str.src, "hello big bad world", str.count); /* assert.strictEqual(doc.length(text), 19) */ assert_int_equal(AMobjSize(doc, text, NULL), 19); /* assert.strictEqual(doc.text(text, [hash1]), "hello world") */ - str = AMpush(&stack, - AMtext(doc, text, &hash1), - AM_VALUE_STR, - cmocka_cb).str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMtext(doc, text, &hash1), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello world")); assert_memory_equal(str.src, "hello world", str.count); /* assert.strictEqual(doc.length(text, [hash1]), 11) */ assert_int_equal(AMobjSize(doc, text, &hash1), 11); /* assert.strictEqual(doc.text(text, [hash2]), "hello big bad world") */ - str = AMpush(&stack, - AMtext(doc, text, &hash2), - AM_VALUE_STR, - cmocka_cb).str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, AMtext(doc, text, &hash2), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello big bad world")); assert_memory_equal(str.src, "hello big bad world", str.count); /* assert.strictEqual(doc.length(text, [hash2]), 19) */ @@ -926,266 +861,234 @@ static void test_should_be_able_to_splice_text_2(void** state) { * \brief local inc increments all visible counters in a map */ static void test_local_inc_increments_all_visible_counters_in_a_map(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc1 = create("aaaa") */ - AMdoc* const doc1 = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); /* doc1.put("_root", "hello", "world") */ - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("hello"), AMstr("world"))); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("hello"), AMstr("world")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* const doc2 = load(doc1.save(), "bbbb"); */ - AMbyteSpan const save = AMpush(&stack, - AMsave(doc1), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMdoc* const doc2 = AMpush(&stack, - AMload(save.src, save.count), - AM_VALUE_DOC, - cmocka_cb).doc; - AMfree(AMsetActorId(doc2, AMpush(&stack, - AMactorIdInitStr(AMstr("bbbb")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMbyteSpan save; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save)); + AMdoc* doc2; + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMload(save.src, save.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("bbbb")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMstackItem(NULL, AMsetActorId(doc2, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const doc3 = load(doc1.save(), "cccc"); */ - AMdoc* const doc3 = AMpush(&stack, - AMload(save.src, save.count), - AM_VALUE_DOC, - cmocka_cb).doc; - AMfree(AMsetActorId(doc3, AMpush(&stack, - AMactorIdInitStr(AMstr("cccc")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMdoc* doc3; + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMload(save.src, 
save.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc3)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("cccc")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMstackItem(NULL, AMsetActorId(doc3, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* let heads = doc1.getHeads() */ - AMchangeHashes const heads1 = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads1 = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* doc1.put("_root", "cnt", 20) */ - AMfree(AMmapPutInt(doc1, AM_ROOT, AMstr("cnt"), 20)); + AMstackItem(NULL, AMmapPutInt(doc1, AM_ROOT, AMstr("cnt"), 20), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc2.put("_root", "cnt", 0, "counter") */ - AMfree(AMmapPutCounter(doc2, AM_ROOT, AMstr("cnt"), 0)); + AMstackItem(NULL, AMmapPutCounter(doc2, AM_ROOT, AMstr("cnt"), 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc3.put("_root", "cnt", 10, "counter") */ - AMfree(AMmapPutCounter(doc3, AM_ROOT, AMstr("cnt"), 10)); + AMstackItem(NULL, AMmapPutCounter(doc3, AM_ROOT, AMstr("cnt"), 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc1.applyChanges(doc2.getChanges(heads)) */ - AMchanges const changes2 = AMpush(&stack, - AMgetChanges(doc2, &heads1), - AM_VALUE_CHANGES, - cmocka_cb).changes; - AMfree(AMapplyChanges(doc1, &changes2)); + AMitems const changes2 = + AMstackItems(stack_ptr, AMgetChanges(doc2, &heads1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMstackItem(NULL, AMapplyChanges(doc1, &changes2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc1.applyChanges(doc3.getChanges(heads)) */ - AMchanges const changes3 = AMpush(&stack, - AMgetChanges(doc3, &heads1), - AM_VALUE_CHANGES, - cmocka_cb).changes; - AMfree(AMapplyChanges(doc1, &changes3)); + AMitems const changes3 = + AMstackItems(stack_ptr, AMgetChanges(doc3, &heads1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMstackItem(NULL, AMapplyChanges(doc1, &changes3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* let result = doc1.getAll("_root", "cnt") */ - AMobjItems result = AMpush(&stack, - AMmapGetAll(doc1, AM_ROOT, AMstr("cnt"), NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; + AMitems result = AMstackItems(stack_ptr, AMmapGetAll(doc1, AM_ROOT, AMstr("cnt"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER | AM_VAL_TYPE_INT | AM_VAL_TYPE_STR)); /* assert.deepEqual(result, [ ['int', 20, '2@aaaa'], ['counter', 0, '2@bbbb'], ['counter', 10, '2@cccc'], ]) */ - AMobjItem const* result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).int_, 20); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 2); - AMbyteSpan str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + AMitem* result_item = AMitemsNext(&result, 1); + int64_t int_; + assert_true(AMitemToInt(result_item, &int_)); + assert_int_equal(int_, 20); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 2); + AMbyteSpan str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "aaaa", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 0); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 2); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + int64_t counter; + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 0); + 
assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 2); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "bbbb", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 10); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 2); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 10); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 2); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "cccc", str.count); /* doc1.increment("_root", "cnt", 5) */ - AMfree(AMmapIncrement(doc1, AM_ROOT, AMstr("cnt"), 5)); + AMstackItem(NULL, AMmapIncrement(doc1, AM_ROOT, AMstr("cnt"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* result = doc1.getAll("_root", "cnt") */ - result = AMpush(&stack, - AMmapGetAll(doc1, AM_ROOT, AMstr("cnt"), NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; + result = AMstackItems(stack_ptr, AMmapGetAll(doc1, AM_ROOT, AMstr("cnt"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER)); /* assert.deepEqual(result, [ ['counter', 5, '2@bbbb'], ['counter', 15, '2@cccc'], ]) */ - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 5); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 2); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 5); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 2); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "bbbb", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 15); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 2); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 15); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 2); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "cccc", str.count); /* */ /* const save1 = doc1.save() */ - AMbyteSpan const save1 = AMpush(&stack, - AMsave(doc1), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan save1; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save1)); /* const doc4 = load(save1) */ - AMdoc* const doc4 = AMpush(&stack, - AMload(save1.src, save1.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* doc4; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(save1.src, save1.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc4)); /* assert.deepEqual(doc4.save(), save1); */ - assert_memory_equal(AMpush(&stack, - AMsave(doc4), - AM_VALUE_BYTES, - cmocka_cb).bytes.src, - save1.src, - save1.count); + AMbyteSpan doc4_save; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc4), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &doc4_save)); + assert_int_equal(doc4_save.count, save1.count); + assert_memory_equal(doc4_save.src, save1.src, doc4_save.count); } /** * \brief local inc increments all 
visible counters in a sequence */ static void test_local_inc_increments_all_visible_counters_in_a_sequence(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc1 = create("aaaa") */ - AMdoc* const doc1 = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); /* const seq = doc1.putObject("_root", "seq", []) */ - AMobjId const* const seq = AMpush( - &stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("seq"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const seq = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc1, AM_ROOT, AMstr("seq"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* doc1.insert(seq, 0, "hello") */ - AMfree(AMlistPutStr(doc1, seq, 0, true, AMstr("hello"))); + AMstackItem(NULL, AMlistPutStr(doc1, seq, 0, true, AMstr("hello")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const doc2 = load(doc1.save(), "bbbb"); */ - AMbyteSpan const save1 = AMpush(&stack, - AMsave(doc1), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMdoc* const doc2 = AMpush(&stack, - AMload(save1.src, save1.count), - AM_VALUE_DOC, - cmocka_cb).doc; - AMfree(AMsetActorId(doc2, AMpush(&stack, - AMactorIdInitStr(AMstr("bbbb")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMbyteSpan save1; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save1)); + AMdoc* doc2; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(save1.src, save1.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("bbbb")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMstackItem(NULL, AMsetActorId(doc2, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const doc3 = load(doc1.save(), "cccc"); */ - AMdoc* const doc3 = AMpush(&stack, - AMload(save1.src, save1.count), - AM_VALUE_DOC, - cmocka_cb).doc; - AMfree(AMsetActorId(doc3, AMpush(&stack, - AMactorIdInitStr(AMstr("cccc")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMdoc* doc3; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(save1.src, save1.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc3)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("cccc")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMstackItem(NULL, AMsetActorId(doc3, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* let heads = doc1.getHeads() */ - AMchangeHashes const heads1 = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads1 = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* doc1.put(seq, 0, 20) */ - AMfree(AMlistPutInt(doc1, seq, 0, false, 20)); + AMstackItem(NULL, AMlistPutInt(doc1, seq, 0, false, 20), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc2.put(seq, 0, 0, "counter") */ - AMfree(AMlistPutCounter(doc2, seq, 0, false, 0)); + AMstackItem(NULL, AMlistPutCounter(doc2, seq, 0, false, 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc3.put(seq, 0, 10, 
"counter") */ - AMfree(AMlistPutCounter(doc3, seq, 0, false, 10)); + AMstackItem(NULL, AMlistPutCounter(doc3, seq, 0, false, 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc1.applyChanges(doc2.getChanges(heads)) */ - AMchanges const changes2 = AMpush(&stack, - AMgetChanges(doc2, &heads1), - AM_VALUE_CHANGES, - cmocka_cb).changes; - AMfree(AMapplyChanges(doc1, &changes2)); + AMitems const changes2 = + AMstackItems(stack_ptr, AMgetChanges(doc2, &heads1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMstackItem(NULL, AMapplyChanges(doc1, &changes2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc1.applyChanges(doc3.getChanges(heads)) */ - AMchanges const changes3 = AMpush(&stack, - AMgetChanges(doc3, &heads1), - AM_VALUE_CHANGES, - cmocka_cb).changes; - AMfree(AMapplyChanges(doc1, &changes3)); + AMitems const changes3 = + AMstackItems(stack_ptr, AMgetChanges(doc3, &heads1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMstackItem(NULL, AMapplyChanges(doc1, &changes3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* let result = doc1.getAll(seq, 0) */ - AMobjItems result = AMpush(&stack, - AMlistGetAll(doc1, seq, 0, NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; + AMitems result = AMstackItems(stack_ptr, AMlistGetAll(doc1, seq, 0, NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_COUNTER | AM_VAL_TYPE_INT)); /* assert.deepEqual(result, [ ['int', 20, '3@aaaa'], ['counter', 0, '3@bbbb'], ['counter', 10, '3@cccc'], ]) */ - AMobjItem const* result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).int_, 20); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 3); - AMbyteSpan str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + AMitem* result_item = AMitemsNext(&result, 1); + int64_t int_; + assert_true(AMitemToInt(result_item, &int_)); + assert_int_equal(int_, 20); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 3); + AMbyteSpan str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "aaaa", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 0); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 3); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + int64_t counter; + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 0); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 3); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_memory_equal(str.src, "bbbb", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 10); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 3); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 10); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 3); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "cccc", str.count); /* doc1.increment(seq, 0, 5) */ - AMfree(AMlistIncrement(doc1, seq, 0, 5)); + AMstackItem(NULL, AMlistIncrement(doc1, seq, 0, 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* result = doc1.getAll(seq, 0) */ - result = AMpush(&stack, - AMlistGetAll(doc1, seq, 0, NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; + result = AMstackItems(stack_ptr, 
AMlistGetAll(doc1, seq, 0, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_COUNTER)); /* assert.deepEqual(result, [ ['counter', 5, '3@bbbb'], ['counter', 15, '3@cccc'], ]) */ - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 5); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 3); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 5); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 3); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_int_equal(str.count, 4); assert_memory_equal(str.src, "bbbb", str.count); - result_item = AMobjItemsNext(&result, 1); - assert_int_equal(AMobjItemValue(result_item).counter, 15); - assert_int_equal(AMobjIdCounter(AMobjItemObjId(result_item)), 3); - str = AMactorIdStr(AMobjIdActorId(AMobjItemObjId(result_item))); + result_item = AMitemsNext(&result, 1); + assert_true(AMitemToCounter(result_item, &counter)); + assert_int_equal(counter, 15); + assert_int_equal(AMobjIdCounter(AMitemObjId(result_item)), 3); + str = AMactorIdStr(AMobjIdActorId(AMitemObjId(result_item))); assert_memory_equal(str.src, "cccc", str.count); /* */ /* const save = doc1.save() */ - AMbyteSpan const save = AMpush(&stack, - AMsave(doc1), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan save; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &save)); /* const doc4 = load(save) */ - AMdoc* const doc4 = AMpush(&stack, - AMload(save.src, save.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* doc4; + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMload(save.src, save.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc4)); /* assert.deepEqual(doc4.save(), save); */ - assert_memory_equal(AMpush(&stack, - AMsave(doc4), - AM_VALUE_BYTES, - cmocka_cb).bytes.src, - save.src, - save.count); + AMbyteSpan doc4_save; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc4), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &doc4_save)); + assert_int_equal(doc4_save.count, save.count); + assert_memory_equal(doc4_save.src, save.src, doc4_save.count); } /** @@ -1197,314 +1100,269 @@ static void test_paths_can_be_used_instead_of_objids(void** state); * \brief should be able to fetch changes by hash */ static void test_should_be_able_to_fetch_changes_by_hash(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc1 = create("aaaa") */ - AMdoc* const doc1 = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); /* const doc2 = create("bbbb") */ - AMdoc* const doc2 = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("bbbb")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("bbbb")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc2; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), 
cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); /* doc1.put("/", "a", "b") */ - AMfree(AMmapPutStr(doc1, AM_ROOT, AMstr("a"), AMstr("b"))); + AMstackItem(NULL, AMmapPutStr(doc1, AM_ROOT, AMstr("a"), AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc2.put("/", "b", "c") */ - AMfree(AMmapPutStr(doc2, AM_ROOT, AMstr("b"), AMstr("c"))); + AMstackItem(NULL, AMmapPutStr(doc2, AM_ROOT, AMstr("b"), AMstr("c")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const head1 = doc1.getHeads() */ - AMchangeHashes head1 = AMpush(&stack, - AMgetHeads(doc1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems head1 = AMstackItems(stack_ptr, AMgetHeads(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* const head2 = doc2.getHeads() */ - AMchangeHashes head2 = AMpush(&stack, - AMgetHeads(doc2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems head2 = AMstackItems(stack_ptr, AMgetHeads(doc2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* const change1 = doc1.getChangeByHash(head1[0]) - if (change1 === null) { throw new RangeError("change1 should not be null") */ - AMbyteSpan const change_hash1 = AMchangeHashesNext(&head1, 1); - AMchanges change1 = AMpush( - &stack, - AMgetChangeByHash(doc1, change_hash1.src, change_hash1.count), - AM_VALUE_CHANGES, - cmocka_cb).changes; + if (change1 === null) { throw new RangeError("change1 should not be + null") */ + AMbyteSpan change_hash1; + assert_true(AMitemToChangeHash(AMitemsNext(&head1, 1), &change_hash1)); + AMchange const* change1; + assert_true(AMitemToChange(AMstackItem(stack_ptr, AMgetChangeByHash(doc1, change_hash1.src, change_hash1.count), + cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)), + &change1)); /* const change2 = doc1.getChangeByHash(head2[0]) assert.deepEqual(change2, null) */ - AMbyteSpan const change_hash2 = AMchangeHashesNext(&head2, 1); - AMpush(&stack, - AMgetChangeByHash(doc1, change_hash2.src, change_hash2.count), - AM_VALUE_VOID, - cmocka_cb); + AMbyteSpan change_hash2; + assert_true(AMitemToChangeHash(AMitemsNext(&head2, 1), &change_hash2)); + AMstackItem(NULL, AMgetChangeByHash(doc1, change_hash2.src, change_hash2.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(decodeChange(change1).hash, head1[0]) */ - assert_memory_equal(AMchangeHash(AMchangesNext(&change1, 1)).src, - change_hash1.src, - change_hash1.count); + assert_memory_equal(AMchangeHash(change1).src, change_hash1.src, change_hash1.count); } /** * \brief recursive sets are possible */ static void test_recursive_sets_are_possible(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create("aaaa") */ - AMdoc* const doc = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const l1 = doc.putObject("_root", "list", [{ foo: "bar" }, [1, 2, 3]] */ - AMobjId const* const l1 = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const l1 = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + 
AMexpect(AM_VAL_TYPE_OBJ_TYPE))); { - AMobjId const* const map = AMpush( - &stack, - AMlistPutObject(doc, l1, 0, true, AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMmapPutStr(doc, map, AMstr("foo"), AMstr("bar"))); - AMobjId const* const list = AMpush( - &stack, - AMlistPutObject(doc, l1, SIZE_MAX, true, AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const map = AMitemObjId(AMstackItem( + stack_ptr, AMlistPutObject(doc, l1, 0, true, AM_OBJ_TYPE_MAP), cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMmapPutStr(doc, map, AMstr("foo"), AMstr("bar")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMlistPutObject(doc, l1, SIZE_MAX, true, AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); for (int value = 1; value != 4; ++value) { - AMfree(AMlistPutInt(doc, list, SIZE_MAX, true, value)); + AMstackItem(NULL, AMlistPutInt(doc, list, SIZE_MAX, true, value), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } } /* const l2 = doc.insertObject(l1, 0, { zip: ["a", "b"] }) */ - AMobjId const* const l2 = AMpush( - &stack, - AMlistPutObject(doc, l1, 0, true, AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const l2 = AMitemObjId(AMstackItem(stack_ptr, AMlistPutObject(doc, l1, 0, true, AM_OBJ_TYPE_MAP), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); { - AMobjId const* const list = AMpush( - &stack, - AMmapPutObject(doc, l2, AMstr("zip"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMlistPutStr(doc, list, SIZE_MAX, true, AMstr("a"))); - AMfree(AMlistPutStr(doc, list, SIZE_MAX, true, AMstr("b"))); + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, l2, AMstr("zip"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMlistPutStr(doc, list, SIZE_MAX, true, AMstr("a")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMlistPutStr(doc, list, SIZE_MAX, true, AMstr("b")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } - /* const l3 = doc.putObject("_root", "info1", "hello world") // 'text' object */ - AMobjId const* const l3 = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("info1"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMspliceText(doc, l3, 0, 0, AMstr("hello world"))); + /* const l3 = doc.putObject("_root", "info1", "hello world") // 'text' + * object */ + AMobjId const* const l3 = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("info1"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMspliceText(doc, l3, 0, 0, AMstr("hello world")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* doc.put("_root", "info2", "hello world") // 'str' */ - AMfree(AMmapPutStr(doc, AM_ROOT, AMstr("info2"), AMstr("hello world"))); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("info2"), AMstr("hello world")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* const l4 = doc.putObject("_root", "info3", "hello world") */ - AMobjId const* const l4 = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("info3"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; - AMfree(AMspliceText(doc, l4, 0, 0, AMstr("hello world"))); + AMobjId const* const l4 = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("info3"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); + AMstackItem(NULL, AMspliceText(doc, l4, 0, 0, AMstr("hello world")), cmocka_cb, 
AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(doc.materialize(), { "list": [{ zip: ["a", "b"] }, { foo: "bar" }, [1, 2, 3]], "info1": "hello world", "info2": "hello world", "info3": "hello world", - }) */ - AMmapItems doc_items = AMpush(&stack, - AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* doc_item = AMmapItemsNext(&doc_items, 1); - AMbyteSpan key = AMmapItemKey(doc_item); + }) */ + AMitems doc_items = AMstackItems(stack_ptr, AMmapRange(doc, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE | AM_VAL_TYPE_STR)); + AMitem* doc_item = AMitemsNext(&doc_items, 1); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("info1")); assert_memory_equal(key.src, "info1", key.count); - AMbyteSpan str = AMpush(&stack, - AMtext(doc, AMmapItemObjId(doc_item), NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMtext(doc, AMitemObjId(doc_item), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello world")); assert_memory_equal(str.src, "hello world", str.count); - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + doc_item = AMitemsNext(&doc_items, 1); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("info2")); assert_memory_equal(key.src, "info2", key.count); - str = AMmapItemValue(doc_item).str; + assert_true(AMitemToStr(doc_item, &str)); assert_int_equal(str.count, strlen("hello world")); assert_memory_equal(str.src, "hello world", str.count); - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + doc_item = AMitemsNext(&doc_items, 1); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("info3")); assert_memory_equal(key.src, "info3", key.count); - str = AMpush(&stack, - AMtext(doc, AMmapItemObjId(doc_item), NULL), - AM_VALUE_STR, - cmocka_cb).str; + assert_true(AMitemToStr( + AMstackItem(stack_ptr, AMtext(doc, AMitemObjId(doc_item), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello world")); assert_memory_equal(str.src, "hello world", str.count); - doc_item = AMmapItemsNext(&doc_items, 1); - key = AMmapItemKey(doc_item); + doc_item = AMitemsNext(&doc_items, 1); + assert_int_equal(AMitemIdxType(doc_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(doc_item, &key)); assert_int_equal(key.count, strlen("list")); assert_memory_equal(key.src, "list", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(doc_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMlistItem const* list_item = AMlistItemsNext(&list_items, 1); + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(doc_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + AMitem const* list_item = AMitemsNext(&list_items, 1); { - AMmapItems map_items = AMpush( - &stack, - AMmapRange(doc, AMlistItemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* map_item = AMmapItemsNext(&map_items, 1); - AMbyteSpan const key = AMmapItemKey(map_item); + AMitems map_items = + AMstackItems(stack_ptr, AMmapRange(doc, 
AMitemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + AMitem const* map_item = AMitemsNext(&map_items, 1); + assert_int_equal(AMitemIdxType(map_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(map_item, &key)); assert_int_equal(key.count, strlen("zip")); assert_memory_equal(key.src, "zip", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(map_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(map_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE | AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); } } - list_item = AMlistItemsNext(&list_items, 1); + list_item = AMitemsNext(&list_items, 1); { - AMmapItems map_items = AMpush( - &stack, - AMmapRange(doc, AMlistItemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* map_item = AMmapItemsNext(&map_items, 1); - AMbyteSpan const key = AMmapItemKey(map_item); + AMitems map_items = + AMstackItems(stack_ptr, AMmapRange(doc, AMitemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE | AM_VAL_TYPE_STR)); + AMitem* map_item = AMitemsNext(&map_items, 1); + assert_int_equal(AMitemIdxType(map_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(map_item, &key)); assert_int_equal(key.count, strlen("foo")); assert_memory_equal(key.src, "foo", key.count); - AMbyteSpan const str = AMmapItemValue(map_item).str; + AMbyteSpan str; + assert_true(AMitemToStr(map_item, &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bar", str.count); } - list_item = AMlistItemsNext(&list_items, 1); + list_item = AMitemsNext(&list_items, 1); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMlistItemObjId(list_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - assert_int_equal(AMlistItemValue( - AMlistItemsNext(&list_items, 1)).int_, - 1); - assert_int_equal(AMlistItemValue( - AMlistItemsNext(&list_items, 1)).int_, - 2); - assert_int_equal(AMlistItemValue( - AMlistItemsNext(&list_items, 1)).int_, - 3); + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(list_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_INT)); + int64_t int_; + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 1); + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 2); + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 3); } } /* assert.deepEqual(doc.materialize(l2), { zip: ["a", "b"] }) */ - AMmapItems map_items = AMpush( - &stack, - AMmapRange(doc, l2, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* map_item = AMmapItemsNext(&map_items, 1); - key = AMmapItemKey(map_item); + AMitems map_items = AMstackItems(stack_ptr, AMmapRange(doc, l2, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); 
+ AMitem const* map_item = AMitemsNext(&map_items, 1); + assert_int_equal(AMitemIdxType(map_item), AM_IDX_TYPE_KEY); + assert_true(AMitemKey(map_item, &key)); assert_int_equal(key.count, strlen("zip")); assert_memory_equal(key.src, "zip", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(map_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(map_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); } - /* assert.deepEqual(doc.materialize(l1), [{ zip: ["a", "b"] }, { foo: "bar" }, [1, 2, 3]] */ - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, l1, 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMlistItem const* list_item = AMlistItemsNext(&list_items, 1); + /* assert.deepEqual(doc.materialize(l1), [{ zip: ["a", "b"] }, { foo: "bar" + * }, [1, 2, 3]] */ + AMitems list_items = + AMstackItems(stack_ptr, AMlistRange(doc, l1, 0, SIZE_MAX, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + AMitem const* list_item = AMitemsNext(&list_items, 1); { - AMmapItems map_items = AMpush( - &stack, - AMmapRange(doc, AMlistItemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* map_item = AMmapItemsNext(&map_items, 1); - AMbyteSpan const key = AMmapItemKey(map_item); + AMitems map_items = + AMstackItems(stack_ptr, AMmapRange(doc, AMitemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + AMitem const* map_item = AMitemsNext(&map_items, 1); + assert_int_equal(AMitemIdxType(map_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(map_item, &key)); assert_int_equal(key.count, strlen("zip")); assert_memory_equal(key.src, "zip", key.count); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMmapItemObjId(map_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - AMbyteSpan str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(map_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "a", str.count); - str = AMlistItemValue(AMlistItemsNext(&list_items, 1)).str; + assert_true(AMitemToStr(AMitemsNext(&list_items, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "b", str.count); } } - list_item = AMlistItemsNext(&list_items, 1); + list_item = AMitemsNext(&list_items, 1); { - AMmapItems map_items = AMpush( - &stack, - AMmapRange(doc, AMlistItemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItem const* map_item = AMmapItemsNext(&map_items, 1); - AMbyteSpan const key = AMmapItemKey(map_item); + AMitems map_items = + AMstackItems(stack_ptr, AMmapRange(doc, AMitemObjId(list_item), AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + 
AMexpect(AM_VAL_TYPE_STR)); + AMitem* map_item = AMitemsNext(&map_items, 1); + assert_int_equal(AMitemIdxType(map_item), AM_IDX_TYPE_KEY); + AMbyteSpan key; + assert_true(AMitemKey(map_item, &key)); assert_int_equal(key.count, strlen("foo")); assert_memory_equal(key.src, "foo", key.count); - AMbyteSpan const str = AMmapItemValue(map_item).str; + AMbyteSpan str; + assert_true(AMitemToStr(map_item, &str)); assert_int_equal(str.count, 3); assert_memory_equal(str.src, "bar", str.count); } - list_item = AMlistItemsNext(&list_items, 1); + list_item = AMitemsNext(&list_items, 1); { - AMlistItems list_items = AMpush( - &stack, - AMlistRange(doc, AMlistItemObjId(list_item), 0, SIZE_MAX, NULL), - AM_VALUE_LIST_ITEMS, - cmocka_cb).list_items; - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).int_, - 1); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).int_, - 2); - assert_int_equal(AMlistItemValue(AMlistItemsNext(&list_items, 1)).int_, - 3); + AMitems list_items = AMstackItems(stack_ptr, AMlistRange(doc, AMitemObjId(list_item), 0, SIZE_MAX, NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_INT)); + int64_t int_; + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 1); + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 2); + assert_true(AMitemToInt(AMitemsNext(&list_items, 1), &int_)); + assert_int_equal(int_, 3); } /* assert.deepEqual(doc.materialize(l4), "hello world") */ - str = AMpush(&stack, AMtext(doc, l4, NULL), AM_VALUE_STR, cmocka_cb).str; + assert_true(AMitemToStr(AMstackItem(stack_ptr, AMtext(doc, l4, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hello world")); assert_memory_equal(str.src, "hello world", str.count); } @@ -1513,65 +1371,41 @@ static void test_recursive_sets_are_possible(void** state) { * \brief only returns an object id when objects are created */ static void test_only_returns_an_object_id_when_objects_are_created(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc = create("aaaa") */ - AMdoc* const doc = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc)); /* const r1 = doc.put("_root", "foo", "bar") assert.deepEqual(r1, null); */ - AMpush(&stack, - AMmapPutStr(doc, AM_ROOT, AMstr("foo"), AMstr("bar")), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("foo"), AMstr("bar")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const r2 = doc.putObject("_root", "list", []) */ - AMobjId const* const r2 = AMpush( - &stack, - AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const r2 = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc, AM_ROOT, AMstr("list"), AM_OBJ_TYPE_LIST), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* const r3 = doc.put("_root", "counter", 10, "counter") assert.deepEqual(r3, null); */ - AMpush(&stack, - AMmapPutCounter(doc, AM_ROOT, AMstr("counter"), 10), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMmapPutCounter(doc, 
AM_ROOT, AMstr("counter"), 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const r4 = doc.increment("_root", "counter", 1) assert.deepEqual(r4, null); */ - AMpush(&stack, - AMmapIncrement(doc, AM_ROOT, AMstr("counter"), 1), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMmapIncrement(doc, AM_ROOT, AMstr("counter"), 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const r5 = doc.delete("_root", "counter") assert.deepEqual(r5, null); */ - AMpush(&stack, - AMmapDelete(doc, AM_ROOT, AMstr("counter")), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMmapDelete(doc, AM_ROOT, AMstr("counter")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const r6 = doc.insert(r2, 0, 10); assert.deepEqual(r6, null); */ - AMpush(&stack, - AMlistPutInt(doc, r2, 0, true, 10), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMlistPutInt(doc, r2, 0, true, 10), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const r7 = doc.insertObject(r2, 0, {}); */ - AMobjId const* const r7 = AMpush( - &stack, - AMlistPutObject(doc, r2, 0, true, AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const r7 = AMitemObjId(AMstackItem(stack_ptr, AMlistPutObject(doc, r2, 0, true, AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* const r8 = doc.splice(r2, 1, 0, ["a", "b", "c"]); */ - AMvalue const STRS[] = {{.str_tag = AM_VALUE_STR, .str = {.src = "a", .count = 1}}, - {.str_tag = AM_VALUE_STR, .str = {.src = "b", .count = 1}}, - {.str_tag = AM_VALUE_STR, .str = {.src = "c", .count = 1}}}; - AMpush(&stack, - AMsplice(doc, r2, 1, 0, STRS, sizeof(STRS)/sizeof(AMvalue)), - AM_VALUE_VOID, - cmocka_cb); + AMresult* data = AMstackResult( + stack_ptr, AMresultFrom(3, AMitemFromStr(AMstr("a")), AMitemFromStr(AMstr("b")), AMitemFromStr(AMstr("c"))), + NULL, NULL); + AMstackItem(NULL, AMsplice(doc, r2, 1, 0, AMresultItems(data)), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepEqual(r2, "2@aaaa"); */ assert_int_equal(AMobjIdCounter(r2), 2); AMbyteSpan str = AMactorIdStr(AMobjIdActorId(r2)); @@ -1587,75 +1421,58 @@ static void test_only_returns_an_object_id_when_objects_are_created(void** state * \brief objects without properties are preserved */ static void test_objects_without_properties_are_preserved(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const doc1 = create("aaaa") */ - AMdoc* const doc1 = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), &actor_id)); + AMdoc* doc1; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1)); /* const a = doc1.putObject("_root", "a", {}); */ - AMobjId const* const a = AMpush( - &stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("a"), AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const a = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc1, AM_ROOT, AMstr("a"), AM_OBJ_TYPE_MAP), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* const b = doc1.putObject("_root", "b", {}); */ - AMobjId const* const b = AMpush( - &stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("b"), AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const b = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc1, AM_ROOT, AMstr("b"), 
AM_OBJ_TYPE_MAP), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* const c = doc1.putObject("_root", "c", {}); */ - AMobjId const* const c = AMpush( - &stack, - AMmapPutObject(doc1, AM_ROOT, AMstr("c"), AM_OBJ_TYPE_MAP), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const c = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(doc1, AM_ROOT, AMstr("c"), AM_OBJ_TYPE_MAP), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* const d = doc1.put(c, "d", "dd"); */ - AMfree(AMmapPutStr(doc1, c, AMstr("d"), AMstr("dd"))); + AMstackItem(NULL, AMmapPutStr(doc1, c, AMstr("d"), AMstr("dd")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const saved = doc1.save(); */ - AMbyteSpan const saved = AMpush(&stack, - AMsave(doc1), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan saved; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(doc1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &saved)); /* const doc2 = load(saved); */ - AMdoc* const doc2 = AMpush(&stack, - AMload(saved.src, saved.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* doc2; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(saved.src, saved.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2)); /* assert.deepEqual(doc2.getWithType("_root", "a"), ["map", a]) */ - AMmapItems doc_items = AMpush(&stack, - AMmapRange(doc2, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - assert_true(AMobjIdEqual(AMmapItemObjId(AMmapItemsNext(&doc_items, 1)), a)); + AMitems doc_items = AMstackItems(stack_ptr, AMmapRange(doc2, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE)); + assert_true(AMobjIdEqual(AMitemObjId(AMitemsNext(&doc_items, 1)), a)); /* assert.deepEqual(doc2.keys(a), []) */ - AMstrs keys = AMpush(&stack, - AMkeys(doc1, a, NULL), - AM_VALUE_STRS, - cmocka_cb).strs; - assert_int_equal(AMstrsSize(&keys), 0); + AMitems keys = AMstackItems(stack_ptr, AMkeys(doc1, a, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&keys), 0); /* assert.deepEqual(doc2.getWithType("_root", "b"), ["map", b]) */ - assert_true(AMobjIdEqual(AMmapItemObjId(AMmapItemsNext(&doc_items, 1)), b)); + assert_true(AMobjIdEqual(AMitemObjId(AMitemsNext(&doc_items, 1)), b)); /* assert.deepEqual(doc2.keys(b), []) */ - keys = AMpush(&stack, AMkeys(doc1, b, NULL), AM_VALUE_STRS, cmocka_cb).strs; - assert_int_equal(AMstrsSize(&keys), 0); + keys = AMstackItems(stack_ptr, AMkeys(doc1, b, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + assert_int_equal(AMitemsSize(&keys), 0); /* assert.deepEqual(doc2.getWithType("_root", "c"), ["map", c]) */ - assert_true(AMobjIdEqual(AMmapItemObjId(AMmapItemsNext(&doc_items, 1)), c)); + assert_true(AMobjIdEqual(AMitemObjId(AMitemsNext(&doc_items, 1)), c)); /* assert.deepEqual(doc2.keys(c), ["d"]) */ - keys = AMpush(&stack, AMkeys(doc1, c, NULL), AM_VALUE_STRS, cmocka_cb).strs; - AMbyteSpan str = AMstrsNext(&keys, 1); + keys = AMstackItems(stack_ptr, AMkeys(doc1, c, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMbyteSpan str; + assert_true(AMitemToStr(AMitemsNext(&keys, 1), &str)); assert_int_equal(str.count, 1); assert_memory_equal(str.src, "d", str.count); /* assert.deepEqual(doc2.getWithType(c, "d"), ["str", "dd"]) */ - AMobjItems obj_items = AMpush(&stack, - AMobjValues(doc1, c, NULL), - AM_VALUE_OBJ_ITEMS, - cmocka_cb).obj_items; - str = AMobjItemValue(AMobjItemsNext(&obj_items, 1)).str; + AMitems obj_items = AMstackItems(stack_ptr, AMobjItems(doc1, c, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + 
assert_true(AMitemToStr(AMitemsNext(&obj_items, 1), &str)); assert_int_equal(str.count, 2); assert_memory_equal(str.src, "dd", str.count); } @@ -1664,177 +1481,162 @@ static void test_objects_without_properties_are_preserved(void** state) { * \brief should allow you to forkAt a heads */ static void test_should_allow_you_to_forkAt_a_heads(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const A = create("aaaaaa") */ - AMdoc* const A = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aaaaaa")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aaaaaa")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* A; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &A)); /* A.put("/", "key1", "val1"); */ - AMfree(AMmapPutStr(A, AM_ROOT, AMstr("key1"), AMstr("val1"))); + AMstackItem(NULL, AMmapPutStr(A, AM_ROOT, AMstr("key1"), AMstr("val1")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* A.put("/", "key2", "val2"); */ - AMfree(AMmapPutStr(A, AM_ROOT, AMstr("key2"), AMstr("val2"))); + AMstackItem(NULL, AMmapPutStr(A, AM_ROOT, AMstr("key2"), AMstr("val2")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const heads1 = A.getHeads(); */ - AMchangeHashes const heads1 = AMpush(&stack, - AMgetHeads(A), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads1 = AMstackItems(stack_ptr, AMgetHeads(A), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* const B = A.fork("bbbbbb") */ - AMdoc* const B = AMpush(&stack, AMfork(A, NULL), AM_VALUE_DOC, cmocka_cb).doc; - AMfree(AMsetActorId(B, AMpush(&stack, - AMactorIdInitStr(AMstr("bbbbbb")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMdoc* B; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMfork(A, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &B)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("bbbbbb")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(B, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* A.put("/", "key3", "val3"); */ - AMfree(AMmapPutStr(A, AM_ROOT, AMstr("key3"), AMstr("val3"))); + AMstackItem(NULL, AMmapPutStr(A, AM_ROOT, AMstr("key3"), AMstr("val3")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* B.put("/", "key4", "val4"); */ - AMfree(AMmapPutStr(B, AM_ROOT, AMstr("key4"), AMstr("val4"))); + AMstackItem(NULL, AMmapPutStr(B, AM_ROOT, AMstr("key4"), AMstr("val4")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* A.merge(B) */ - AMfree(AMmerge(A, B)); + AMstackItem(NULL, AMmerge(A, B), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* const heads2 = A.getHeads(); */ - AMchangeHashes const heads2 = AMpush(&stack, - AMgetHeads(A), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; + AMitems const heads2 = AMstackItems(stack_ptr, AMgetHeads(A), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* A.put("/", "key5", "val5"); */ - AMfree(AMmapPutStr(A, AM_ROOT, AMstr("key5"), AMstr("val5"))); - /* assert.deepEqual(A.forkAt(heads1).materialize("/"), A.materialize("/", heads1) */ - AMmapItems AforkAt1_items = AMpush( - &stack, - AMmapRange( - AMpush(&stack, AMfork(A, &heads1), AM_VALUE_DOC, cmocka_cb).doc, - AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItems A1_items = AMpush(&stack, - AMmapRange(A, AM_ROOT, 
AMstr(NULL), AMstr(NULL), &heads1), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - assert_true(AMmapItemsEqual(&AforkAt1_items, &A1_items)); - /* assert.deepEqual(A.forkAt(heads2).materialize("/"), A.materialize("/", heads2) */ - AMmapItems AforkAt2_items = AMpush( - &stack, - AMmapRange( - AMpush(&stack, AMfork(A, &heads2), AM_VALUE_DOC, cmocka_cb).doc, - AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - AMmapItems A2_items = AMpush(&stack, - AMmapRange(A, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads2), - AM_VALUE_MAP_ITEMS, - cmocka_cb).map_items; - assert_true(AMmapItemsEqual(&AforkAt2_items, &A2_items)); + AMstackItem(NULL, AMmapPutStr(A, AM_ROOT, AMstr("key5"), AMstr("val5")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + /* assert.deepEqual(A.forkAt(heads1).materialize("/"), A.materialize("/", + * heads1) */ + AMdoc* A_forkAt1; + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMfork(A, &heads1), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &A_forkAt1)); + AMitems AforkAt1_items = AMstackItems(stack_ptr, AMmapRange(A_forkAt1, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMitems A1_items = AMstackItems(stack_ptr, AMmapRange(A, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads1), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemsEqual(&AforkAt1_items, &A1_items)); + /* assert.deepEqual(A.forkAt(heads2).materialize("/"), A.materialize("/", + * heads2) */ + AMdoc* A_forkAt2; + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMfork(A, &heads2), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &A_forkAt2)); + AMitems AforkAt2_items = AMstackItems(stack_ptr, AMmapRange(A_forkAt2, AM_ROOT, AMstr(NULL), AMstr(NULL), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)); + AMitems A2_items = AMstackItems(stack_ptr, AMmapRange(A, AM_ROOT, AMstr(NULL), AMstr(NULL), &heads2), cmocka_cb, + AMexpect(AM_VAL_TYPE_STR)); + assert_true(AMitemsEqual(&AforkAt2_items, &A2_items)); } /** * \brief should handle merging text conflicts then saving & loading */ static void test_should_handle_merging_text_conflicts_then_saving_and_loading(void** state) { - AMresultStack* stack = *state; + BaseState* base_state = *state; + AMstack** stack_ptr = &base_state->stack; /* const A = create("aabbcc") */ - AMdoc* const A = AMpush(&stack, - AMcreate(AMpush(&stack, - AMactorIdInitStr(AMstr("aabbcc")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("aabbcc")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* A; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &A)); /* const At = A.putObject('_root', 'text', "") */ - AMobjId const* const At = AMpush( - &stack, - AMmapPutObject(A, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const At = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(A, AM_ROOT, AMstr("text"), AM_OBJ_TYPE_TEXT), cmocka_cb, + AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* A.splice(At, 0, 0, 'hello') */ - AMfree(AMspliceText(A, At, 0, 0, AMstr("hello"))); + AMstackItem(NULL, AMspliceText(A, At, 0, 0, AMstr("hello")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* const B = A.fork() */ - AMdoc* const B = AMpush(&stack, AMfork(A, NULL), AM_VALUE_DOC, cmocka_cb).doc; + AMdoc* B; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMfork(A, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &B)); /* 
*/ /* assert.deepEqual(B.getWithType("_root", "text"), ["text", At]) */ - AMbyteSpan str = AMpush(&stack, - AMtext(B, - AMpush(&stack, - AMmapGet(B, AM_ROOT, AMstr("text"), NULL), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id, - NULL), - AM_VALUE_STR, - cmocka_cb).str; - AMbyteSpan const str2 = AMpush(&stack, - AMtext(A, At, NULL), - AM_VALUE_STR, - cmocka_cb).str; + AMbyteSpan str; + assert_true( + AMitemToStr(AMstackItem(stack_ptr, + AMtext(B, + AMitemObjId(AMstackItem(stack_ptr, AMmapGet(B, AM_ROOT, AMstr("text"), NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))), + NULL), + cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), + &str)); + AMbyteSpan str2; + assert_true(AMitemToStr(AMstackItem(stack_ptr, AMtext(A, At, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str2)); assert_int_equal(str.count, str2.count); assert_memory_equal(str.src, str2.src, str.count); /* */ /* B.splice(At, 4, 1) */ - AMfree(AMspliceText(B, At, 4, 1, AMstr(NULL))); + AMstackItem(NULL, AMspliceText(B, At, 4, 1, AMstr(NULL)), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* B.splice(At, 4, 0, '!') */ - AMfree(AMspliceText(B, At, 4, 0, AMstr("!"))); + AMstackItem(NULL, AMspliceText(B, At, 4, 0, AMstr("!")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* B.splice(At, 5, 0, ' ') */ - AMfree(AMspliceText(B, At, 5, 0, AMstr(" "))); + AMstackItem(NULL, AMspliceText(B, At, 5, 0, AMstr(" ")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* B.splice(At, 6, 0, 'world') */ - AMfree(AMspliceText(B, At, 6, 0, AMstr("world"))); + AMstackItem(NULL, AMspliceText(B, At, 6, 0, AMstr("world")), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* A.merge(B) */ - AMfree(AMmerge(A, B)); + AMstackItem(NULL, AMmerge(A, B), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* const binary = A.save() */ - AMbyteSpan const binary = AMpush(&stack, - AMsave(A), - AM_VALUE_BYTES, - cmocka_cb).bytes; + AMbyteSpan binary; + assert_true(AMitemToBytes(AMstackItem(stack_ptr, AMsave(A), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &binary)); /* */ /* const C = load(binary) */ - AMdoc* const C = AMpush(&stack, - AMload(binary.src, binary.count), - AM_VALUE_DOC, - cmocka_cb).doc; + AMdoc* C; + assert_true(AMitemToDoc( + AMstackItem(stack_ptr, AMload(binary.src, binary.count), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &C)); /* */ /* assert.deepEqual(C.getWithType('_root', 'text'), ['text', '1@aabbcc'] */ - AMobjId const* const C_text = AMpush(&stack, - AMmapGet(C, AM_ROOT, AMstr("text"), NULL), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const C_text = AMitemObjId( + AMstackItem(stack_ptr, AMmapGet(C, AM_ROOT, AMstr("text"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); assert_int_equal(AMobjIdCounter(C_text), 1); str = AMactorIdStr(AMobjIdActorId(C_text)); assert_int_equal(str.count, strlen("aabbcc")); assert_memory_equal(str.src, "aabbcc", str.count); /* assert.deepEqual(C.text(At), 'hell! world') */ - str = AMpush(&stack, AMtext(C, At, NULL), AM_VALUE_STR, cmocka_cb).str; + assert_true(AMitemToStr(AMstackItem(stack_ptr, AMtext(C, At, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_STR)), &str)); assert_int_equal(str.count, strlen("hell! world")); assert_memory_equal(str.src, "hell! 
world", str.count); } int run_ported_wasm_basic_tests(void) { const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_create_clone_and_free, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_start_and_commit, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_getting_a_nonexistent_prop_does_not_throw_an_error, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_set_and_get_a_simple_value, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_use_bytes, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_make_subobjects, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_make_lists, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_lists_have_insert_set_splice_and_push_ops, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_delete_non_existent_props, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_del, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_use_counters, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_save_all_or_incrementally, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text_2, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_local_inc_increments_all_visible_counters_in_a_map, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_local_inc_increments_all_visible_counters_in_a_sequence, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_be_able_to_fetch_changes_by_hash, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_recursive_sets_are_possible, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_only_returns_an_object_id_when_objects_are_created, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_objects_without_properties_are_preserved, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_allow_you_to_forkAt_a_heads, setup_stack, teardown_stack), - cmocka_unit_test_setup_teardown(test_should_handle_merging_text_conflicts_then_saving_and_loading, setup_stack, teardown_stack) - }; + cmocka_unit_test_setup_teardown(test_create_clone_and_free, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_start_and_commit, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_getting_a_nonexistent_prop_does_not_throw_an_error, setup_base, + teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_set_and_get_a_simple_value, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_use_bytes, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_make_subobjects, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_make_lists, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_lists_have_insert_set_splice_and_push_ops, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_delete_non_existent_props, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_del, setup_base, teardown_base), + 
cmocka_unit_test_setup_teardown(test_should_be_able_to_use_counters, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_save_all_or_incrementally, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_splice_text_2, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_local_inc_increments_all_visible_counters_in_a_map, setup_base, + teardown_base), + cmocka_unit_test_setup_teardown(test_local_inc_increments_all_visible_counters_in_a_sequence, setup_base, + teardown_base), + cmocka_unit_test_setup_teardown(test_should_be_able_to_fetch_changes_by_hash, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_recursive_sets_are_possible, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_only_returns_an_object_id_when_objects_are_created, setup_base, + teardown_base), + cmocka_unit_test_setup_teardown(test_objects_without_properties_are_preserved, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_allow_you_to_forkAt_a_heads, setup_base, teardown_base), + cmocka_unit_test_setup_teardown(test_should_handle_merging_text_conflicts_then_saving_and_loading, setup_base, + teardown_base)}; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/rust/automerge-c/test/ported_wasm/suite.c b/rust/automerge-c/test/ported_wasm/suite.c index fc10fadc..440ed899 100644 --- a/rust/automerge-c/test/ported_wasm/suite.c +++ b/rust/automerge-c/test/ported_wasm/suite.c @@ -1,6 +1,6 @@ +#include #include #include -#include #include /* third-party */ @@ -11,8 +11,5 @@ extern int run_ported_wasm_basic_tests(void); extern int run_ported_wasm_sync_tests(void); int run_ported_wasm_suite(void) { - return ( - run_ported_wasm_basic_tests() + - run_ported_wasm_sync_tests() - ); + return (run_ported_wasm_basic_tests() + run_ported_wasm_sync_tests()); } diff --git a/rust/automerge-c/test/ported_wasm/sync_tests.c b/rust/automerge-c/test/ported_wasm/sync_tests.c index a1ddbf3c..099f8dbf 100644 --- a/rust/automerge-c/test/ported_wasm/sync_tests.c +++ b/rust/automerge-c/test/ported_wasm/sync_tests.c @@ -9,10 +9,12 @@ /* local */ #include -#include "../stack_utils.h" +#include +#include "../base_state.h" +#include "../cmocka_utils.h" typedef struct { - AMresultStack* stack; + BaseState* base_state; AMdoc* n1; AMdoc* n2; AMsyncState* s1; @@ -21,43 +23,35 @@ typedef struct { static int setup(void** state) { TestState* test_state = test_calloc(1, sizeof(TestState)); - test_state->n1 = AMpush(&test_state->stack, - AMcreate(AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("01234567")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; - test_state->n2 = AMpush(&test_state->stack, - AMcreate(AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("89abcdef")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; - test_state->s1 = AMpush(&test_state->stack, - AMsyncStateInit(), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; - test_state->s2 = AMpush(&test_state->stack, - AMsyncStateInit(), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + setup_base((void**)&test_state->base_state); + AMstack** stack_ptr = &test_state->base_state->stack; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("01234567")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + assert_true( + 
AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &test_state->n1)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("89abcdef")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + assert_true( + AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &test_state->n2)); + assert_true(AMitemToSyncState( + AMstackItem(stack_ptr, AMsyncStateInit(), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_STATE)), &test_state->s1)); + assert_true(AMitemToSyncState( + AMstackItem(stack_ptr, AMsyncStateInit(), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_STATE)), &test_state->s2)); *state = test_state; return 0; } static int teardown(void** state) { TestState* test_state = *state; - AMfreeStack(&test_state->stack); + teardown_base((void**)&test_state->base_state); test_free(test_state); return 0; } -static void sync(AMdoc* a, - AMdoc* b, - AMsyncState* a_sync_state, - AMsyncState* b_sync_state) { +static void sync(AMdoc* a, AMdoc* b, AMsyncState* a_sync_state, AMsyncState* b_sync_state) { static size_t const MAX_ITER = 10; AMsyncMessage const* a2b_msg = NULL; @@ -66,29 +60,35 @@ static void sync(AMdoc* a, do { AMresult* a2b_msg_result = AMgenerateSyncMessage(a, a_sync_state); AMresult* b2a_msg_result = AMgenerateSyncMessage(b, b_sync_state); - AMvalue value = AMresultValue(a2b_msg_result); - switch (value.tag) { - case AM_VALUE_SYNC_MESSAGE: { - a2b_msg = value.sync_message; - AMfree(AMreceiveSyncMessage(b, b_sync_state, a2b_msg)); - } - break; - case AM_VALUE_VOID: a2b_msg = NULL; break; + AMitem* item = AMresultItem(a2b_msg_result); + switch (AMitemValType(item)) { + case AM_VAL_TYPE_SYNC_MESSAGE: { + AMitemToSyncMessage(item, &a2b_msg); + AMstackResult(NULL, AMreceiveSyncMessage(b, b_sync_state, a2b_msg), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + } break; + case AM_VAL_TYPE_VOID: + a2b_msg = NULL; + break; } - value = AMresultValue(b2a_msg_result); - switch (value.tag) { - case AM_VALUE_SYNC_MESSAGE: { - b2a_msg = value.sync_message; - AMfree(AMreceiveSyncMessage(a, a_sync_state, b2a_msg)); - } - break; - case AM_VALUE_VOID: b2a_msg = NULL; break; + item = AMresultItem(b2a_msg_result); + switch (AMitemValType(item)) { + case AM_VAL_TYPE_SYNC_MESSAGE: { + AMitemToSyncMessage(item, &b2a_msg); + AMstackResult(NULL, AMreceiveSyncMessage(a, a_sync_state, b2a_msg), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + } break; + case AM_VAL_TYPE_VOID: + b2a_msg = NULL; + break; } if (++iter > MAX_ITER) { - fail_msg("Did not synchronize within %d iterations. " - "Do you have a bug causing an infinite loop?", MAX_ITER); + fail_msg( + "Did not synchronize within %d iterations. 
" + "Do you have a bug causing an infinite loop?", + MAX_ITER); } - } while(a2b_msg || b2a_msg); + } while (a2b_msg || b2a_msg); } static time_t const TIME_0 = 0; @@ -96,151 +96,135 @@ static time_t const TIME_0 = 0; /** * \brief should send a sync message implying no local data */ -static void test_should_send_a_sync_message_implying_no_local_data(void **state) { +static void test_should_send_a_sync_message_implying_no_local_data(void** state) { /* const doc = create() const s1 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* const m1 = doc.generateSyncMessage(s1) if (m1 === null) { throw new RangeError("message should not be null") } const message: DecodedSyncMessage = decodeSyncMessage(m1) */ - AMsyncMessage const* const m1 = AMpush(&test_state->stack, - AMgenerateSyncMessage( - test_state->n1, - test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + AMsyncMessage const* m1; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &m1)); /* assert.deepStrictEqual(message.heads, []) */ - AMchangeHashes heads = AMsyncMessageHeads(m1); - assert_int_equal(AMchangeHashesSize(&heads), 0); + AMitems heads = AMstackItems(stack_ptr, AMsyncMessageHeads(m1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&heads), 0); /* assert.deepStrictEqual(message.need, []) */ - AMchangeHashes needs = AMsyncMessageNeeds(m1); - assert_int_equal(AMchangeHashesSize(&needs), 0); + AMitems needs = AMstackItems(stack_ptr, AMsyncMessageNeeds(m1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&needs), 0); /* assert.deepStrictEqual(message.have.length, 1) */ - AMsyncHaves haves = AMsyncMessageHaves(m1); - assert_int_equal(AMsyncHavesSize(&haves), 1); + AMitems haves = AMstackItems(stack_ptr, AMsyncMessageHaves(m1), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_HAVE)); + assert_int_equal(AMitemsSize(&haves), 1); /* assert.deepStrictEqual(message.have[0].lastSync, []) */ - AMsyncHave const* have0 = AMsyncHavesNext(&haves, 1); - AMchangeHashes last_sync = AMsyncHaveLastSync(have0); - assert_int_equal(AMchangeHashesSize(&last_sync), 0); + AMsyncHave const* have0; + assert_true(AMitemToSyncHave(AMitemsNext(&haves, 1), &have0)); + AMitems last_sync = + AMstackItems(stack_ptr, AMsyncHaveLastSync(have0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&last_sync), 0); /* assert.deepStrictEqual(message.have[0].bloom.byteLength, 0) assert.deepStrictEqual(message.changes, []) */ - AMchanges changes = AMsyncMessageChanges(m1); - assert_int_equal(AMchangesSize(&changes), 0); + AMitems changes = AMstackItems(stack_ptr, AMsyncMessageChanges(m1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&changes), 0); } /** * \brief should not reply if we have no data as well */ -static void test_should_not_reply_if_we_have_no_data_as_well(void **state) { +static void test_should_not_reply_if_we_have_no_data_as_well(void** state) { /* const n1 = create(), n2 = create() const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* const m1 = n1.generateSyncMessage(s1) if (m1 === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* const m1 = AMpush(&test_state->stack, - AMgenerateSyncMessage( - test_state->n1, - test_state->s1), - 
AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + AMsyncMessage const* m1; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &m1)); /* n2.receiveSyncMessage(s2, m1) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, m1)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, m1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const m2 = n2.generateSyncMessage(s2) assert.deepStrictEqual(m2, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMgenerateSyncMessage(test_state->n2, test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } /** * \brief repos with equal heads do not need a reply message */ -static void test_repos_with_equal_heads_do_not_need_a_reply_message(void **state) { +static void test_repos_with_equal_heads_do_not_need_a_reply_message(void** state) { /* const n1 = create(), n2 = create() const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* make two nodes with the same changes */ /* const list = n1.putObject("_root", "n", []) */ - AMobjId const* const list = AMpush(&test_state->stack, - AMmapPutObject(test_state->n1, - AM_ROOT, - AMstr("n"), - AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(test_state->n1, AM_ROOT, AMstr("n"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* for (let i = 0; i < 10; i++) { */ for (size_t i = 0; i != 10; ++i) { /* n1.insert(list, i, i) */ - AMfree(AMlistPutUint(test_state->n1, AM_ROOT, i, true, i)); + AMstackItem(NULL, AMlistPutUint(test_state->n1, list, i, true, i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* n2.applyChanges(n1.getChanges([])) */ - AMchanges const changes = AMpush(&test_state->stack, - AMgetChanges(test_state->n1, NULL), - AM_VALUE_CHANGES, - cmocka_cb).changes; - AMfree(AMapplyChanges(test_state->n2, &changes)); + AMitems const items = + AMstackItems(stack_ptr, AMgetChanges(test_state->n1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + AMstackItem(NULL, AMapplyChanges(test_state->n2, &items), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); /* */ /* generate a naive sync message */ /* const m1 = n1.generateSyncMessage(s1) if (m1 === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* m1 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, - test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + AMsyncMessage const* m1; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &m1)); /* assert.deepStrictEqual(s1.lastSentHeads, n1.getHeads()) */ - AMchangeHashes const last_sent_heads = 
AMsyncStateLastSentHeads( - test_state->s1 - ); - AMchangeHashes const heads = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&last_sent_heads, &heads), 0); + AMitems const last_sent_heads = + AMstackItems(stack_ptr, AMsyncStateLastSentHeads(test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems const heads = + AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&last_sent_heads, &heads)); /* */ /* heads are equal so this message should be null */ /* n2.receiveSyncMessage(s2, m1) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, m1)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, m1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* const m2 = n2.generateSyncMessage(s2) assert.strictEqual(m2, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); } /** * \brief n1 should offer all changes to n2 when starting from nothing */ -static void test_n1_should_offer_all_changes_to_n2_when_starting_from_nothing(void **state) { +static void test_n1_should_offer_all_changes_to_n2_when_starting_from_nothing(void** state) { /* const n1 = create(), n2 = create() */ TestState* test_state = *state; - + AMstack** stack_ptr = &test_state->base_state->stack; /* make changes for n1 that n2 should request */ /* const list = n1.putObject("_root", "n", []) */ - AMobjId const* const list = AMpush( - &test_state->stack, - AMmapPutObject(test_state->n1, AM_ROOT, AMstr("n"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(test_state->n1, AM_ROOT, AMstr("n"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* for (let i = 0; i < 10; i++) { */ for (size_t i = 0; i != 10; ++i) { /* n1.insert(list, i, i) */ - AMfree(AMlistPutUint(test_state->n1, AM_ROOT, i, true, i)); + AMstackItem(NULL, AMlistPutUint(test_state->n1, list, i, true, i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ @@ -254,26 +238,24 @@ static void test_n1_should_offer_all_changes_to_n2_when_starting_from_nothing(vo /** * \brief should sync peers where one has commits the other does not */ -static void test_should_sync_peers_where_one_has_commits_the_other_does_not(void **state) { +static void test_should_sync_peers_where_one_has_commits_the_other_does_not(void** state) { /* const n1 = create(), n2 = create() */ TestState* test_state = *state; - + AMstack** stack_ptr = &test_state->base_state->stack; /* make changes for n1 that n2 should request */ /* const list = n1.putObject("_root", "n", []) */ - AMobjId const* const list = AMpush( - &test_state->stack, - AMmapPutObject(test_state->n1, AM_ROOT, AMstr("n"), AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; 
+ AMobjId const* const list = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(test_state->n1, AM_ROOT, AMstr("n"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* for (let i = 0; i < 10; i++) { */ for (size_t i = 0; i != 10; ++i) { /* n1.insert(list, i, i) */ - AMfree(AMlistPutUint(test_state->n1, AM_ROOT, i, true, i)); + AMstackItem(NULL, AMlistPutUint(test_state->n1, list, i, true, i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ @@ -287,19 +269,20 @@ static void test_should_sync_peers_where_one_has_commits_the_other_does_not(void /** * \brief should work with prior sync state */ -static void test_should_work_with_prior_sync_state(void **state) { +static void test_should_work_with_prior_sync_state(void** state) { /* create & synchronize two nodes */ /* const n1 = create(), n2 = create() const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); @@ -308,10 +291,10 @@ static void test_should_work_with_prior_sync_state(void **state) { /* for (let i = 5; i < 10; i++) { */ for (size_t i = 5; i != 10; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ @@ -325,326 +308,333 @@ static void test_should_work_with_prior_sync_state(void **state) { /** * \brief should not generate messages once synced */ -static void test_should_not_generate_messages_once_synced(void **state) { +static void test_should_not_generate_messages_once_synced(void** state) { /* create & synchronize two nodes */ /* const n1 = create('abc123'), n2 = create('def456') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; - AMfree(AMsetActorId(test_state->n1, AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("abc123")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); - AMfree(AMsetActorId(test_state->n2, AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("def456")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMstack** stack_ptr = &test_state->base_state->stack; + AMactorId const* actor_id; + 
assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("abc123")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->n1, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("def456")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->n2, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* let message, patch for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n2.put("_root", "y", i) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("y"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("y"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n2.commit("", 0) */ - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* n1 reports what it has */ /* message = n1.generateSyncMessage(s1) - if (message === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, - test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (message === null) { throw new RangeError("message should not be + null") */ + AMsyncMessage const* message; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); /* */ /* n2 receives that message and sends changes along with what it has */ /* n2.receiveSyncMessage(s2, message) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, message)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, message), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* message = n2.generateSyncMessage(s2) - if (message === null) { throw new RangeError("message should not be null") */ - message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; - AMchanges message_changes = AMsyncMessageChanges(message); - assert_int_equal(AMchangesSize(&message_changes), 5); + if (message === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); + AMitems message_changes = + AMstackItems(stack_ptr, AMsyncMessageChanges(message), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&message_changes), 5); /* */ /* n1 receives the changes and replies with the changes it now knows that * n2 needs */ /* n1.receiveSyncMessage(s1, message) */ - AMfree(AMreceiveSyncMessage(test_state->n1, test_state->s1, message)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n1, test_state->s1, message), 
cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* message = n2.generateSyncMessage(s2) - if (message === null) { throw new RangeError("message should not be null") */ - message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; - message_changes = AMsyncMessageChanges(message); - assert_int_equal(AMchangesSize(&message_changes), 5); + if (message === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); + message_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(message), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&message_changes), 5); /* */ /* n2 applies the changes and sends confirmation ending the exchange */ /* n2.receiveSyncMessage(s2, message) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, message)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, message), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* message = n2.generateSyncMessage(s2) - if (message === null) { throw new RangeError("message should not be null") */ - message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (message === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); /* */ /* n1 receives the message and has nothing more to say */ /* n1.receiveSyncMessage(s1, message) */ - AMfree(AMreceiveSyncMessage(test_state->n1, test_state->s1, message)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n1, test_state->s1, message), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* message = n1.generateSyncMessage(s1) assert.deepStrictEqual(message, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMgenerateSyncMessage(test_state->n1, test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* //assert.deepStrictEqual(patch, null) // no changes arrived */ /* */ /* n2 also has nothing left to say */ /* message = n2.generateSyncMessage(s2) assert.deepStrictEqual(message, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMgenerateSyncMessage(test_state->n2, test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); } /** * \brief should allow simultaneous messages during synchronization */ -static void test_should_allow_simultaneous_messages_during_synchronization(void **state) { +static void test_should_allow_simultaneous_messages_during_synchronization(void** state) { /* create & synchronize two nodes */ /* const n1 = create('abc123'), n2 = create('def456') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; - AMfree(AMsetActorId(test_state->n1, AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("abc123")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); - AMfree(AMsetActorId(test_state->n2, AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("def456")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id)); + AMstack** stack_ptr = &test_state->base_state->stack; + AMactorId const* actor_id; + 
assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("abc123")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->n1, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("def456")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMstackItem(NULL, AMsetActorId(test_state->n2, actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n2.put("_root", "y", i) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("y"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("y"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n2.commit("", 0) */ - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* const head1 = n1.getHeads()[0], head2 = n2.getHeads()[0] */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMbyteSpan const head1 = AMchangeHashesNext(&heads1, 1); - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMbyteSpan const head2 = AMchangeHashesNext(&heads2, 1); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMbyteSpan head1; + assert_true(AMitemToChangeHash(AMitemsNext(&heads1, 1), &head1)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMbyteSpan head2; + assert_true(AMitemToChangeHash(AMitemsNext(&heads2, 1), &head2)); /* */ /* both sides report what they have but have no shared peer state */ /* let msg1to2, msg2to1 msg1to2 = n1.generateSyncMessage(s1) - if (msg1to2 === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* msg1to2 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, - test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg1to2 === null) { throw new RangeError("message should not be + null") */ + AMsyncMessage const* msg1to2; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg1to2)); /* msg2to1 = n2.generateSyncMessage(s2) - if (msg2to1 === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* msg2to1 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, - test_state->s2), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg2to1 === null) { throw new RangeError("message should not be + null") */ + AMsyncMessage const* msg2to1; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), + 
cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg2to1)); /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).changes.length, 0) */ - AMchanges msg1to2_changes = AMsyncMessageChanges(msg1to2); - assert_int_equal(AMchangesSize(&msg1to2_changes), 0); - /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync.length, 0 */ - AMsyncHaves msg1to2_haves = AMsyncMessageHaves(msg1to2); - AMsyncHave const* msg1to2_have = AMsyncHavesNext(&msg1to2_haves, 1); - AMchangeHashes msg1to2_last_sync = AMsyncHaveLastSync(msg1to2_have); - assert_int_equal(AMchangeHashesSize(&msg1to2_last_sync), 0); + AMitems msg1to2_changes = + AMstackItems(stack_ptr, AMsyncMessageChanges(msg1to2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg1to2_changes), 0); + /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync.length, + * 0 */ + AMitems msg1to2_haves = + AMstackItems(stack_ptr, AMsyncMessageHaves(msg1to2), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_HAVE)); + AMsyncHave const* msg1to2_have; + assert_true(AMitemToSyncHave(AMitemsNext(&msg1to2_haves, 1), &msg1to2_have)); + AMitems msg1to2_last_sync = + AMstackItems(stack_ptr, AMsyncHaveLastSync(msg1to2_have), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&msg1to2_last_sync), 0); /* assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 0) */ - AMchanges msg2to1_changes = AMsyncMessageChanges(msg2to1); - assert_int_equal(AMchangesSize(&msg2to1_changes), 0); - /* assert.deepStrictEqual(decodeSyncMessage(msg2to1).have[0].lastSync.length, 0 */ - AMsyncHaves msg2to1_haves = AMsyncMessageHaves(msg2to1); - AMsyncHave const* msg2to1_have = AMsyncHavesNext(&msg2to1_haves, 1); - AMchangeHashes msg2to1_last_sync = AMsyncHaveLastSync(msg2to1_have); - assert_int_equal(AMchangeHashesSize(&msg2to1_last_sync), 0); + AMitems msg2to1_changes = + AMstackItems(stack_ptr, AMsyncMessageChanges(msg2to1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg2to1_changes), 0); + /* assert.deepStrictEqual(decodeSyncMessage(msg2to1).have[0].lastSync.length, + * 0 */ + AMitems msg2to1_haves = + AMstackItems(stack_ptr, AMsyncMessageHaves(msg2to1), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_HAVE)); + AMsyncHave const* msg2to1_have; + assert_true(AMitemToSyncHave(AMitemsNext(&msg2to1_haves, 1), &msg2to1_have)); + AMitems msg2to1_last_sync = + AMstackItems(stack_ptr, AMsyncHaveLastSync(msg2to1_have), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&msg2to1_last_sync), 0); /* */ /* n1 and n2 receive that message and update sync state but make no patc */ /* n1.receiveSyncMessage(s1, msg2to1) */ - AMfree(AMreceiveSyncMessage(test_state->n1, test_state->s1, msg2to1)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n1, test_state->s1, msg2to1), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* n2.receiveSyncMessage(s2, msg1to2) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* */ /* now both reply with their local changes that the other lacks * (standard warning that 1% of the time this will result in a "needs" * message) */ /* msg1to2 = n1.generateSyncMessage(s1) - if (msg1to2 === null) { throw new RangeError("message should not be null") */ - msg1to2 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg1to2 === 
null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg1to2)); /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).changes.length, 5) */ - msg1to2_changes = AMsyncMessageChanges(msg1to2); - assert_int_equal(AMchangesSize(&msg1to2_changes), 5); + msg1to2_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(msg1to2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg1to2_changes), 5); /* msg2to1 = n2.generateSyncMessage(s2) - if (msg2to1 === null) { throw new RangeError("message should not be null") */ - msg2to1 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg2to1 === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg2to1)); /* assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 5) */ - msg2to1_changes = AMsyncMessageChanges(msg2to1); - assert_int_equal(AMchangesSize(&msg2to1_changes), 5); + msg2to1_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(msg2to1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg2to1_changes), 5); /* */ /* both should now apply the changes and update the frontend */ /* n1.receiveSyncMessage(s1, msg2to1) */ - AMfree(AMreceiveSyncMessage(test_state->n1, - test_state->s1, - msg2to1)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n1, test_state->s1, msg2to1), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepStrictEqual(n1.getMissingDeps(), []) */ - AMchangeHashes missing_deps = AMpush(&test_state->stack, - AMgetMissingDeps(test_state->n1, NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesSize(&missing_deps), 0); + AMitems missing_deps = + AMstackItems(stack_ptr, AMgetMissingDeps(test_state->n1, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&missing_deps), 0); /* //assert.notDeepStrictEqual(patch1, null) assert.deepStrictEqual(n1.materialize(), { x: 4, y: 4 }) */ - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(test_state->n1, AM_ROOT, AMstr("x"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 4); - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(test_state->n1, AM_ROOT, AMstr("y"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 4); + uint64_t uint; + assert_true(AMitemToUint(AMstackItem(stack_ptr, AMmapGet(test_state->n1, AM_ROOT, AMstr("x"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 4); + assert_true(AMitemToUint(AMstackItem(stack_ptr, AMmapGet(test_state->n1, AM_ROOT, AMstr("y"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 4); /* */ /* n2.receiveSyncMessage(s2, msg1to2) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepStrictEqual(n2.getMissingDeps(), []) */ - missing_deps = AMpush(&test_state->stack, - AMgetMissingDeps(test_state->n2, NULL), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesSize(&missing_deps), 0); + missing_deps = + AMstackItems(stack_ptr, 
AMgetMissingDeps(test_state->n2, NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_int_equal(AMitemsSize(&missing_deps), 0); /* //assert.notDeepStrictEqual(patch2, null) assert.deepStrictEqual(n2.materialize(), { x: 4, y: 4 }) */ - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(test_state->n2, AM_ROOT, AMstr("x"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 4); - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(test_state->n2, AM_ROOT, AMstr("y"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 4); + assert_true(AMitemToUint(AMstackItem(stack_ptr, AMmapGet(test_state->n2, AM_ROOT, AMstr("x"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 4); + assert_true(AMitemToUint(AMstackItem(stack_ptr, AMmapGet(test_state->n2, AM_ROOT, AMstr("y"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 4); /* */ /* The response acknowledges the changes received and sends no further * changes */ /* msg1to2 = n1.generateSyncMessage(s1) - if (msg1to2 === null) { throw new RangeError("message should not be null") */ - msg1to2 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg1to2 === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg1to2)); /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).changes.length, 0) */ - msg1to2_changes = AMsyncMessageChanges(msg1to2); - assert_int_equal(AMchangesSize(&msg1to2_changes), 0); + msg1to2_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(msg1to2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg1to2_changes), 0); /* msg2to1 = n2.generateSyncMessage(s2) - if (msg2to1 === null) { throw new RangeError("message should not be null") */ - msg2to1 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (msg2to1 === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n2, test_state->s2), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg2to1)); /* assert.deepStrictEqual(decodeSyncMessage(msg2to1).changes.length, 0) */ - msg2to1_changes = AMsyncMessageChanges(msg2to1); - assert_int_equal(AMchangesSize(&msg2to1_changes), 0); + msg2to1_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(msg2to1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&msg2to1_changes), 0); /* */ /* After receiving acknowledgements, their shared heads should be equal */ /* n1.receiveSyncMessage(s1, msg2to1) */ - AMfree(AMreceiveSyncMessage(test_state->n1, test_state->s1, msg2to1)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n1, test_state->s1, msg2to1), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* n2.receiveSyncMessage(s2, msg1to2) */ - AMfree(AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2)); + AMstackItem(NULL, AMreceiveSyncMessage(test_state->n2, test_state->s2, msg1to2), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* assert.deepStrictEqual(s1.sharedHeads, [head1, head2].sort()) */ - AMchangeHashes s1_shared_heads = AMsyncStateSharedHeads(test_state->s1); - assert_memory_equal(AMchangeHashesNext(&s1_shared_heads, 1).src, - head1.src, - head1.count); - 
assert_memory_equal(AMchangeHashesNext(&s1_shared_heads, 1).src, - head2.src, - head2.count); + AMitems s1_shared_heads = + AMstackItems(stack_ptr, AMsyncStateSharedHeads(test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMbyteSpan s1_shared_change_hash; + assert_true(AMitemToChangeHash(AMitemsNext(&s1_shared_heads, 1), &s1_shared_change_hash)); + assert_memory_equal(s1_shared_change_hash.src, head1.src, head1.count); + assert_true(AMitemToChangeHash(AMitemsNext(&s1_shared_heads, 1), &s1_shared_change_hash)); + assert_memory_equal(s1_shared_change_hash.src, head2.src, head2.count); /* assert.deepStrictEqual(s2.sharedHeads, [head1, head2].sort()) */ - AMchangeHashes s2_shared_heads = AMsyncStateSharedHeads(test_state->s2); - assert_memory_equal(AMchangeHashesNext(&s2_shared_heads, 1).src, - head1.src, - head1.count); - assert_memory_equal(AMchangeHashesNext(&s2_shared_heads, 1).src, - head2.src, - head2.count); + AMitems s2_shared_heads = + AMstackItems(stack_ptr, AMsyncStateSharedHeads(test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMbyteSpan s2_shared_change_hash; + assert_true(AMitemToChangeHash(AMitemsNext(&s2_shared_heads, 1), &s2_shared_change_hash)); + assert_memory_equal(s2_shared_change_hash.src, head1.src, head1.count); + assert_true(AMitemToChangeHash(AMitemsNext(&s2_shared_heads, 1), &s2_shared_change_hash)); + assert_memory_equal(s2_shared_change_hash.src, head2.src, head2.count); /* //assert.deepStrictEqual(patch1, null) //assert.deepStrictEqual(patch2, null) */ /* */ /* We're in sync, no more messages required */ /* msg1to2 = n1.generateSyncMessage(s1) assert.deepStrictEqual(msg1to2, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMgenerateSyncMessage(test_state->n1, test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* msg2to1 = n2.generateSyncMessage(s2) assert.deepStrictEqual(msg2to1, null) */ - AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n2, test_state->s2), - AM_VALUE_VOID, - cmocka_cb); + AMstackItem(NULL, AMgenerateSyncMessage(test_state->n2, test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* If we make one more change and start another sync then its lastSync * should be updated */ /* n1.put("_root", "x", 5) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 5)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* msg1to2 = n1.generateSyncMessage(s1) - if (msg1to2 === null) { throw new RangeError("message should not be null") */ - msg1to2 = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; - /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync, [head1, head2].sort( */ - msg1to2_haves = AMsyncMessageHaves(msg1to2); - msg1to2_have = AMsyncHavesNext(&msg1to2_haves, 1); - msg1to2_last_sync = AMsyncHaveLastSync(msg1to2_have); - AMbyteSpan msg1to2_last_sync_next = AMchangeHashesNext(&msg1to2_last_sync, 1); + if (msg1to2 === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &msg1to2)); + /* assert.deepStrictEqual(decodeSyncMessage(msg1to2).have[0].lastSync, + * [head1, head2].sort( */ + msg1to2_haves = AMstackItems(stack_ptr, AMsyncMessageHaves(msg1to2), 
cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_HAVE)); + assert_true(AMitemToSyncHave(AMitemsNext(&msg1to2_haves, 1), &msg1to2_have)); + msg1to2_last_sync = + AMstackItems(stack_ptr, AMsyncHaveLastSync(msg1to2_have), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMbyteSpan msg1to2_last_sync_next; + assert_true(AMitemToChangeHash(AMitemsNext(&msg1to2_last_sync, 1), &msg1to2_last_sync_next)); assert_int_equal(msg1to2_last_sync_next.count, head1.count); assert_memory_equal(msg1to2_last_sync_next.src, head1.src, head1.count); - msg1to2_last_sync_next = AMchangeHashesNext(&msg1to2_last_sync, 1); + assert_true(AMitemToChangeHash(AMitemsNext(&msg1to2_last_sync, 1), &msg1to2_last_sync_next)); assert_int_equal(msg1to2_last_sync_next.count, head2.count); assert_memory_equal(msg1to2_last_sync_next.src, head2.src, head2.count); } @@ -652,87 +642,89 @@ static void test_should_allow_simultaneous_messages_during_synchronization(void /** * \brief should assume sent changes were received until we hear otherwise */ -static void test_should_assume_sent_changes_were_received_until_we_hear_otherwise(void **state) { +static void test_should_assume_sent_changes_were_received_until_we_hear_otherwise(void** state) { /* const n1 = create('01234567'), n2 = create('89abcdef') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* let message = null */ /* */ /* const items = n1.putObject("_root", "items", []) */ - AMobjId const* items = AMpush(&test_state->stack, - AMmapPutObject(test_state->n1, - AM_ROOT, - AMstr("items"), - AM_OBJ_TYPE_LIST), - AM_VALUE_OBJ_ID, - cmocka_cb).obj_id; + AMobjId const* const items = + AMitemObjId(AMstackItem(stack_ptr, AMmapPutObject(test_state->n1, AM_ROOT, AMstr("items"), AM_OBJ_TYPE_LIST), + cmocka_cb, AMexpect(AM_VAL_TYPE_OBJ_TYPE))); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* sync(n1, n2, s1, s2) */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); /* */ /* n1.push(items, "x") */ - AMfree(AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("x"))); + AMstackItem(NULL, AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("x")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* message = n1.generateSyncMessage(s1) - if (message === null) { throw new RangeError("message should not be null") */ - AMsyncMessage const* message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, - test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (message === null) { throw new RangeError("message should not be null") + */ + AMsyncMessage const* message; + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); /* assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) */ - AMchanges message_changes = AMsyncMessageChanges(message); - assert_int_equal(AMchangesSize(&message_changes), 1); + AMitems message_changes = + AMstackItems(stack_ptr, AMsyncMessageChanges(message), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&message_changes), 1); /* */ /* n1.push(items, 
"y") */ - AMfree(AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("y"))); + AMstackItem(NULL, AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("y")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* message = n1.generateSyncMessage(s1) - if (message === null) { throw new RangeError("message should not be null") */ - message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (message === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); /* assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) */ - message_changes = AMsyncMessageChanges(message); - assert_int_equal(AMchangesSize(&message_changes), 1); + message_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(message), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&message_changes), 1); /* */ /* n1.push(items, "z") */ - AMfree(AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("z"))); + AMstackItem(NULL, AMlistPutStr(test_state->n1, items, SIZE_MAX, true, AMstr("z")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* message = n1.generateSyncMessage(s1) - if (message === null) { throw new RangeError("message should not be null") */ - message = AMpush(&test_state->stack, - AMgenerateSyncMessage(test_state->n1, test_state->s1), - AM_VALUE_SYNC_MESSAGE, - cmocka_cb).sync_message; + if (message === null) { throw new RangeError("message should not be + null") */ + assert_true(AMitemToSyncMessage(AMstackItem(stack_ptr, AMgenerateSyncMessage(test_state->n1, test_state->s1), + cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_MESSAGE)), + &message)); /* assert.deepStrictEqual(decodeSyncMessage(message).changes.length, 1) */ - message_changes = AMsyncMessageChanges(message); - assert_int_equal(AMchangesSize(&message_changes), 1); + message_changes = AMstackItems(stack_ptr, AMsyncMessageChanges(message), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); + assert_int_equal(AMitemsSize(&message_changes), 1); } /** * \brief should work regardless of who initiates the exchange */ -static void test_should_work_regardless_of_who_initiates_the_exchange(void **state) { +static void test_should_work_regardless_of_who_initiates_the_exchange(void** state) { /* create & synchronize two nodes */ /* const n1 = create(), n2 = create() const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* for (let i = 0; i < 5; i++) { */ for (size_t i = 0; i != 5; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* 
*/ /* sync(n1, n2, s1, s2) */ @@ -742,10 +734,10 @@ static void test_should_work_regardless_of_who_initiates_the_exchange(void **sta /* for (let i = 5; i < 10; i++) { */ for (size_t i = 5; i != 10; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ @@ -759,24 +751,26 @@ static void test_should_work_regardless_of_who_initiates_the_exchange(void **sta /** * \brief should work without prior sync state */ -static void test_should_work_without_prior_sync_state(void **state) { - /* Scenario: ,-- c10 <-- c11 <-- c12 <-- c13 <-- c14 - * c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ - * `-- c15 <-- c16 <-- c17 - * lastSync is undefined. */ +static void test_should_work_without_prior_sync_state(void** state) { + /* Scenario: ,-- + * c10 <-- c11 <-- c12 <-- c13 <-- c14 c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 + * <-- c6 <-- c7 <-- c8 <-- c9 <-+ + * `-- + * c15 <-- c16 <-- c17 lastSync is undefined. */ /* */ /* create two peers both with divergent commits */ /* const n1 = create('01234567'), n2 = create('89abcdef') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* for (let i = 0; i < 10; i++) { */ for (size_t i = 0; i != 10; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* sync(n1, n2) */ @@ -785,19 +779,19 @@ static void test_should_work_without_prior_sync_state(void **state) { /* for (let i = 10; i < 15; i++) { */ for (size_t i = 10; i != 15; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* for (let i = 15; i < 18; i++) { */ for (size_t i = 15; i != 18; ++i) { /* n2.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n2.commit("", 0) */ - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ @@ -805,15 +799,9 @@ static void test_should_work_without_prior_sync_state(void **state) { /* sync(n1, n2) */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); /* assert.deepStrictEqual(n1.getHeads(), 
n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); } @@ -821,25 +809,27 @@ static void test_should_work_without_prior_sync_state(void **state) { /** * \brief should work with prior sync state */ -static void test_should_work_with_prior_sync_state_2(void **state) { +static void test_should_work_with_prior_sync_state_2(void** state) { /* Scenario: - * ,-- c10 <-- c11 <-- c12 <-- c13 <-- c14 - * c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 <-- c9 <-+ - * `-- c15 <-- c16 <-- c17 - * lastSync is c9. */ + * ,-- + * c10 <-- c11 <-- c12 <-- c13 <-- c14 c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 + * <-- c6 <-- c7 <-- c8 <-- c9 <-+ + * `-- + * c15 <-- c16 <-- c17 lastSync is c9. */ /* */ /* create two peers both with divergent commits */ /* const n1 = create('01234567'), n2 = create('89abcdef') let s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* for (let i = 0; i < 10; i++) { */ for (size_t i = 0; i != 10; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* sync(n1, n2, s1, s2) */ @@ -848,54 +838,44 @@ static void test_should_work_with_prior_sync_state_2(void **state) { /* for (let i = 10; i < 15; i++) { */ for (size_t i = 10; i != 15; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* for (let i = 15; i < 18; i++) { */ for (size_t i = 15; i != 18; ++i) { /* n2.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n2.commit("", 0) */ - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* s1 = decodeSyncState(encodeSyncState(s1)) */ - AMbyteSpan encoded = AMpush(&test_state->stack, - AMsyncStateEncode(test_state->s1), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMsyncState* s1 = AMpush(&test_state->stack, - AMsyncStateDecode(encoded.src, encoded.count), - 
AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMbyteSpan encoded; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMsyncStateEncode(test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &encoded)); + AMsyncState* s1; + assert_true(AMitemToSyncState(AMstackItem(stack_ptr, AMsyncStateDecode(encoded.src, encoded.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_SYNC_STATE)), + &s1)); /* s2 = decodeSyncState(encodeSyncState(s2)) */ - encoded = AMpush(&test_state->stack, - AMsyncStateEncode(test_state->s2), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMsyncState* s2 = AMpush(&test_state->stack, - AMsyncStateDecode(encoded.src, - encoded.count), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMsyncStateEncode(test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &encoded)); + AMsyncState* s2; + assert_true(AMitemToSyncState(AMstackItem(stack_ptr, AMsyncStateDecode(encoded.src, encoded.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_SYNC_STATE)), + &s2)); /* */ /* assert.notDeepStrictEqual(n1.materialize(), n2.materialize()) */ assert_false(AMequal(test_state->n1, test_state->n2)); /* sync(n1, n2, s1, s2) */ sync(test_state->n1, test_state->n2, s1, s2); /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); } @@ -903,39 +883,39 @@ static void test_should_work_with_prior_sync_state_2(void **state) { /** * \brief should ensure non-empty state after sync */ -static void test_should_ensure_non_empty_state_after_sync(void **state) { +static void test_should_ensure_non_empty_state_after_sync(void** state) { /* const n1 = create('01234567'), n2 = create('89abcdef') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* for (let i = 0; i < 3; i++) { */ for (size_t i = 0; i != 3; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* sync(n1, n2, s1, s2) */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); /* */ /* assert.deepStrictEqual(s1.sharedHeads, n1.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes shared_heads1 = AMsyncStateSharedHeads(test_state->s1); - assert_int_equal(AMchangeHashesCmp(&shared_heads1, &heads1), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, 
AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems shared_heads1 = + AMstackItems(stack_ptr, AMsyncStateSharedHeads(test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&shared_heads1, &heads1)); /* assert.deepStrictEqual(s2.sharedHeads, n1.getHeads()) */ - AMchangeHashes shared_heads2 = AMsyncStateSharedHeads(test_state->s2); - assert_int_equal(AMchangeHashesCmp(&shared_heads2, &heads1), 0); + AMitems shared_heads2 = + AMstackItems(stack_ptr, AMsyncStateSharedHeads(test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&shared_heads2, &heads1)); } /** * \brief should re-sync after one node crashed with data loss */ -static void test_should_resync_after_one_node_crashed_with_data_loss(void **state) { +static void test_should_resync_after_one_node_crashed_with_data_loss(void** state) { /* Scenario: (r) (n2) (n1) * c0 <-- c1 <-- c2 <-- c3 <-- c4 <-- c5 <-- c6 <-- c7 <-- c8 * n2 has changes {c0, c1, c2}, n1's lastSync is c5, and n2's lastSync @@ -946,15 +926,16 @@ static void test_should_resync_after_one_node_crashed_with_data_loss(void **stat let s1 = initSyncState() const s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* n1 makes three changes, which we sync to n2 */ /* for (let i = 0; i < 3; i++) { */ for (size_t i = 0; i != 3; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); @@ -963,28 +944,25 @@ static void test_should_resync_after_one_node_crashed_with_data_loss(void **stat /* let r let rSyncState ;[r, rSyncState] = [n2.clone(), s2.clone()] */ - AMdoc* r = AMpush(&test_state->stack, - AMclone(test_state->n2), - AM_VALUE_DOC, - cmocka_cb).doc; - AMbyteSpan const encoded_s2 = AMpush(&test_state->stack, - AMsyncStateEncode(test_state->s2), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMsyncState* sync_state_r = AMpush(&test_state->stack, - AMsyncStateDecode(encoded_s2.src, - encoded_s2.count), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMdoc* r; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMclone(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &r)); + AMbyteSpan encoded_s2; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsyncStateEncode(test_state->s2), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), + &encoded_s2)); + AMsyncState* sync_state_r; + assert_true(AMitemToSyncState(AMstackItem(stack_ptr, AMsyncStateDecode(encoded_s2.src, encoded_s2.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_SYNC_STATE)), + &sync_state_r)); /* */ /* sync another few commits */ /* for (let i = 3; i < 6; i++) { */ for (size_t i = 3; i != 6; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* sync(n1, n2, s1, s2) */ @@ -992,15 +970,9 @@ 
static void test_should_resync_after_one_node_crashed_with_data_loss(void **stat /* */ /* everyone should be on the same page here */ /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); /* */ @@ -1009,132 +981,106 @@ static void test_should_resync_after_one_node_crashed_with_data_loss(void **stat /* for (let i = 6; i < 9; i++) { */ for (size_t i = 6; i != 9; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* s1 = decodeSyncState(encodeSyncState(s1)) */ - AMbyteSpan const encoded_s1 = AMpush(&test_state->stack, - AMsyncStateEncode(test_state->s1), - AM_VALUE_BYTES, - cmocka_cb).bytes; - AMsyncState* const s1 = AMpush(&test_state->stack, - AMsyncStateDecode(encoded_s1.src, - encoded_s1.count), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMbyteSpan encoded_s1; + assert_true( + AMitemToBytes(AMstackItem(stack_ptr, AMsyncStateEncode(test_state->s1), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), + &encoded_s1)); + AMsyncState* s1; + assert_true(AMitemToSyncState(AMstackItem(stack_ptr, AMsyncStateDecode(encoded_s1.src, encoded_s1.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_SYNC_STATE)), + &s1)); /* rSyncState = decodeSyncState(encodeSyncState(rSyncState)) */ - AMbyteSpan const encoded_r = AMpush(&test_state->stack, - AMsyncStateEncode(sync_state_r), - AM_VALUE_BYTES, - cmocka_cb).bytes; - sync_state_r = AMpush(&test_state->stack, - AMsyncStateDecode(encoded_r.src, encoded_r.count), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMbyteSpan encoded_r; + assert_true(AMitemToBytes( + AMstackItem(stack_ptr, AMsyncStateEncode(sync_state_r), cmocka_cb, AMexpect(AM_VAL_TYPE_BYTES)), &encoded_r)); + assert_true(AMitemToSyncState(AMstackItem(stack_ptr, AMsyncStateDecode(encoded_r.src, encoded_r.count), cmocka_cb, + AMexpect(AM_VAL_TYPE_SYNC_STATE)), + &sync_state_r)); /* */ /* assert.notDeepStrictEqual(n1.getHeads(), r.getHeads()) */ - heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads_r = AMpush(&test_state->stack, - AMgetHeads(r), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_not_equal(AMchangeHashesCmp(&heads1, &heads_r), 0); + heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads_r = AMstackItems(stack_ptr, AMgetHeads(r), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_false(AMitemsEqual(&heads1, 
&heads_r)); /* assert.notDeepStrictEqual(n1.materialize(), r.materialize()) */ assert_false(AMequal(test_state->n1, r)); /* assert.deepStrictEqual(n1.materialize(), { x: 8 }) */ - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(test_state->n1, AM_ROOT, AMstr("x"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 8); + uint64_t uint; + assert_true(AMitemToUint(AMstackItem(stack_ptr, AMmapGet(test_state->n1, AM_ROOT, AMstr("x"), NULL), cmocka_cb, + AMexpect(AM_VAL_TYPE_UINT)), + &uint)); + assert_int_equal(uint, 8); /* assert.deepStrictEqual(r.materialize(), { x: 2 }) */ - assert_int_equal(AMpush(&test_state->stack, - AMmapGet(r, AM_ROOT, AMstr("x"), NULL), - AM_VALUE_UINT, - cmocka_cb).uint, 2); + assert_true(AMitemToUint( + AMstackItem(stack_ptr, AMmapGet(r, AM_ROOT, AMstr("x"), NULL), cmocka_cb, AMexpect(AM_VAL_TYPE_UINT)), &uint)); + assert_int_equal(uint, 2); /* sync(n1, r, s1, rSyncState) */ sync(test_state->n1, r, test_state->s1, sync_state_r); /* assert.deepStrictEqual(n1.getHeads(), r.getHeads()) */ - heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - heads_r = AMpush(&test_state->stack, - AMgetHeads(r), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads_r), 0); + heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + heads_r = AMstackItems(stack_ptr, AMgetHeads(r), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads_r)); /* assert.deepStrictEqual(n1.materialize(), r.materialize()) */ assert_true(AMequal(test_state->n1, r)); } /** - * \brief should re-sync after one node experiences data loss without disconnecting + * \brief should re-sync after one node experiences data loss without + * disconnecting */ -static void test_should_resync_after_one_node_experiences_data_loss_without_disconnecting(void **state) { +static void test_should_resync_after_one_node_experiences_data_loss_without_disconnecting(void** state) { /* const n1 = create('01234567'), n2 = create('89abcdef') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; + AMstack** stack_ptr = &test_state->base_state->stack; /* */ /* n1 makes three changes which we sync to n2 */ /* for (let i = 0; i < 3; i++) { */ for (size_t i = 0; i != 3; ++i) { /* n1.put("_root", "x", i) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.commit("", 0) */ - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); - /* { */ + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + /* { */ } /* */ /* sync(n1, n2, s1, s2) */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); /* */ /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, 
AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); /* */ /* const n2AfterDataLoss = create('89abcdef') */ - AMdoc* n2_after_data_loss = AMpush(&test_state->stack, - AMcreate(AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("89abcdef")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("89abcdef")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* n2_after_data_loss; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), + &n2_after_data_loss)); /* */ /* "n2" now has no data, but n1 still thinks it does. Note we don't do * decodeSyncState(encodeSyncState(s1)) in order to simulate data loss * without disconnecting */ /* sync(n1, n2AfterDataLoss, s1, initSyncState()) */ - AMsyncState* s2_after_data_loss = AMpush(&test_state->stack, - AMsyncStateInit(), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMsyncState* s2_after_data_loss; + assert_true(AMitemToSyncState( + AMstackItem(stack_ptr, AMsyncStateInit(), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_STATE)), &s2_after_data_loss)); sync(test_state->n1, n2_after_data_loss, test_state->s1, s2_after_data_loss); /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); } @@ -1142,33 +1088,33 @@ static void test_should_resync_after_one_node_experiences_data_loss_without_disc /** * \brief should handle changes concurrent to the last sync heads */ -static void test_should_handle_changes_concurrrent_to_the_last_sync_heads(void **state) { - /* const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('fedcba98' */ +static void test_should_handle_changes_concurrrent_to_the_last_sync_heads(void** state) { + /* const n1 = create('01234567'), n2 = create('89abcdef'), n3 = + * create('fedcba98' */ TestState* test_state = *state; - AMdoc* n3 = AMpush(&test_state->stack, - AMcreate(AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("fedcba98")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; - /* const s12 = initSyncState(), s21 = initSyncState(), s23 = initSyncState(), s32 = initSyncState( */ + AMstack** stack_ptr = &test_state->base_state->stack; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("fedcba98")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* n3; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &n3)); + /* const s12 = initSyncState(), s21 = initSyncState(), s23 = + * initSyncState(), s32 = initSyncState( */ AMsyncState* s12 = 
test_state->s1; AMsyncState* s21 = test_state->s2; - AMsyncState* s23 = AMpush(&test_state->stack, - AMsyncStateInit(), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; - AMsyncState* s32 = AMpush(&test_state->stack, - AMsyncStateInit(), - AM_VALUE_SYNC_STATE, - cmocka_cb).sync_state; + AMsyncState* s23; + assert_true(AMitemToSyncState( + AMstackItem(stack_ptr, AMsyncStateInit(), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_STATE)), &s23)); + AMsyncState* s32; + assert_true(AMitemToSyncState( + AMstackItem(stack_ptr, AMsyncStateInit(), cmocka_cb, AMexpect(AM_VAL_TYPE_SYNC_STATE)), &s32)); /* */ /* Change 1 is known to all three nodes */ /* //n1 = Automerge.change(n1, {time: 0}, doc => doc.x = 1) */ /* n1.put("_root", "x", 1); n1.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 1)); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* sync(n1, n2, s12, s21) */ sync(test_state->n1, test_state->n2, s12, s21); @@ -1177,47 +1123,38 @@ static void test_should_handle_changes_concurrrent_to_the_last_sync_heads(void * /* */ /* Change 2 is known to n1 and n2 */ /* n1.put("_root", "x", 2); n1.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 2)); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* sync(n1, n2, s12, s21) */ sync(test_state->n1, test_state->n2, s12, s21); /* */ /* Each of the three nodes makes one change (changes 3, 4, 5) */ /* n1.put("_root", "x", 3); n1.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 3)); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* n2.put("_root", "x", 4); n2.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), 4)); - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("x"), 4), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* n3.put("_root", "x", 5); n3.commit("", 0) */ - AMfree(AMmapPutUint(n3, AM_ROOT, AMstr("x"), 5)); - AMfree(AMcommit(n3, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(n3, AM_ROOT, AMstr("x"), 5), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(n3, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* Apply n3's latest change to n2. 
*/ /* let change = n3.getLastLocalChange() if (change === null) throw new RangeError("no local change") */ - AMchanges changes = AMpush(&test_state->stack, - AMgetLastLocalChange(n3), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems changes = AMstackItems(stack_ptr, AMgetLastLocalChange(n3), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* n2.applyChanges([change]) */ - AMfree(AMapplyChanges(test_state->n2, &changes)); + AMstackItem(NULL, AMapplyChanges(test_state->n2, &changes), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* */ /* Now sync n1 and n2. n3's change is concurrent to n1 and n2's last sync * heads */ /* sync(n1, n2, s12, s21) */ sync(test_state->n1, test_state->n2, s12, s21); /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); } @@ -1225,39 +1162,35 @@ static void test_should_handle_changes_concurrrent_to_the_last_sync_heads(void * /** * \brief should handle histories with lots of branching and merging */ -static void test_should_handle_histories_with_lots_of_branching_and_merging(void **state) { - /* const n1 = create('01234567'), n2 = create('89abcdef'), n3 = create('fedcba98') - const s1 = initSyncState(), s2 = initSyncState() */ +static void test_should_handle_histories_with_lots_of_branching_and_merging(void** state) { + /* const n1 = create('01234567'), n2 = create('89abcdef'), n3 = + create('fedcba98') const s1 = initSyncState(), s2 = initSyncState() */ TestState* test_state = *state; - AMdoc* n3 = AMpush(&test_state->stack, - AMcreate(AMpush(&test_state->stack, - AMactorIdInitStr(AMstr("fedcba98")), - AM_VALUE_ACTOR_ID, - cmocka_cb).actor_id), - AM_VALUE_DOC, - cmocka_cb).doc; + AMstack** stack_ptr = &test_state->base_state->stack; + AMactorId const* actor_id; + assert_true(AMitemToActorId( + AMstackItem(stack_ptr, AMactorIdFromStr(AMstr("fedcba98")), cmocka_cb, AMexpect(AM_VAL_TYPE_ACTOR_ID)), + &actor_id)); + AMdoc* n3; + assert_true(AMitemToDoc(AMstackItem(stack_ptr, AMcreate(actor_id), cmocka_cb, AMexpect(AM_VAL_TYPE_DOC)), &n3)); /* n1.put("_root", "x", 0); n1.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 0)); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("x"), 0), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* let change1 = n1.getLastLocalChange() if (change1 === null) throw new RangeError("no local change") */ - AMchanges change1 = AMpush(&test_state->stack, - AMgetLastLocalChange(test_state->n1), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems change1 = + AMstackItems(stack_ptr, AMgetLastLocalChange(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* n2.applyChanges([change1]) */ - 
AMfree(AMapplyChanges(test_state->n2, &change1)); + AMstackItem(NULL, AMapplyChanges(test_state->n2, &change1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* let change2 = n1.getLastLocalChange() if (change2 === null) throw new RangeError("no local change") */ - AMchanges change2 = AMpush(&test_state->stack, - AMgetLastLocalChange(test_state->n1), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems change2 = + AMstackItems(stack_ptr, AMgetLastLocalChange(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* n3.applyChanges([change2]) */ - AMfree(AMapplyChanges(n3, &change2)); + AMstackItem(NULL, AMapplyChanges(n3, &change2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n3.put("_root", "x", 1); n3.commit("", 0) */ - AMfree(AMmapPutUint(n3, AM_ROOT, AMstr("x"), 1)); - AMfree(AMcommit(n3, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(n3, AM_ROOT, AMstr("x"), 1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(n3, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* - n1c1 <------ n1c2 <------ n1c3 <-- etc. <-- n1c20 <------ n1c21 * / \/ \/ \/ @@ -1269,28 +1202,24 @@ static void test_should_handle_histories_with_lots_of_branching_and_merging(void /* for (let i = 1; i < 20; i++) { */ for (size_t i = 1; i != 20; ++i) { /* n1.put("_root", "n1", i); n1.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n1, AM_ROOT, AMstr("n1"), i)); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n1, AM_ROOT, AMstr("n1"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* n2.put("_root", "n2", i); n2.commit("", 0) */ - AMfree(AMmapPutUint(test_state->n2, AM_ROOT, AMstr("n2"), i)); - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutUint(test_state->n2, AM_ROOT, AMstr("n2"), i), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* const change1 = n1.getLastLocalChange() if (change1 === null) throw new RangeError("no local change") */ - AMchanges change1 = AMpush(&test_state->stack, - AMgetLastLocalChange(test_state->n1), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems change1 = + AMstackItems(stack_ptr, AMgetLastLocalChange(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* const change2 = n2.getLastLocalChange() if (change2 === null) throw new RangeError("no local change") */ - AMchanges change2 = AMpush(&test_state->stack, - AMgetLastLocalChange(test_state->n2), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems change2 = + AMstackItems(stack_ptr, AMgetLastLocalChange(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* n1.applyChanges([change2]) */ - AMfree(AMapplyChanges(test_state->n1, &change2)); + AMstackItem(NULL, AMapplyChanges(test_state->n1, &change2), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n2.applyChanges([change1]) */ - AMfree(AMapplyChanges(test_state->n2, &change1)); - /* { */ + AMstackItem(NULL, AMapplyChanges(test_state->n2, &change1), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); + /* { */ } /* */ /* sync(n1, n2, s1, s2) */ @@ -1300,31 +1229,24 @@ static void test_should_handle_histories_with_lots_of_branching_and_merging(void * the slower code path */ /* const change3 = n2.getLastLocalChange() if (change3 === null) throw new RangeError("no local change") */ - AMchanges change3 = AMpush(&test_state->stack, - 
AMgetLastLocalChange(n3), - AM_VALUE_CHANGES, - cmocka_cb).changes; + AMitems change3 = AMstackItems(stack_ptr, AMgetLastLocalChange(n3), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE)); /* n2.applyChanges([change3]) */ - AMfree(AMapplyChanges(test_state->n2, &change3)); + AMstackItem(NULL, AMapplyChanges(test_state->n2, &change3), cmocka_cb, AMexpect(AM_VAL_TYPE_VOID)); /* n1.put("_root", "n1", "final"); n1.commit("", 0) */ - AMfree(AMmapPutStr(test_state->n1, AM_ROOT, AMstr("n1"), AMstr("final"))); - AMfree(AMcommit(test_state->n1, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutStr(test_state->n1, AM_ROOT, AMstr("n1"), AMstr("final")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n1, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* n2.put("_root", "n2", "final"); n2.commit("", 0) */ - AMfree(AMmapPutStr(test_state->n2, AM_ROOT, AMstr("n2"), AMstr("final"))); - AMfree(AMcommit(test_state->n2, AMstr(""), &TIME_0)); + AMstackItem(NULL, AMmapPutStr(test_state->n2, AM_ROOT, AMstr("n2"), AMstr("final")), cmocka_cb, + AMexpect(AM_VAL_TYPE_VOID)); + AMstackItem(NULL, AMcommit(test_state->n2, AMstr(""), &TIME_0), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); /* */ /* sync(n1, n2, s1, s2) */ sync(test_state->n1, test_state->n2, test_state->s1, test_state->s2); /* assert.deepStrictEqual(n1.getHeads(), n2.getHeads()) */ - AMchangeHashes heads1 = AMpush(&test_state->stack, - AMgetHeads(test_state->n1), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - AMchangeHashes heads2 = AMpush(&test_state->stack, - AMgetHeads(test_state->n2), - AM_VALUE_CHANGE_HASHES, - cmocka_cb).change_hashes; - assert_int_equal(AMchangeHashesCmp(&heads1, &heads2), 0); + AMitems heads1 = AMstackItems(stack_ptr, AMgetHeads(test_state->n1), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + AMitems heads2 = AMstackItems(stack_ptr, AMgetHeads(test_state->n2), cmocka_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); + assert_true(AMitemsEqual(&heads1, &heads2)); /* assert.deepStrictEqual(n1.materialize(), n2.materialize()) */ assert_true(AMequal(test_state->n1, test_state->n2)); } @@ -1334,20 +1256,26 @@ int run_ported_wasm_sync_tests(void) { cmocka_unit_test_setup_teardown(test_should_send_a_sync_message_implying_no_local_data, setup, teardown), cmocka_unit_test_setup_teardown(test_should_not_reply_if_we_have_no_data_as_well, setup, teardown), cmocka_unit_test_setup_teardown(test_repos_with_equal_heads_do_not_need_a_reply_message, setup, teardown), - cmocka_unit_test_setup_teardown(test_n1_should_offer_all_changes_to_n2_when_starting_from_nothing, setup, teardown), - cmocka_unit_test_setup_teardown(test_should_sync_peers_where_one_has_commits_the_other_does_not, setup, teardown), + cmocka_unit_test_setup_teardown(test_n1_should_offer_all_changes_to_n2_when_starting_from_nothing, setup, + teardown), + cmocka_unit_test_setup_teardown(test_should_sync_peers_where_one_has_commits_the_other_does_not, setup, + teardown), cmocka_unit_test_setup_teardown(test_should_work_with_prior_sync_state, setup, teardown), cmocka_unit_test_setup_teardown(test_should_not_generate_messages_once_synced, setup, teardown), - cmocka_unit_test_setup_teardown(test_should_allow_simultaneous_messages_during_synchronization, setup, teardown), - cmocka_unit_test_setup_teardown(test_should_assume_sent_changes_were_received_until_we_hear_otherwise, setup, teardown), + cmocka_unit_test_setup_teardown(test_should_allow_simultaneous_messages_during_synchronization, setup, + teardown), + 
cmocka_unit_test_setup_teardown(test_should_assume_sent_changes_were_received_until_we_hear_otherwise, setup, + teardown), cmocka_unit_test_setup_teardown(test_should_work_regardless_of_who_initiates_the_exchange, setup, teardown), cmocka_unit_test_setup_teardown(test_should_work_without_prior_sync_state, setup, teardown), cmocka_unit_test_setup_teardown(test_should_work_with_prior_sync_state_2, setup, teardown), cmocka_unit_test_setup_teardown(test_should_ensure_non_empty_state_after_sync, setup, teardown), cmocka_unit_test_setup_teardown(test_should_resync_after_one_node_crashed_with_data_loss, setup, teardown), - cmocka_unit_test_setup_teardown(test_should_resync_after_one_node_experiences_data_loss_without_disconnecting, setup, teardown), + cmocka_unit_test_setup_teardown(test_should_resync_after_one_node_experiences_data_loss_without_disconnecting, + setup, teardown), cmocka_unit_test_setup_teardown(test_should_handle_changes_concurrrent_to_the_last_sync_heads, setup, teardown), - cmocka_unit_test_setup_teardown(test_should_handle_histories_with_lots_of_branching_and_merging, setup, teardown), + cmocka_unit_test_setup_teardown(test_should_handle_histories_with_lots_of_branching_and_merging, setup, + teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); diff --git a/rust/automerge-c/test/stack_utils.c b/rust/automerge-c/test/stack_utils.c deleted file mode 100644 index f65ea2e5..00000000 --- a/rust/automerge-c/test/stack_utils.c +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include -#include - -/* third-party */ -#include - -/* local */ -#include "cmocka_utils.h" -#include "stack_utils.h" - -void cmocka_cb(AMresultStack** stack, uint8_t discriminant) { - assert_non_null(stack); - assert_non_null(*stack); - assert_non_null((*stack)->result); - if (AMresultStatus((*stack)->result) != AM_STATUS_OK) { - fail_msg_view("%s", AMerrorMessage((*stack)->result)); - } - assert_int_equal(AMresultValue((*stack)->result).tag, discriminant); -} - -int setup_stack(void** state) { - *state = NULL; - return 0; -} - -int teardown_stack(void** state) { - AMresultStack* stack = *state; - AMfreeStack(&stack); - return 0; -} diff --git a/rust/automerge-c/test/stack_utils.h b/rust/automerge-c/test/stack_utils.h deleted file mode 100644 index 473feebc..00000000 --- a/rust/automerge-c/test/stack_utils.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef STACK_UTILS_H -#define STACK_UTILS_H - -#include - -/* local */ -#include - -/** - * \brief Reports an error through a cmocka assertion. - * - * \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. - * \param[in] discriminant An `AMvalueVariant` enum tag. - * \pre \p stack` != NULL`. - */ -void cmocka_cb(AMresultStack** stack, uint8_t discriminant); - -/** - * \brief Allocates a result stack for storing the results allocated during one - * or more test cases. - * - * \param[in,out] state A pointer to a pointer to an `AMresultStack` struct. - * \pre \p state` != NULL`. - * \warning The `AMresultStack` struct returned through \p state must be - * deallocated with `teardown_stack()` in order to prevent memory leaks. - */ -int setup_stack(void** state); - -/** - * \brief Deallocates a result stack after deallocating any results that were - * stored in it by one or more test cases. - * - * \param[in] state A pointer to a pointer to an `AMresultStack` struct. - * \pre \p state` != NULL`. 
- */ -int teardown_stack(void** state); - -#endif /* STACK_UTILS_H */ diff --git a/rust/automerge-c/test/str_utils.c b/rust/automerge-c/test/str_utils.c index cc923cb4..2937217a 100644 --- a/rust/automerge-c/test/str_utils.c +++ b/rust/automerge-c/test/str_utils.c @@ -1,5 +1,5 @@ -#include #include +#include /* local */ #include "str_utils.h" diff --git a/rust/automerge-c/test/str_utils.h b/rust/automerge-c/test/str_utils.h index b9985683..14a4af73 100644 --- a/rust/automerge-c/test/str_utils.h +++ b/rust/automerge-c/test/str_utils.h @@ -1,14 +1,17 @@ -#ifndef STR_UTILS_H -#define STR_UTILS_H +#ifndef TESTS_STR_UTILS_H +#define TESTS_STR_UTILS_H /** - * \brief Converts a hexadecimal string into a sequence of bytes. + * \brief Converts a hexadecimal string into an array of bytes. * - * \param[in] hex_str A string. - * \param[in] src A pointer to a contiguous sequence of bytes. - * \param[in] count The number of bytes to copy to \p src. - * \pre \p count `<=` length of \p src. + * \param[in] hex_str A hexadecimal string. + * \param[in] src A pointer to an array of bytes. + * \param[in] count The count of bytes to copy into the array pointed to by + * \p src. + * \pre \p src `!= NULL` + * \pre `sizeof(`\p src `) > 0` + * \pre \p count `<= sizeof(`\p src `)` */ void hex_to_bytes(char const* hex_str, uint8_t* src, size_t const count); -#endif /* STR_UTILS_H */ +#endif /* TESTS_STR_UTILS_H */ diff --git a/rust/automerge/src/error.rs b/rust/automerge/src/error.rs index 57a87167..68b8ec65 100644 --- a/rust/automerge/src/error.rs +++ b/rust/automerge/src/error.rs @@ -1,3 +1,4 @@ +use crate::change::LoadError as LoadChangeError; use crate::storage::load::Error as LoadError; use crate::types::{ActorId, ScalarValue}; use crate::value::DataType; @@ -18,6 +19,8 @@ pub enum AutomergeError { Fail, #[error("invalid actor ID `{0}`")] InvalidActorId(String), + #[error(transparent)] + InvalidChangeHashBytes(#[from] InvalidChangeHashSlice), #[error("invalid UTF-8 character at {0}")] InvalidCharacter(usize), #[error("invalid hash {0}")] @@ -39,6 +42,8 @@ pub enum AutomergeError { }, #[error(transparent)] Load(#[from] LoadError), + #[error(transparent)] + LoadChangeError(#[from] LoadChangeError), #[error("increment operations must be against a counter value")] MissingCounter, #[error("hash {0} does not correspond to a change in this document")] diff --git a/scripts/ci/cmake-build b/scripts/ci/cmake-build index f6f9f9b1..25a69756 100755 --- a/scripts/ci/cmake-build +++ b/scripts/ci/cmake-build @@ -16,4 +16,4 @@ C_PROJECT=$THIS_SCRIPT/../../rust/automerge-c; mkdir -p $C_PROJECT/build; cd $C_PROJECT/build; cmake --log-level=ERROR -B . -S .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DBUILD_SHARED_LIBS=$SHARED_TOGGLE; -cmake --build . --target test_automerge; +cmake --build . --target automerge_test; From 44fa7ac41647fa465ee7baa0bc0ee64e811dded8 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Mon, 27 Feb 2023 13:12:09 -0700 Subject: [PATCH 67/72] Don't panic on missing deps of change chunks (#538) * Fix doubly-reported ops in load of change chunks Since c3c04128f5f1703007f650ea3104d98334334aab, observers have been called twice when calling Automerge::load() with change chunks. * Better handle change chunks with missing deps Before this change Automerge::load would panic if you passed a change chunk that was missing a dependency, or multiple change chunks not in strict dependency order. After this change these cases will error instead. 
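A minimal sketch of the caller-facing behaviour described above, assuming only the crate's
public `Automerge::load` entry point (the helper name below is illustrative): a leading change
chunk with missing dependencies, or change chunks supplied out of dependency order, now surface
as an `Err` value the caller can inspect instead of a panic.

```rust
use automerge::{Automerge, AutomergeError};

// Attempt to load raw bytes that may start with a change chunk whose
// dependencies are absent; the failure comes back as an error value.
fn load_or_report(bytes: &[u8]) -> Result<Automerge, AutomergeError> {
    match Automerge::load(bytes) {
        Ok(doc) => Ok(doc),
        Err(err) => {
            // e.g. "change's deps should already be in the document"
            eprintln!("refusing to load document: {err}");
            Err(err)
        }
    }
}
```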
--- rust/automerge/src/automerge.rs | 38 +++++++++--------- rust/automerge/src/automerge/current_state.rs | 29 ++++++++++++- rust/automerge/src/error.rs | 2 + .../fixtures/two_change_chunks.automerge | Bin 0 -> 177 bytes .../two_change_chunks_compressed.automerge | Bin 0 -> 192 bytes .../two_change_chunks_out_of_order.automerge | Bin 0 -> 177 bytes .../fuzz-crashers/missing_deps.automerge | Bin 0 -> 224 bytes .../missing_deps_compressed.automerge | Bin 0 -> 120 bytes .../missing_deps_subsequent.automerge | Bin 0 -> 180 bytes rust/automerge/tests/test.rs | 13 ++++++ 10 files changed, 62 insertions(+), 20 deletions(-) create mode 100644 rust/automerge/tests/fixtures/two_change_chunks.automerge create mode 100644 rust/automerge/tests/fixtures/two_change_chunks_compressed.automerge create mode 100644 rust/automerge/tests/fixtures/two_change_chunks_out_of_order.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/missing_deps.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/missing_deps_compressed.automerge create mode 100644 rust/automerge/tests/fuzz-crashers/missing_deps_subsequent.automerge diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 09c3cc9d..9c45ec51 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -464,6 +464,7 @@ impl Automerge { return Err(load::Error::BadChecksum.into()); } + let mut change: Option = None; let mut am = match first_chunk { storage::Chunk::Document(d) => { tracing::trace!("first chunk is document chunk, inflating"); @@ -501,30 +502,31 @@ impl Automerge { } } storage::Chunk::Change(stored_change) => { - tracing::trace!("first chunk is change chunk, applying"); - let change = Change::new_from_unverified(stored_change.into_owned(), None) - .map_err(|e| load::Error::InvalidChangeColumns(Box::new(e)))?; - let mut am = Self::new(); - am.apply_change(change, &mut observer); - am + tracing::trace!("first chunk is change chunk"); + change = Some( + Change::new_from_unverified(stored_change.into_owned(), None) + .map_err(|e| load::Error::InvalidChangeColumns(Box::new(e)))?, + ); + Self::new() } storage::Chunk::CompressedChange(stored_change, compressed) => { - tracing::trace!("first chunk is compressed change, decompressing and applying"); - let change = Change::new_from_unverified( - stored_change.into_owned(), - Some(compressed.into_owned()), - ) - .map_err(|e| load::Error::InvalidChangeColumns(Box::new(e)))?; - let mut am = Self::new(); - am.apply_change(change, &mut observer); - am + tracing::trace!("first chunk is compressed change"); + change = Some( + Change::new_from_unverified( + stored_change.into_owned(), + Some(compressed.into_owned()), + ) + .map_err(|e| load::Error::InvalidChangeColumns(Box::new(e)))?, + ); + Self::new() } }; - tracing::trace!("first chunk loaded, loading remaining chunks"); + tracing::trace!("loading change chunks"); match load::load_changes(remaining.reset()) { load::LoadedChanges::Complete(c) => { - for change in c { - am.apply_change(change, &mut observer); + am.apply_changes(change.into_iter().chain(c))?; + if !am.queue.is_empty() { + return Err(AutomergeError::MissingDeps); } } load::LoadedChanges::Partial { error, .. 
} => { diff --git a/rust/automerge/src/automerge/current_state.rs b/rust/automerge/src/automerge/current_state.rs index 1c1bceed..3f7f4afc 100644 --- a/rust/automerge/src/automerge/current_state.rs +++ b/rust/automerge/src/automerge/current_state.rs @@ -338,9 +338,9 @@ impl<'a, I: Iterator>> Iterator for TextActions<'a, I> { #[cfg(test)] mod tests { - use std::borrow::Cow; + use std::{borrow::Cow, fs}; - use crate::{transaction::Transactable, ObjType, OpObserver, Prop, ReadDoc, Value}; + use crate::{transaction::Transactable, Automerge, ObjType, OpObserver, Prop, ReadDoc, Value}; // Observer ops often carry a "tagged value", which is a value and the OpID of the op which // created that value. For a lot of values (i.e. any scalar value) we don't care about the @@ -887,4 +887,29 @@ mod tests { ]) ); } + + #[test] + fn test_load_changes() { + fn fixture(name: &str) -> Vec { + fs::read("./tests/fixtures/".to_owned() + name).unwrap() + } + + let mut obs = ObserverStub::new(); + let _doc = Automerge::load_with( + &fixture("counter_value_is_ok.automerge"), + crate::OnPartialLoad::Error, + crate::storage::VerificationMode::Check, + Some(&mut obs), + ); + + assert_eq!( + Calls(obs.ops), + Calls(vec![ObserverCall::Put { + obj: crate::ROOT, + prop: "a".into(), + value: ObservedValue::Untagged(crate::ScalarValue::Counter(2000.into()).into()), + conflict: false, + },]) + ); + } } diff --git a/rust/automerge/src/error.rs b/rust/automerge/src/error.rs index 68b8ec65..86dbe9f3 100644 --- a/rust/automerge/src/error.rs +++ b/rust/automerge/src/error.rs @@ -48,6 +48,8 @@ pub enum AutomergeError { MissingCounter, #[error("hash {0} does not correspond to a change in this document")] MissingHash(ChangeHash), + #[error("change's deps should already be in the document")] + MissingDeps, #[error("compressed chunk was not a change")] NonChangeCompressed, #[error("id was not an object id")] diff --git a/rust/automerge/tests/fixtures/two_change_chunks.automerge b/rust/automerge/tests/fixtures/two_change_chunks.automerge new file mode 100644 index 0000000000000000000000000000000000000000..1a84b363ccab6161890367b7b6fadd84091acc1a GIT binary patch literal 177 zcmZq8_iCPX___h3C4;~%u8ahNZ6`uDR*U$arwyk>-a69LX7pdFiPNh77Et z%qEOZOkqp~O!bV3jP(p4*a|da`E&KFj46yDlRhQgGJP()b>hw!qP#CRXsF%#9>DfV qvr}yCn>=m|E0~y2tuSKXU}R!qf>{&J2(*Zyo)K&rW4%~XJp%xrEC}cVUk{s_*GfwAzq7vd=R$+BrLv*EZRo)X zjiFO+wp{Gg>{;2ca-QMDjq?~;7#SHD{{L?UnzQ`5`cUCz3gfK9*9|@;-7W5 zAx_SoEv*boUq4)P)0c_q;Jzcx4-GhyGZORCQx%LDI2f6jm_(UP7@e5Hn8FzgnCcno q8S5Dnfw*2Qsh*(~XdB2HMoR_^(-;|1O*3R*g_#622V@2V2m%1g@ISTy literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/missing_deps.automerge b/rust/automerge/tests/fuzz-crashers/missing_deps.automerge new file mode 100644 index 0000000000000000000000000000000000000000..8a57a0f4c8a82541f9236c878cd22599aefbcce2 GIT binary patch literal 224 zcmZq8_i8>FcHEBfDkJ0W>J_a(?qU6^Yf=o1{~5o&kywAoaLxb!s;Hm{4n%=~4@7_f dT+w7W3mbz0J3uIvfiW3@Dq(DLXvVLM3jvQ0EVKXs literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/missing_deps_compressed.automerge b/rust/automerge/tests/fuzz-crashers/missing_deps_compressed.automerge new file mode 100644 index 0000000000000000000000000000000000000000..2c7b123b6805032546ec438597e31a03245b5a79 GIT binary patch literal 120 zcmV-;0EhpDZ%TvQ<(umQZUAHeoBsjf#K?6GIWE|lwH=|kchIwRB>mYqPdl0|$S{b; zlZl!T#tysb@0Cu7tB#1rhSmZA0s_mq^zPs=2xDkrZf9j6G5`nx0s;aR12h3b0#*W7 a0dN9;0Dl300bv1u0e==^e*ggh0RR6R126Ib literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fuzz-crashers/missing_deps_subsequent.automerge 
b/rust/automerge/tests/fuzz-crashers/missing_deps_subsequent.automerge new file mode 100644 index 0000000000000000000000000000000000000000..2fe439afd0c7792801f52a5325a2582478efdd1d GIT binary patch literal 180 zcmZq8_iE-b7ZG8!VGvl5pXAtm&!MU9#x>WYe^O^NGTz(X^8SGVM{-7DUV5s6F$0?@ zvk9XUQy5b?V*yh=Vc^1qZv~et#%p2bkvx1Prjyk;Lcr*+ew Date: Fri, 3 Mar 2023 17:42:40 -0500 Subject: [PATCH 68/72] Suppress clippy warning in parse.rs + bump toolchain (#542) * Fix rust error in parse.rs * Bump toolchain to 1.67.0 --- .github/workflows/ci.yaml | 14 +++++++------- rust/automerge/src/storage/parse.rs | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index bfa31bd5..0263f408 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true components: rustfmt - uses: Swatinem/rust-cache@v1 @@ -28,7 +28,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true components: clippy - uses: Swatinem/rust-cache@v1 @@ -42,7 +42,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true - uses: Swatinem/rust-cache@v1 - name: Build rust docs @@ -118,7 +118,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true - uses: Swatinem/rust-cache@v1 - name: Install CMocka @@ -136,7 +136,7 @@ jobs: strategy: matrix: toolchain: - - 1.66.0 + - 1.67.0 steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 @@ -155,7 +155,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true - uses: Swatinem/rust-cache@v1 - run: ./scripts/ci/build-test @@ -168,7 +168,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.66.0 + toolchain: 1.67.0 default: true - uses: Swatinem/rust-cache@v1 - run: ./scripts/ci/build-test diff --git a/rust/automerge/src/storage/parse.rs b/rust/automerge/src/storage/parse.rs index 54668da4..6751afb4 100644 --- a/rust/automerge/src/storage/parse.rs +++ b/rust/automerge/src/storage/parse.rs @@ -308,6 +308,7 @@ impl<'a> Input<'a> { } /// The bytes behind this input - including bytes which have been consumed + #[allow(clippy::misnamed_getters)] pub(crate) fn bytes(&self) -> &'a [u8] { self.original } From 2c1970f6641ea3fe10976721316ae6d07765e4a1 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Sat, 4 Mar 2023 05:09:08 -0700 Subject: [PATCH 69/72] Fix panic on invalid action (#541) We make the validation on parsing operations in the encoded changes stricter to avoid a possible panic when applying changes. 
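A minimal sketch of the validate-then-construct pattern this adopts, written in terms of the
crate-internal helpers touched in the diff below (`OpType::validate_action_and_value` and
`OpType::from_action_and_value`); it is illustrative only, not a public API:

```rust
use crate::error::InvalidOpType;
use crate::types::{OpType, ScalarValue};

// Validate the raw (action, value) pair first, so the infallible conversion
// that follows can never reach an invalid case and panic on untrusted input.
fn decode_op_type(action: u64, val: ScalarValue) -> Result<OpType, InvalidOpType> {
    OpType::validate_action_and_value(action, &val)?;
    Ok(OpType::from_action_and_value(action, val))
}
```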
--- rust/automerge/src/automerge.rs | 2 +- rust/automerge/src/change.rs | 2 +- .../src/columnar/encoding/col_error.rs | 2 +- rust/automerge/src/error.rs | 2 +- .../src/storage/change/change_op_columns.rs | 20 ++++++++- rust/automerge/src/types.rs | 40 ++++++++++++------ .../fuzz-crashers/action-is-48.automerge | Bin 0 -> 58 bytes 7 files changed, 48 insertions(+), 20 deletions(-) create mode 100644 rust/automerge/tests/fuzz-crashers/action-is-48.automerge diff --git a/rust/automerge/src/automerge.rs b/rust/automerge/src/automerge.rs index 9c45ec51..0dd82253 100644 --- a/rust/automerge/src/automerge.rs +++ b/rust/automerge/src/automerge.rs @@ -723,7 +723,7 @@ impl Automerge { obj, Op { id, - action: OpType::from_index_and_value(c.action, c.val).unwrap(), + action: OpType::from_action_and_value(c.action, c.val), key, succ: Default::default(), pred, diff --git a/rust/automerge/src/change.rs b/rust/automerge/src/change.rs index b5cae7df..be467a84 100644 --- a/rust/automerge/src/change.rs +++ b/rust/automerge/src/change.rs @@ -278,7 +278,7 @@ impl From<&Change> for crate::ExpandedChange { let operations = c .iter_ops() .map(|o| crate::legacy::Op { - action: crate::types::OpType::from_index_and_value(o.action, o.val).unwrap(), + action: crate::types::OpType::from_action_and_value(o.action, o.val), insert: o.insert, key: match o.key { StoredKey::Elem(e) if e.is_head() => { diff --git a/rust/automerge/src/columnar/encoding/col_error.rs b/rust/automerge/src/columnar/encoding/col_error.rs index c8d5c5c0..089556b6 100644 --- a/rust/automerge/src/columnar/encoding/col_error.rs +++ b/rust/automerge/src/columnar/encoding/col_error.rs @@ -1,5 +1,5 @@ #[derive(Clone, Debug)] -pub(crate) struct DecodeColumnError { +pub struct DecodeColumnError { path: Path, error: DecodeColErrorKind, } diff --git a/rust/automerge/src/error.rs b/rust/automerge/src/error.rs index 86dbe9f3..62a7b72f 100644 --- a/rust/automerge/src/error.rs +++ b/rust/automerge/src/error.rs @@ -99,7 +99,7 @@ pub struct InvalidElementId(pub String); pub struct InvalidOpId(pub String); #[derive(Error, Debug)] -pub(crate) enum InvalidOpType { +pub enum InvalidOpType { #[error("unrecognized action index {0}")] UnknownAction(u64), #[error("non numeric argument for inc op")] diff --git a/rust/automerge/src/storage/change/change_op_columns.rs b/rust/automerge/src/storage/change/change_op_columns.rs index 7c3a65ec..cd1cb150 100644 --- a/rust/automerge/src/storage/change/change_op_columns.rs +++ b/rust/automerge/src/storage/change/change_op_columns.rs @@ -14,6 +14,7 @@ use crate::{ }, }, convert, + error::InvalidOpType, storage::{ change::AsChangeOp, columns::{ @@ -22,6 +23,7 @@ use crate::{ RawColumns, }, types::{ElemId, ObjId, OpId, ScalarValue}, + OpType, }; const OBJ_COL_ID: ColumnId = ColumnId::new(0); @@ -276,7 +278,12 @@ impl ChangeOpsColumns { #[derive(thiserror::Error, Debug)] #[error(transparent)] -pub struct ReadChangeOpError(#[from] DecodeColumnError); +pub enum ReadChangeOpError { + #[error(transparent)] + DecodeError(#[from] DecodeColumnError), + #[error(transparent)] + InvalidOpType(#[from] InvalidOpType), +} #[derive(Clone)] pub(crate) struct ChangeOpsIter<'a> { @@ -308,6 +315,11 @@ impl<'a> ChangeOpsIter<'a> { let action = self.action.next_in_col("action")?; let val = self.val.next_in_col("value")?; let pred = self.pred.next_in_col("pred")?; + + // This check is necessary to ensure that OpType::from_action_and_value + // cannot panic later in the process. 
+ OpType::validate_action_and_value(action, &val)?; + Ok(Some(ChangeOp { obj, key, @@ -458,10 +470,14 @@ mod tests { action in 0_u64..6, obj in opid(), insert in any::()) -> ChangeOp { + + let val = if action == 5 && !(value.is_int() || value.is_uint()) { + ScalarValue::Uint(0) + } else { value }; ChangeOp { obj: obj.into(), key, - val: value, + val, pred, action, insert, diff --git a/rust/automerge/src/types.rs b/rust/automerge/src/types.rs index 870569e9..2978aa97 100644 --- a/rust/automerge/src/types.rs +++ b/rust/automerge/src/types.rs @@ -216,23 +216,35 @@ impl OpType { } } - pub(crate) fn from_index_and_value( - index: u64, - value: ScalarValue, - ) -> Result { - match index { - 0 => Ok(Self::Make(ObjType::Map)), - 1 => Ok(Self::Put(value)), - 2 => Ok(Self::Make(ObjType::List)), - 3 => Ok(Self::Delete), - 4 => Ok(Self::Make(ObjType::Text)), + pub(crate) fn validate_action_and_value( + action: u64, + value: &ScalarValue, + ) -> Result<(), error::InvalidOpType> { + match action { + 0..=4 => Ok(()), 5 => match value { - ScalarValue::Int(i) => Ok(Self::Increment(i)), - ScalarValue::Uint(i) => Ok(Self::Increment(i as i64)), + ScalarValue::Int(_) | ScalarValue::Uint(_) => Ok(()), _ => Err(error::InvalidOpType::NonNumericInc), }, - 6 => Ok(Self::Make(ObjType::Table)), - other => Err(error::InvalidOpType::UnknownAction(other)), + 6 => Ok(()), + _ => Err(error::InvalidOpType::UnknownAction(action)), + } + } + + pub(crate) fn from_action_and_value(action: u64, value: ScalarValue) -> OpType { + match action { + 0 => Self::Make(ObjType::Map), + 1 => Self::Put(value), + 2 => Self::Make(ObjType::List), + 3 => Self::Delete, + 4 => Self::Make(ObjType::Text), + 5 => match value { + ScalarValue::Int(i) => Self::Increment(i), + ScalarValue::Uint(i) => Self::Increment(i as i64), + _ => unreachable!("validate_action_and_value returned NonNumericInc"), + }, + 6 => Self::Make(ObjType::Table), + _ => unreachable!("validate_action_and_value returned UnknownAction"), } } } diff --git a/rust/automerge/tests/fuzz-crashers/action-is-48.automerge b/rust/automerge/tests/fuzz-crashers/action-is-48.automerge new file mode 100644 index 0000000000000000000000000000000000000000..16e6f719a13dd6b1d9eff8488ee651ab7f72bfc3 GIT binary patch literal 58 vcmZq8_i8>{b9^SF0fT@6CSYJ-6J<7GbYco)N@OZvGGH_SqI$Lq{Phd~tz-

Date: Tue, 7 Mar 2023 09:49:04 -0700 Subject: [PATCH 70/72] Error instead of corrupt large op counters (#543) Since b78211ca6, OpIds have been silently truncated to 2**32. This causes corruption in the case the op id overflows. This change converts the silent error to a panic, and guards against the panic on the codepath found by the fuzzer. --- .../automerge/src/columnar/column_range/opid.rs | 6 +++--- .../src/columnar/encoding/properties.rs | 2 +- rust/automerge/src/storage/change.rs | 3 +++ .../src/storage/change/change_op_columns.rs | 2 ++ rust/automerge/src/types.rs | 6 +++--- rust/automerge/src/types/opids.rs | 2 +- .../fixtures/64bit_obj_id_change.automerge | Bin 0 -> 73 bytes .../tests/fixtures/64bit_obj_id_doc.automerge | Bin 0 -> 147 bytes rust/automerge/tests/test.rs | 16 ++++++++++++++++ 9 files changed, 29 insertions(+), 8 deletions(-) create mode 100644 rust/automerge/tests/fixtures/64bit_obj_id_change.automerge create mode 100644 rust/automerge/tests/fixtures/64bit_obj_id_doc.automerge diff --git a/rust/automerge/src/columnar/column_range/opid.rs b/rust/automerge/src/columnar/column_range/opid.rs index ae95d758..d2cdce79 100644 --- a/rust/automerge/src/columnar/column_range/opid.rs +++ b/rust/automerge/src/columnar/column_range/opid.rs @@ -104,11 +104,11 @@ impl<'a> OpIdIter<'a> { .transpose() .map_err(|e| DecodeColumnError::decode_raw("counter", e))?; match (actor, counter) { - (Some(Some(a)), Some(Some(c))) => match c.try_into() { - Ok(c) => Ok(Some(OpId::new(c, a as usize))), + (Some(Some(a)), Some(Some(c))) => match u32::try_from(c) { + Ok(c) => Ok(Some(OpId::new(c as u64, a as usize))), Err(_) => Err(DecodeColumnError::invalid_value( "counter", - "negative value encountered", + "negative or large value encountered", )), }, (Some(None), _) => Err(DecodeColumnError::unexpected_null("actor")), diff --git a/rust/automerge/src/columnar/encoding/properties.rs b/rust/automerge/src/columnar/encoding/properties.rs index a3bf1ed0..30f1169d 100644 --- a/rust/automerge/src/columnar/encoding/properties.rs +++ b/rust/automerge/src/columnar/encoding/properties.rs @@ -139,7 +139,7 @@ pub(crate) fn option_splice_scenario< } pub(crate) fn opid() -> impl Strategy + Clone { - (0..(i64::MAX as usize), 0..(i64::MAX as u64)).prop_map(|(actor, ctr)| OpId::new(ctr, actor)) + (0..(u32::MAX as usize), 0..(u32::MAX as u64)).prop_map(|(actor, ctr)| OpId::new(ctr, actor)) } pub(crate) fn elemid() -> impl Strategy + Clone { diff --git a/rust/automerge/src/storage/change.rs b/rust/automerge/src/storage/change.rs index ff3cc9ab..61db0b00 100644 --- a/rust/automerge/src/storage/change.rs +++ b/rust/automerge/src/storage/change.rs @@ -177,6 +177,9 @@ impl<'a> Change<'a, Unverified> { for op in self.iter_ops() { f(op?); } + if u32::try_from(u64::from(self.start_op)).is_err() { + return Err(ReadChangeOpError::CounterTooLarge); + } Ok(Change { bytes: self.bytes, header: self.header, diff --git a/rust/automerge/src/storage/change/change_op_columns.rs b/rust/automerge/src/storage/change/change_op_columns.rs index cd1cb150..86ec59c2 100644 --- a/rust/automerge/src/storage/change/change_op_columns.rs +++ b/rust/automerge/src/storage/change/change_op_columns.rs @@ -283,6 +283,8 @@ pub enum ReadChangeOpError { DecodeError(#[from] DecodeColumnError), #[error(transparent)] InvalidOpType(#[from] InvalidOpType), + #[error("counter too large")] + CounterTooLarge, } #[derive(Clone)] diff --git a/rust/automerge/src/types.rs b/rust/automerge/src/types.rs index 2978aa97..468986ec 100644 --- a/rust/automerge/src/types.rs 
+++ b/rust/automerge/src/types.rs @@ -439,17 +439,17 @@ pub(crate) struct OpId(u32, u32); impl OpId { pub(crate) fn new(counter: u64, actor: usize) -> Self { - Self(counter as u32, actor as u32) + Self(counter.try_into().unwrap(), actor.try_into().unwrap()) } #[inline] pub(crate) fn counter(&self) -> u64 { - self.0 as u64 + self.0.into() } #[inline] pub(crate) fn actor(&self) -> usize { - self.1 as usize + self.1.try_into().unwrap() } #[inline] diff --git a/rust/automerge/src/types/opids.rs b/rust/automerge/src/types/opids.rs index eaeed471..a81ccb36 100644 --- a/rust/automerge/src/types/opids.rs +++ b/rust/automerge/src/types/opids.rs @@ -129,7 +129,7 @@ mod tests { fn gen_opid(actors: Vec) -> impl Strategy { (0..actors.len()).prop_flat_map(|actor_idx| { - (Just(actor_idx), 0..u64::MAX) + (Just(actor_idx), 0..(u32::MAX as u64)) .prop_map(|(actor_idx, counter)| OpId::new(counter, actor_idx)) }) } diff --git a/rust/automerge/tests/fixtures/64bit_obj_id_change.automerge b/rust/automerge/tests/fixtures/64bit_obj_id_change.automerge new file mode 100644 index 0000000000000000000000000000000000000000..700342a2df71772d78f0373385f44aae9eb88c7b GIT binary patch literal 73 zcmZq8_i9cmO}NHr&meG%DY?GbS?DGk_of5L_6# literal 0 HcmV?d00001 diff --git a/rust/automerge/tests/fixtures/64bit_obj_id_doc.automerge b/rust/automerge/tests/fixtures/64bit_obj_id_doc.automerge new file mode 100644 index 0000000000000000000000000000000000000000..6beb57fe9ad7d5428d5b854c0e39f8bb57dcfdf7 GIT binary patch literal 147 zcmZq8_i7GNJ@|p4gOO3-7FS4!le1?_E5p*)57*rEWlSnfxVFRCa9`J~6^(1kJz9Nc zT|0UFe#&#R(pL)KvpP?)GcqwV33Dj3n{qiYg)y; Date: Thu, 9 Mar 2023 08:09:43 -0700 Subject: [PATCH 71/72] smaller automerge c (#545) * Fix automerge-c tests on mac * Generate significantly smaller automerge-c builds This cuts the size of libautomerge_core.a from 25Mb to 1.6Mb on macOS and 53Mb to 2.7Mb on Linux. As a side-effect of setting codegen-units = 1 for all release builds the optimized wasm files are also 100kb smaller. --- .github/workflows/ci.yaml | 8 +++++--- README.md | 5 ++++- rust/Cargo.toml | 9 ++------- rust/automerge-c/CMakeLists.txt | 26 +++++++++++++++++++++---- rust/automerge-c/test/byte_span_tests.c | 1 + 5 files changed, 34 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0263f408..8519ac5e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -2,10 +2,10 @@ name: CI on: push: branches: - - main + - main pull_request: branches: - - main + - main jobs: fmt: runs-on: ubuntu-latest @@ -118,7 +118,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.67.0 + toolchain: nightly-2023-01-26 default: true - uses: Swatinem/rust-cache@v1 - name: Install CMocka @@ -127,6 +127,8 @@ jobs: uses: jwlawson/actions-setup-cmake@v1.12 with: cmake-version: latest + - name: Install rust-src + run: rustup component add rust-src - name: Build and test C bindings run: ./scripts/ci/cmake-build Release Static shell: bash diff --git a/README.md b/README.md index 76d48ddd..ad174da4 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,6 @@ to figure out how to use it. 
If you are looking to build rust applications which use automerge you may want to look into [autosurgeon](https://github.com/alexjg/autosurgeon) - ## Repository Organisation - `./rust` - the rust rust implementation and also the Rust components of @@ -119,6 +118,10 @@ yarn --cwd ./javascript # install rust dependencies cargo install wasm-bindgen-cli wasm-opt cargo-deny +# get nightly rust to produce optimized automerge-c builds +rustup toolchain install nightly +rustup component add rust-src --toolchain nightly + # add wasm target in addition to current architecture rustup target add wasm32-unknown-unknown diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 938100cf..5d29fc9f 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -10,13 +10,8 @@ members = [ resolver = "2" [profile.release] -debug = true lto = true -opt-level = 3 +codegen-units = 1 [profile.bench] -debug = true - -[profile.release.package.automerge-wasm] -debug = false -opt-level = 3 +debug = true \ No newline at end of file diff --git a/rust/automerge-c/CMakeLists.txt b/rust/automerge-c/CMakeLists.txt index 056d111b..0c35eebd 100644 --- a/rust/automerge-c/CMakeLists.txt +++ b/rust/automerge-c/CMakeLists.txt @@ -43,19 +43,37 @@ endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) +# In order to build with -Z build-std, we need to pass target explicitly. +# https://doc.rust-lang.org/cargo/reference/unstable.html#build-std +execute_process ( + COMMAND rustc -vV + OUTPUT_VARIABLE RUSTC_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE +) +string(REGEX REPLACE ".*host: ([^ \n]*).*" "\\1" + CARGO_TARGET + ${RUSTC_VERSION} +) + if(BUILD_TYPE_LOWER STREQUAL debug) set(CARGO_BUILD_TYPE "debug") - set(CARGO_FLAG "") + set(CARGO_FLAG --target=${CARGO_TARGET}) else() set(CARGO_BUILD_TYPE "release") - set(CARGO_FLAG "--release") + if (NOT RUSTC_VERSION MATCHES "nightly") + set(RUSTUP_TOOLCHAIN nightly) + endif() + + set(RUSTFLAGS -C\ panic=abort) + + set(CARGO_FLAG -Z build-std=std,panic_abort --release --target=${CARGO_TARGET}) endif() set(CARGO_FEATURES "") -set(CARGO_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_BUILD_TYPE}") +set(CARGO_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_TARGET}/${CARGO_BUILD_TYPE}") set(BINDINGS_NAME "${LIBRARY_NAME}_core") @@ -90,7 +108,7 @@ add_custom_command( # configuration file has been updated. ${CMAKE_COMMAND} -DCONDITION=NOT_EXISTS -P ${CMAKE_SOURCE_DIR}/cmake/file-touch.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CMAKE_SOURCE_DIR}/cbindgen.toml COMMAND - ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} + ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN} RUSTFLAGS=${RUSTFLAGS} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} COMMAND # Compensate for cbindgen's translation of consecutive uppercase letters to "ScreamingSnakeCase". 
${CMAKE_COMMAND} -DMATCH_REGEX=A_M\([^_]+\)_ -DREPLACE_EXPR=AM_\\1_ -P ${CMAKE_SOURCE_DIR}/cmake/file-regex-replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h diff --git a/rust/automerge-c/test/byte_span_tests.c b/rust/automerge-c/test/byte_span_tests.c index 43856f3b..0b1c86a1 100644 --- a/rust/automerge-c/test/byte_span_tests.c +++ b/rust/automerge-c/test/byte_span_tests.c @@ -3,6 +3,7 @@ #include #include #include +#include /* third-party */ #include From cb409b6ffe2cec15ce7724c291cf91d383b4c19b Mon Sep 17 00:00:00 2001 From: alexjg Date: Thu, 9 Mar 2023 18:10:23 +0000 Subject: [PATCH 72/72] docs: timestamp -> time in automerge.change examples (#548) --- javascript/src/stable.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/src/stable.ts b/javascript/src/stable.ts index 74410346..e83b127f 100644 --- a/javascript/src/stable.ts +++ b/javascript/src/stable.ts @@ -305,7 +305,7 @@ export function from>( * @example A change with a message and a timestamp * * ``` - * doc1 = automerge.change(doc1, {message: "add another value", timestamp: 1640995200}, d => { + * doc1 = automerge.change(doc1, {message: "add another value", time: 1640995200}, d => { * d.key2 = "value2" * }) * ``` @@ -316,7 +316,7 @@ export function from>( * let patchCallback = patch => { * patchedPath = patch.path * } - * doc1 = automerge.change(doc1, {message, "add another value", timestamp: 1640995200, patchCallback}, d => { + * doc1 = automerge.change(doc1, {message, "add another value", time: 1640995200, patchCallback}, d => { * d.key2 = "value2" * }) * assert.equal(patchedPath, ["key2"])