Compare commits: rework-ids...main

828 commits

[Commit table omitted: the extracted Author / SHA1 / Date table contained only bare commit SHAs (cb409b6ffe through 6932bdff08) with empty author and date columns.]

.github/workflows/advisory-cron.yaml (2 changes)

@@ -1,4 +1,4 @@
-name: ci
+name: Advisories
 on:
   schedule:
     - cron: '0 18 * * *'

.github/workflows/ci.yaml (99 changes)

@@ -1,5 +1,11 @@
-name: ci
-on: [push, pull_request]
+name: CI
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
 jobs:
   fmt:
     runs-on: ubuntu-latest

@@ -8,7 +14,8 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: 1.67.0
           default: true
           components: rustfmt
       - uses: Swatinem/rust-cache@v1
       - run: ./scripts/ci/fmt

@@ -21,7 +28,8 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: 1.67.0
           default: true
           components: clippy
       - uses: Swatinem/rust-cache@v1
       - run: ./scripts/ci/lint

@@ -34,9 +42,14 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: 1.67.0
           default: true
       - uses: Swatinem/rust-cache@v1
-      - run: ./scripts/ci/docs
+      - name: Build rust docs
+        run: ./scripts/ci/rust-docs
+        shell: bash
+      - name: Install doxygen
+        run: sudo apt-get install -y doxygen
+        shell: bash

   cargo-deny:

@@ -51,31 +64,88 @@
       - uses: actions/checkout@v2
       - uses: EmbarkStudios/cargo-deny-action@v1
         with:
+          arguments: '--manifest-path ./rust/Cargo.toml'
           command: check ${{ matrix.checks }}

+  wasm_tests:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install wasm-bindgen-cli
+        run: cargo install wasm-bindgen-cli wasm-opt
+      - name: Install wasm32 target
+        run: rustup target add wasm32-unknown-unknown
+      - name: run tests
+        run: ./scripts/ci/wasm_tests
+  deno_tests:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: denoland/setup-deno@v1
+        with:
+          deno-version: v1.x
+      - name: Install wasm-bindgen-cli
+        run: cargo install wasm-bindgen-cli wasm-opt
+      - name: Install wasm32 target
+        run: rustup target add wasm32-unknown-unknown
+      - name: run tests
+        run: ./scripts/ci/deno_tests
+
+  js_fmt:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: yarn global add prettier
+      - name: format
+        run: prettier -c javascript/.prettierrc javascript
+
   js_tests:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
-      - name: Install wasm-pack
-        run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+      - name: Install wasm-bindgen-cli
+        run: cargo install wasm-bindgen-cli wasm-opt
+      - name: Install wasm32 target
+        run: rustup target add wasm32-unknown-unknown
       - name: run tests
         run: ./scripts/ci/js_tests

+  cmake_build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: nightly-2023-01-26
+          default: true
+      - uses: Swatinem/rust-cache@v1
+      - name: Install CMocka
+        run: sudo apt-get install -y libcmocka-dev
+      - name: Install/update CMake
+        uses: jwlawson/actions-setup-cmake@v1.12
+        with:
+          cmake-version: latest
+      - name: Install rust-src
+        run: rustup component add rust-src
+      - name: Build and test C bindings
+        run: ./scripts/ci/cmake-build Release Static
+        shell: bash
+
   linux:
     runs-on: ubuntu-latest
     strategy:
       matrix:
         toolchain:
-          - stable
-          - nightly
-    continue-on-error: ${{ matrix.toolchain == 'nightly' }}
+          - 1.67.0
     steps:
       - uses: actions/checkout@v2
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
           toolchain: ${{ matrix.toolchain }}
           default: true
       - uses: Swatinem/rust-cache@v1
       - run: ./scripts/ci/build-test
         shell: bash

@@ -87,7 +157,8 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: 1.67.0
           default: true
       - uses: Swatinem/rust-cache@v1
       - run: ./scripts/ci/build-test
         shell: bash

@@ -99,8 +170,8 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: 1.67.0
           default: true
       - uses: Swatinem/rust-cache@v1
       - run: ./scripts/ci/build-test
         shell: bash

.github/workflows/docs.yaml (new file, 52 lines)

@@ -0,0 +1,52 @@
on:
  push:
    branches:
      - main

name: Documentation

jobs:
  deploy-docs:
    concurrency: deploy-docs
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Cache
        uses: Swatinem/rust-cache@v1

      - name: Clean docs dir
        run: rm -rf docs
        shell: bash

      - name: Clean Rust docs dir
        uses: actions-rs/cargo@v1
        with:
          command: clean
          args: --manifest-path ./rust/Cargo.toml --doc

      - name: Build Rust docs
        uses: actions-rs/cargo@v1
        with:
          command: doc
          args: --manifest-path ./rust/Cargo.toml --workspace --all-features --no-deps

      - name: Move Rust docs
        run: mkdir -p docs && mv rust/target/doc/* docs/.
        shell: bash

      - name: Configure root page
        run: echo '<meta http-equiv="refresh" content="0; url=automerge">' > docs/index.html

      - name: Deploy docs
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./docs

.github/workflows/release.yaml (new file, 214 lines)

@@ -0,0 +1,214 @@
name: Release
on:
  push:
    branches:
      - main

jobs:
  check_if_wasm_version_upgraded:
    name: Check if WASM version has been upgraded
    runs-on: ubuntu-latest
    outputs:
      wasm_version: ${{ steps.version-updated.outputs.current-package-version }}
      wasm_has_updated: ${{ steps.version-updated.outputs.has-updated }}
    steps:
      - uses: JiPaix/package-json-updated-action@v1.0.5
        id: version-updated
        with:
          path: rust/automerge-wasm/package.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  publish-wasm:
    name: Publish WASM package
    runs-on: ubuntu-latest
    needs:
      - check_if_wasm_version_upgraded
    # We create release only if the version in the package.json has been upgraded
    if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true'
    steps:
      - uses: actions/setup-node@v3
        with:
          node-version: '16.x'
          registry-url: 'https://registry.npmjs.org'
      - uses: denoland/setup-deno@v1
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.ref }}
      - name: Get rid of local github workflows
        run: rm -r .github/workflows
      - name: Remove tmp_branch if it exists
        run: git push origin :tmp_branch || true
      - run: git checkout -b tmp_branch
      - name: Install wasm-bindgen-cli
        run: cargo install wasm-bindgen-cli wasm-opt
      - name: Install wasm32 target
        run: rustup target add wasm32-unknown-unknown
      - name: run wasm js tests
        id: wasm_js_tests
        run: ./scripts/ci/wasm_tests
      - name: run wasm deno tests
        id: wasm_deno_tests
        run: ./scripts/ci/deno_tests
      - name: build release
        id: build_release
        run: |
          npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm run release
      - name: Collate deno release files
        if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success'
        run: |
          mkdir $GITHUB_WORKSPACE/deno_wasm_dist
          cp $GITHUB_WORKSPACE/rust/automerge-wasm/deno/* $GITHUB_WORKSPACE/deno_wasm_dist
          cp $GITHUB_WORKSPACE/rust/automerge-wasm/index.d.ts $GITHUB_WORKSPACE/deno_wasm_dist
          cp $GITHUB_WORKSPACE/rust/automerge-wasm/README.md $GITHUB_WORKSPACE/deno_wasm_dist
          cp $GITHUB_WORKSPACE/rust/automerge-wasm/LICENSE $GITHUB_WORKSPACE/deno_wasm_dist
          sed -i '1i /// <reference types="./index.d.ts" />' $GITHUB_WORKSPACE/deno_wasm_dist/automerge_wasm.js
      - name: Create npm release
        if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success'
        run: |
          if [ "$(npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm show . version)" = "$VERSION" ]; then
            echo "This version is already published"
            exit 0
          fi
          EXTRA_ARGS="--access public"
          if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then
            echo "Is pre-release version"
            EXTRA_ARGS="$EXTRA_ARGS --tag next"
          fi
          if [ "$NODE_AUTH_TOKEN" = "" ]; then
            echo "Can't publish on NPM, You need a NPM_TOKEN secret."
            false
          fi
          npm publish $GITHUB_WORKSPACE/rust/automerge-wasm $EXTRA_ARGS
        env:
          NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
          VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }}
      - name: Commit wasm deno release files
        run: |
          git config --global user.name "actions"
          git config --global user.email actions@github.com
          git add $GITHUB_WORKSPACE/deno_wasm_dist
          git commit -am "Add deno release files"
          git push origin tmp_branch
      - name: Tag wasm release
        if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success'
        uses: softprops/action-gh-release@v1
        with:
          name: Automerge Wasm v${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }}
          tag_name: js/automerge-wasm-${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }}
          target_commitish: tmp_branch
          generate_release_notes: false
          draft: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Remove tmp_branch
        run: git push origin :tmp_branch
  check_if_js_version_upgraded:
    name: Check if JS version has been upgraded
    runs-on: ubuntu-latest
    outputs:
      js_version: ${{ steps.version-updated.outputs.current-package-version }}
      js_has_updated: ${{ steps.version-updated.outputs.has-updated }}
    steps:
      - uses: JiPaix/package-json-updated-action@v1.0.5
        id: version-updated
        with:
          path: javascript/package.json
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  publish-js:
    name: Publish JS package
    runs-on: ubuntu-latest
    needs:
      - check_if_js_version_upgraded
      - check_if_wasm_version_upgraded
      - publish-wasm
    # We create release only if the version in the package.json has been upgraded and after the WASM release
    if: |
      (always() && ! cancelled()) &&
      (needs.publish-wasm.result == 'success' || needs.publish-wasm.result == 'skipped') &&
      needs.check_if_js_version_upgraded.outputs.js_has_updated == 'true'
    steps:
      - uses: actions/setup-node@v3
        with:
          node-version: '16.x'
          registry-url: 'https://registry.npmjs.org'
      - uses: denoland/setup-deno@v1
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.ref }}
      - name: Get rid of local github workflows
        run: rm -r .github/workflows
      - name: Remove js_tmp_branch if it exists
        run: git push origin :js_tmp_branch || true
      - run: git checkout -b js_tmp_branch
      - name: check js formatting
        run: |
          yarn global add prettier
          prettier -c javascript/.prettierrc javascript
      - name: run js tests
        id: js_tests
        run: |
          cargo install wasm-bindgen-cli wasm-opt
          rustup target add wasm32-unknown-unknown
          ./scripts/ci/js_tests
      - name: build js release
        id: build_release
        run: |
          npm --prefix $GITHUB_WORKSPACE/javascript run build
      - name: build js deno release
        id: build_deno_release
        run: |
          VERSION=$WASM_VERSION npm --prefix $GITHUB_WORKSPACE/javascript run deno:build
        env:
          WASM_VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }}
      - name: run deno tests
        id: deno_tests
        run: |
          npm --prefix $GITHUB_WORKSPACE/javascript run deno:test
      - name: Collate deno release files
        if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success'
        run: |
          mkdir $GITHUB_WORKSPACE/deno_js_dist
          cp $GITHUB_WORKSPACE/javascript/deno_dist/* $GITHUB_WORKSPACE/deno_js_dist
      - name: Create npm release
        if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success'
        run: |
          if [ "$(npm --prefix $GITHUB_WORKSPACE/javascript show . version)" = "$VERSION" ]; then
            echo "This version is already published"
            exit 0
          fi
          EXTRA_ARGS="--access public"
          if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then
            echo "Is pre-release version"
            EXTRA_ARGS="$EXTRA_ARGS --tag next"
          fi
          if [ "$NODE_AUTH_TOKEN" = "" ]; then
            echo "Can't publish on NPM, You need a NPM_TOKEN secret."
            false
          fi
          npm publish $GITHUB_WORKSPACE/javascript $EXTRA_ARGS
        env:
          NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
          VERSION: ${{ needs.check_if_js_version_upgraded.outputs.js_version }}
      - name: Commit js deno release files
        run: |
          git config --global user.name "actions"
          git config --global user.email actions@github.com
          git add $GITHUB_WORKSPACE/deno_js_dist
          git commit -am "Add deno js release files"
          git push origin js_tmp_branch
      - name: Tag JS release
        if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success'
        uses: softprops/action-gh-release@v1
        with:
          name: Automerge v${{ needs.check_if_js_version_upgraded.outputs.js_version }}
          tag_name: js/automerge-${{ needs.check_if_js_version_upgraded.outputs.js_version }}
          target_commitish: js_tmp_branch
          generate_release_notes: false
          draft: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Remove js_tmp_branch
        run: git push origin :js_tmp_branch

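The "Create npm release" steps above gate the npm dist-tag on the version string. As a minimal illustration (plain Node, not part of the workflow), this is the same rule the bash snippet implements:

```js
// Sketch of the dist-tag rule used by the "Create npm release" steps:
// pre-release versions (alpha/beta/rc) are published under the "next" tag,
// anything else goes to npm's default "latest" tag.
function npmPublishArgs(version) {
  const args = ["--access", "public"]
  if (/(alpha|beta|rc)\./.test(version)) {
    args.push("--tag", "next") // mirrors: EXTRA_ARGS="$EXTRA_ARGS --tag next"
  }
  return args
}

console.log(npmPublishArgs("2.0.1-alpha.3").join(" ")) // --access public --tag next
console.log(npmPublishArgs("2.0.0").join(" "))         // --access public
```
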
.gitignore (4 changes)

@@ -1,4 +1,6 @@
-/target
+/.direnv
 perf.*
 /Cargo.lock
 build/
+.vim/*
+/target

Makefile (deleted, 13 lines)

@@ -1,13 +0,0 @@
rust:
	cd automerge && cargo test

wasm:
	cd automerge-wasm && yarn
	cd automerge-wasm && yarn build
	cd automerge-wasm && yarn test
	cd automerge-wasm && yarn link

js: wasm
	cd automerge-js && yarn
	cd automerge-js && yarn link "automerge-wasm"
	cd automerge-js && yarn test

README.md (188 changes)

@@ -1,81 +1,147 @@
-# Automerge - NEXT
+# Automerge

-This is pretty much a ground up rewrite of automerge-rs. The objective of this
-rewrite is to radically simplify the API. The end goal being to produce a library
-which is easy to work with both in Rust and from FFI.
+<img src='./img/sign.svg' width='500' alt='Automerge logo' />

-## How?
+[](https://automerge.org/)
+[](https://automerge.org/automerge-rs/automerge/)
+[](https://github.com/automerge/automerge-rs/actions/workflows/ci.yaml)
+[](https://github.com/automerge/automerge-rs/actions/workflows/docs.yaml)

-The current iteration of automerge-rs is complicated to work with because it
-adopts the frontend/backend split architecture of the JS implementation. This
-architecture was necessary due to basic operations on the automerge opset being
-too slow to perform on the UI thread. Recently @orionz has been able to improve
-the performance to the point where the split is no longer necessary. This means
-we can adopt a much simpler mutable API.
+Automerge is a library which provides fast implementations of several different
+CRDTs, a compact compression format for these CRDTs, and a sync protocol for
+efficiently transmitting those changes over the network. The objective of the
+project is to support [local-first](https://www.inkandswitch.com/local-first/) applications in the same way that relational
+databases support server applications - by providing mechanisms for persistence
+which allow application developers to avoid thinking about hard distributed
+computing problems. Automerge aims to be PostgreSQL for your local-first app.

-The architecture is now built around the `OpTree`. This is a data structure
-which supports efficiently inserting new operations and realising values of
-existing operations. Most interactions with the `OpTree` are in the form of
-implementations of `TreeQuery` - a trait which can be used to traverse the
-optree and produce state of some kind. User facing operations are exposed on
-an `Automerge` object; under the covers these operations typically instantiate
-some `TreeQuery` and run it over the `OpTree`.
+If you're looking for documentation on the JavaScript implementation take a look
+at https://automerge.org/docs/hello/. There are other implementations in both
+Rust and C, but they are earlier and don't have documentation yet. You can find
+them in `rust/automerge` and `rust/automerge-c` if you are comfortable
+reading the code and tests to figure out how to use them.
+
+If you're familiar with CRDTs and interested in the design of Automerge in
+particular take a look at https://automerge.org/docs/how-it-works/backend/
+
+Finally, if you want to talk to us about this project please [join the
+Slack](https://join.slack.com/t/automerge/shared_invite/zt-e4p3760n-kKh7r3KRH1YwwNfiZM8ktw)

 ## Status

-We have working code which passes all of the tests in the JS test suite. We're
-now working on writing a bunch more tests and cleaning up the API.
+This project is formed of a core Rust implementation which is exposed via FFI in
+javascript+WASM, C, and soon other languages. Alex
+([@alexjg](https://github.com/alexjg/)) is working full time on maintaining
+automerge, other members of Ink and Switch are also contributing time and there
+are several other maintainers. The focus is currently on shipping the new JS
+package. We expect to be iterating the API and adding new features over the next
+six months so there will likely be several major version bumps in all packages
+in that time.

-## Development
+In general we try and respect semver.

-### Running CI
+### JavaScript

-The steps CI will run are all defined in `./scripts/ci`. Obviously CI will run
-everything when you submit a PR, but if you want to run everything locally
-before you push you can run `./scripts/ci/run` to run everything.
+A stable release of the javascript package is currently available as
+`@automerge/automerge@2.0.0`. Pre-release versions of `2.0.1` are
+available as `2.0.1-alpha.n`. `2.0.1*` packages are also available for Deno at
+https://deno.land/x/automerge

-### Running the JS tests
+### Rust

-You will need to have [node](https://nodejs.org/en/), [yarn](https://yarnpkg.com/getting-started/install), [rust](https://rustup.rs/) and [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/) installed.
+The rust codebase is currently oriented around producing a performant backend
+for the Javascript wrapper and as such the API for Rust code is low level and
+not well documented. We will be returning to this over the next few months but
+for now you will need to be comfortable reading the tests and asking questions
+to figure out how to use it. If you are looking to build rust applications which
+use automerge you may want to look into
+[autosurgeon](https://github.com/alexjg/autosurgeon)

-To build and test the rust library:
+## Repository Organisation

-```shell
-$ cd automerge
-$ cargo test
-```
+- `./rust` - the rust implementation and also the Rust components of
+  platform specific wrappers (e.g. `automerge-wasm` for the WASM API or
+  `automerge-c` for the C FFI bindings)
+- `./javascript` - The javascript library which uses `automerge-wasm`
+  internally but presents a more idiomatic javascript interface
+- `./scripts` - scripts which are useful to maintenance of the repository.
+  This includes the scripts which are run in CI.
+- `./img` - static assets for use in `.md` files
+
+## Building
+
+To build this codebase you will need:
+
+- `rust`
+- `node`
+- `yarn`
+- `cmake`
+- `cmocka`
+
+You will also need to install the following with `cargo install`
+
+- `wasm-bindgen-cli`
+- `wasm-opt`
+- `cargo-deny`
+
+And ensure you have added the `wasm32-unknown-unknown` target for rust cross-compilation.
+
+The various subprojects (the rust code, the wrapper projects) have their own
+build instructions, but to run the tests that will be run in CI you can run
+`./scripts/ci/run`.
+
+### For macOS
+
+These instructions worked to build locally on macOS 13.1 (arm64) as of
+Nov 29th 2022.
+
+```bash
+# clone the repo
+git clone https://github.com/automerge/automerge-rs
+cd automerge-rs
+
+# install rustup
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+
+# install homebrew
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+
+# install cmake, node, cmocka
+brew install cmake node cmocka
+
+# install yarn
+npm install --global yarn
+
+# install javascript dependencies
+yarn --cwd ./javascript
+
+# install rust dependencies
+cargo install wasm-bindgen-cli wasm-opt cargo-deny
+
+# get nightly rust to produce optimized automerge-c builds
+rustup toolchain install nightly
+rustup component add rust-src --toolchain nightly
+
+# add wasm target in addition to current architecture
+rustup target add wasm32-unknown-unknown
+
+# Run ci script
+./scripts/ci/run
+```

-To build and test the wasm library:
+If your build fails to find `cmocka.h` you may need to teach it about homebrew's
+installation location:

-```shell
-## setup
-$ cd automerge-wasm
-$ yarn
-
-## building or testing
-$ yarn build
-$ yarn test
-
-## without this the js library wont automatically use changes
-$ yarn link
-
-## cutting a release or doing benchmarking
-$ yarn release
-$ yarn opt ## or set `wasm-opt = false` in Cargo.toml on supported platforms (not arm64 osx)
-```
+```
+export CPATH=/opt/homebrew/include
+export LIBRARY_PATH=/opt/homebrew/lib
+./scripts/ci/run
+```

-And finally to test the js library. This is where most of the tests reside.
+## Contributing

-```shell
-## setup
-$ cd automerge-js
-$ yarn
-$ yarn link "automerge-wasm"
-
-## testing
-$ yarn test
-```
-
-## Benchmarking
-
-The `edit-trace` folder has the main code for running the edit trace benchmarking.
+Please try and split your changes up into relatively independent commits which
+change one subsystem at a time and add good commit messages which describe what
+the change is and why you're making it (err on the side of longer commit
+messages). `git blame` should give future maintainers a good idea of why
+something is the way it is.

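To make the JavaScript section of the new README concrete, here is a minimal sketch of using the published `@automerge/automerge` 2.0 package it describes (API names as documented at automerge.org):

```js
import * as Automerge from "@automerge/automerge"

let doc = Automerge.init()
doc = Automerge.change(doc, (d) => {
  d.cards = []
  d.cards.push({ title: "rewrite everything", done: false })
})

const bytes = Automerge.save(doc)      // compact binary encoding
const restored = Automerge.load(bytes) // round-trips the document and its history
console.log(restored.cards[0].title)   // "rewrite everything"
```
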
TODO.md (deleted, 20 lines)

@@ -1,20 +0,0 @@

### next steps:
1. C API

### ergronomics:
1. value() -> () or something that into's a value

### automerge:
1. single pass (fast) load
2. micro-patches / bare bones observation API / fully hydrated documents

### sync
1. get all sync tests passing

### maybe:
1. tables

### no:
1. cursors

[Deleted file, 1 line; filename not preserved in the extraction]

@@ -1 +0,0 @@
todo

automerge-wasm/automerge-js/.gitignore (deleted, 2 lines)

@@ -1,2 +0,0 @@
/node_modules
/yarn.lock

[Deleted file, 18 lines; filename not preserved in the extraction]

@@ -1,18 +0,0 @@
{
  "name": "automerge-js",
  "version": "0.1.0",
  "main": "src/index.js",
  "license": "MIT",
  "scripts": {
    "test": "mocha --bail --full-trace"
  },
  "devDependencies": {
    "mocha": "^9.1.1"
  },
  "dependencies": {
    "automerge-wasm": "file:../dev",
    "fast-sha256": "^1.3.0",
    "pako": "^2.0.4",
    "uuid": "^8.3"
  }
}

[Deleted file, 18 lines; filename not preserved in the extraction]

@@ -1,18 +0,0 @@
// Properties of the document root object
//const OPTIONS = Symbol('_options') // object containing options passed to init()
//const CACHE = Symbol('_cache') // map from objectId to immutable object
const STATE = Symbol('_state') // object containing metadata about current state (e.g. sequence numbers)
const HEADS = Symbol('_heads') // the heads of the document when it was handed out
const OBJECT_ID = Symbol('_objectId') // the object ID of the current object (string)
const READ_ONLY = Symbol('_readOnly') // whether the proxy should reject mutation
const FROZEN = Symbol('_frozen') // whether this handle is an outdated (frozen) copy

// Properties of all Automerge objects
//const OBJECT_ID = Symbol('_objectId') // the object ID of the current object (string)
//const CONFLICTS = Symbol('_conflicts') // map or list (depending on object type) of conflicts
//const CHANGE = Symbol('_change') // the context object on proxy objects used in change callback
//const ELEM_IDS = Symbol('_elemIds') // list containing the element ID of each list element

module.exports = {
  STATE, HEADS, OBJECT_ID, READ_ONLY, FROZEN
}

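The point of using `Symbol` keys here is that the wrapper can hang bookkeeping off document objects without it showing up in normal property access. A small self-contained sketch of that pattern (illustrative names only, not the removed module's code):

```js
// Sketch: symbol-keyed metadata is invisible to ordinary property
// enumeration, so user data and bookkeeping never collide.
const STATE = Symbol("_state")

const doc = { title: "hello" }
doc[STATE] = { actorId: "aabbcc", seq: 1 } // hidden bookkeeping

console.log(Object.keys(doc))    // [ 'title' ]        - symbol key not listed
console.log(JSON.stringify(doc)) // {"title":"hello"}  - not serialized
console.log(doc[STATE].actorId)  // "aabbcc"           - still reachable via the symbol
```
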
[Deleted file, 372 lines; filename not preserved in the extraction]

@@ -1,372 +0,0 @@
const AutomergeWASM = require("automerge-wasm")
const uuid = require('./uuid')

let { rootProxy, listProxy, textProxy, mapProxy } = require("./proxies")
let { Counter } = require("./counter")
let { Text } = require("./text")
let { Int, Uint, Float64 } = require("./numbers")
let { STATE, HEADS, OBJECT_ID, READ_ONLY, FROZEN } = require("./constants")

function init(actor) {
  const state = AutomergeWASM.init(actor)
  return rootProxy(state, true);
}

function clone(doc) {
  const state = doc[STATE].clone()
  return rootProxy(state, true);
}

function free(doc) {
  return doc[STATE].free()
}

function from(data, actor) {
  let doc1 = init(actor)
  let doc2 = change(doc1, (d) => Object.assign(d, data))
  return doc2
}

function change(doc, options, callback) {
  if (callback === undefined) {
    // FIXME implement options
    callback = options
    options = {}
  }
  if (typeof options === "string") {
    options = { message: options }
  }
  if (doc === undefined || doc[STATE] === undefined || doc[OBJECT_ID] !== "_root") {
    throw new RangeError("must be the document root");
  }
  if (doc[FROZEN] === true) {
    throw new RangeError("Attempting to use an outdated Automerge document")
  }
  if (!!doc[HEADS] === true) {
    console.log("HEADS", doc[HEADS])
    throw new RangeError("Attempting to change an out of date document");
  }
  if (doc[READ_ONLY] === false) {
    throw new RangeError("Calls to Automerge.change cannot be nested")
  }
  const state = doc[STATE]
  const heads = state.getHeads()
  try {
    doc[HEADS] = heads
    doc[FROZEN] = true
    let root = rootProxy(state);
    callback(root)
    if (state.pending_ops() === 0) {
      doc[FROZEN] = false
      doc[HEADS] = undefined
      return doc
    } else {
      state.commit(options.message, options.time)
      return rootProxy(state, true);
    }
  } catch (e) {
    //console.log("ERROR: ",e)
    doc[FROZEN] = false
    doc[HEADS] = undefined
    state.rollback()
    throw e
  }
}

function emptyChange(doc, options) {
  if (options === undefined) {
    options = {}
  }
  if (typeof options === "string") {
    options = { message: options }
  }

  if (doc === undefined || doc[STATE] === undefined || doc[OBJECT_ID] !== "_root") {
    throw new RangeError("must be the document root");
  }
  if (doc[FROZEN] === true) {
    throw new RangeError("Attempting to use an outdated Automerge document")
  }
  if (doc[READ_ONLY] === false) {
    throw new RangeError("Calls to Automerge.change cannot be nested")
  }

  const state = doc[STATE]
  state.commit(options.message, options.time)
  return rootProxy(state, true);
}

function load(data, actor) {
  const state = AutomergeWASM.load(data, actor)
  return rootProxy(state, true);
}

function save(doc) {
  const state = doc[STATE]
  return state.save()
}

function merge(local, remote) {
  if (local[HEADS] === true) {
    throw new RangeError("Attempting to change an out of date document");
  }
  const localState = local[STATE]
  const heads = localState.getHeads()
  const remoteState = remote[STATE]
  const changes = localState.getChangesAdded(remoteState)
  localState.applyChanges(changes)
  local[HEADS] = heads
  return rootProxy(localState, true)
}

function getActorId(doc) {
  const state = doc[STATE]
  return state.getActorId()
}

function conflictAt(context, objectId, prop) {
  let values = context.values(objectId, prop)
  if (values.length <= 1) {
    return
  }
  let result = {}
  for (const conflict of values) {
    const datatype = conflict[0]
    const value = conflict[1]
    switch (datatype) {
      case "map":
        result[value] = mapProxy(context, value, [ prop ], true, true)
        break;
      case "list":
        result[value] = listProxy(context, value, [ prop ], true, true)
        break;
      case "text":
        result[value] = textProxy(context, value, [ prop ], true, true)
        break;
      //case "table":
      //case "cursor":
      case "str":
      case "uint":
      case "int":
      case "f64":
      case "boolean":
      case "bytes":
      case "null":
        result[conflict[2]] = value
        break;
      case "counter":
        result[conflict[2]] = new Counter(value)
        break;
      case "timestamp":
        result[conflict[2]] = new Date(value)
        break;
      default:
        throw RangeError(`datatype ${datatype} unimplemented`)
    }
  }
  return result
}

function getConflicts(doc, prop) {
  const state = doc[STATE]
  const objectId = doc[OBJECT_ID]
  return conflictAt(state, objectId, prop)
}

function getLastLocalChange(doc) {
  const state = doc[STATE]
  return state.getLastLocalChange()
}

function getObjectId(doc) {
  return doc[OBJECT_ID]
}

function getChanges(oldState, newState) {
  const o = oldState[STATE]
  const n = newState[STATE]
  const heads = oldState[HEADS]
  return n.getChanges(heads || o.getHeads())
}

function getAllChanges(doc) {
  const state = doc[STATE]
  return state.getChanges([])
}

function applyChanges(doc, changes) {
  if (doc === undefined || doc[STATE] === undefined || doc[OBJECT_ID] !== "_root") {
    throw new RangeError("must be the document root");
  }
  if (doc[FROZEN] === true) {
    throw new RangeError("Attempting to use an outdated Automerge document")
  }
  if (doc[READ_ONLY] === false) {
    throw new RangeError("Calls to Automerge.change cannot be nested")
  }
  const state = doc[STATE]
  const heads = state.getHeads()
  state.applyChanges(changes)
  doc[HEADS] = heads
  return [rootProxy(state, true)];
}

function getHistory(doc) {
  const actor = getActorId(doc)
  const history = getAllChanges(doc)
  return history.map((change, index) => ({
      get change () {
        return decodeChange(change)
      },
      get snapshot () {
        const [state] = applyChanges(init(), history.slice(0, index + 1))
        return state
      }
    })
  )
}

function equals(val1, val2) { // parameters were missing in the original
  if (!isObject(val1) || !isObject(val2)) return val1 === val2
  const keys1 = Object.keys(val1).sort(), keys2 = Object.keys(val2).sort()
  if (keys1.length !== keys2.length) return false
  for (let i = 0; i < keys1.length; i++) {
    if (keys1[i] !== keys2[i]) return false
    if (!equals(val1[keys1[i]], val2[keys2[i]])) return false
  }
  return true
}

function encodeSyncMessage(msg) {
  return AutomergeWASM.encodeSyncMessage(msg)
}

function decodeSyncMessage(msg) {
  return AutomergeWASM.decodeSyncMessage(msg)
}

function encodeSyncState(state) {
  return AutomergeWASM.encodeSyncState(state)
}

function decodeSyncState(state) { // parameter was missing in the original
  return AutomergeWASM.decodeSyncState(state)
}

function generateSyncMessage(doc, syncState) {
  const state = doc[STATE]
  return [ syncState, state.generateSyncMessage(syncState) ]
}

function receiveSyncMessage(doc, syncState, message) {
  if (doc === undefined || doc[STATE] === undefined || doc[OBJECT_ID] !== "_root") {
    throw new RangeError("must be the document root");
  }
  if (doc[FROZEN] === true) {
    throw new RangeError("Attempting to use an outdated Automerge document")
  }
  if (!!doc[HEADS] === true) {
    throw new RangeError("Attempting to change an out of date document");
  }
  if (doc[READ_ONLY] === false) {
    throw new RangeError("Calls to Automerge.change cannot be nested")
  }
  const state = doc[STATE]
  const heads = state.getHeads()
  state.receiveSyncMessage(syncState, message)
  doc[HEADS] = heads
  return [rootProxy(state, true), syncState, null];
}

function initSyncState() {
  return AutomergeWASM.initSyncState(change)
}

function encodeChange(change) {
  return AutomergeWASM.encodeChange(change)
}

function decodeChange(data) {
  return AutomergeWASM.decodeChange(data)
}

function encodeSyncMessage(change) {
  return AutomergeWASM.encodeSyncMessage(change)
}

function decodeSyncMessage(data) {
  return AutomergeWASM.decodeSyncMessage(data)
}

function encodeSyncState(change) {
  return AutomergeWASM.encodeSyncState(change)
}

function decodeSyncState(data) {
  return AutomergeWASM.decodeSyncState(data)
}

function getMissingDeps(doc, heads) {
  const state = doc[STATE]
  if (!heads) {
    heads = []
  }
  return state.getMissingDeps(heads)
}

function getHeads(doc) {
  const state = doc[STATE]
  return doc[HEADS] || state.getHeads()
}

function dump(doc) {
  const state = doc[STATE]
  state.dump()
}

function toJS(doc) {
  if (typeof doc === "object") {
    if (doc instanceof Uint8Array) {
      return doc
    }
    if (doc === null) {
      return doc
    }
    if (doc instanceof Array) {
      return doc.map((a) => toJS(a))
    }
    if (doc instanceof Text) {
      return doc.map((a) => toJS(a))
    }
    let tmp = {}
    for (const index in doc) { // "const" was missing in the original
      tmp[index] = toJS(doc[index])
    }
    return tmp
  } else {
    return doc
  }
}

module.exports = {
  init, from, change, emptyChange, clone, free,
  load, save, merge, getChanges, getAllChanges, applyChanges,
  getLastLocalChange, getObjectId, getActorId, getConflicts,
  encodeChange, decodeChange, equals, getHistory, getHeads, uuid,
  generateSyncMessage, receiveSyncMessage, initSyncState,
  decodeSyncMessage, encodeSyncMessage, decodeSyncState, encodeSyncState,
  getMissingDeps,
  dump, Text, Counter, Int, Uint, Float64, toJS,
}

// deprecated
// Frontend, setDefaultBackend, Backend

// more...
/*
for (let name of ['getObjectId', 'getObjectById',
  'setActorId',
  'Text', 'Table', 'Counter', 'Observable' ]) {
    module.exports[name] = Frontend[name]
}
*/

[Deleted file, 33 lines; filename not preserved in the extraction]

@@ -1,33 +0,0 @@
// Convenience classes to allow users to strictly specify the number type they want

class Int {
  constructor(value) {
    if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= Number.MIN_SAFE_INTEGER)) {
      throw new RangeError(`Value ${value} cannot be an int`)
    }
    this.value = value
    Object.freeze(this)
  }
}

class Uint {
  constructor(value) {
    if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= 0)) {
      throw new RangeError(`Value ${value} cannot be a uint`)
    }
    this.value = value
    Object.freeze(this)
  }
}

class Float64 {
  constructor(value) {
    if (typeof value !== 'number') {
      throw new RangeError(`Value ${value} cannot be a float64`)
    }
    this.value = value || 0.0
    Object.freeze(this)
  }
}

module.exports = { Int, Uint, Float64 }
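
These wrappers exist because a plain JavaScript `number` is ambiguous: the proxy layer's `import_value` (in the file below) infers `int` for integral numbers and `f64` otherwise, so these classes let callers force a scalar type. A small sketch:

```js
const { Int, Uint, Float64 } = require("./numbers") // historical module path

// Force the scalar type recorded in the document:
const a = new Int(-5)    // always stored as a signed integer ("int")
const b = new Uint(5)    // always stored as an unsigned integer ("uint")
const c = new Float64(5) // stored as a double ("f64") even though 5 is integral

// Without a wrapper, 5 would be inferred as "int" and 5.5 as "f64".
try {
  new Uint(-1) // out of range for an unsigned integer
} catch (e) {
  console.log(e.message) // "Value -1 cannot be a uint"
}
```
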
@ -1,623 +0,0 @@

const AutomergeWASM = require("automerge-wasm")
const { Int, Uint, Float64 } = require("./numbers");
const { Counter, getWriteableCounter } = require("./counter");
const { Text } = require("./text");
const { STATE, HEADS, FROZEN, OBJECT_ID, READ_ONLY } = require("./constants")
const { MAP, LIST, TABLE, TEXT } = require("automerge-wasm")

function parseListIndex(key) {
  if (typeof key === 'string' && /^[0-9]+$/.test(key)) key = parseInt(key, 10)
  if (typeof key !== 'number') {
    // throw new TypeError('A list index must be a number, but you passed ' + JSON.stringify(key))
    return key
  }
  if (key < 0 || isNaN(key) || key === Infinity || key === -Infinity) {
    throw new RangeError('A list index must be positive, but you passed ' + key)
  }
  return key
}

function valueAt(target, prop) {
  const { context, objectId, path, readonly, heads } = target
  let value = context.value(objectId, prop, heads)
  if (value === undefined) {
    return
  }
  const datatype = value[0]
  const val = value[1]
  switch (datatype) {
    case undefined: return;
    case "map": return mapProxy(context, val, [ ...path, prop ], readonly, heads);
    case "list": return listProxy(context, val, [ ...path, prop ], readonly, heads);
    case "text": return textProxy(context, val, [ ...path, prop ], readonly, heads);
    //case "table":
    //case "cursor":
    case "str": return val;
    case "uint": return val;
    case "int": return val;
    case "f64": return val;
    case "boolean": return val;
    case "null": return null;
    case "bytes": return val;
    case "counter": {
      if (readonly) {
        return new Counter(val);
      } else {
        return getWriteableCounter(val, context, path, objectId, prop)
      }
    }
    case "timestamp": return new Date(val);
    default:
      throw RangeError(`datatype ${datatype} unimplemented`)
  }
}

function import_value(value) {
  switch (typeof value) {
    case 'object':
      if (value == null) {
        return [ null, "null" ]
      } else if (value instanceof Uint) {
        return [ value.value, "uint" ]
      } else if (value instanceof Int) {
        return [ value.value, "int" ]
      } else if (value instanceof Float64) {
        return [ value.value, "f64" ]
      } else if (value instanceof Counter) {
        return [ value.value, "counter" ]
      } else if (value instanceof Date) {
        return [ value.getTime(), "timestamp" ]
      } else if (value instanceof Uint8Array) {
        return [ value, "bytes" ]
      } else if (value instanceof Array) {
        return [ value, "list" ]
      } else if (value instanceof Text) {
        return [ value, "text" ]
      } else if (value[OBJECT_ID]) {
        throw new RangeError('Cannot create a reference to an existing document object')
      } else {
        return [ value, "map" ]
      }
    case 'boolean':
      return [ value, "boolean" ]
    case 'number':
      if (Number.isInteger(value)) {
        return [ value, "int" ]
      } else {
        return [ value, "f64" ]
      }
    case 'string':
      return [ value ]
    default:
      throw new RangeError(`Unsupported type of value: ${typeof value}`)
  }
}

const MapHandler = {
  get (target, key) {
    const { context, objectId, path, readonly, frozen, heads } = target
    if (key === Symbol.toStringTag) { return target[Symbol.toStringTag] }
    if (key === OBJECT_ID) return objectId
    if (key === READ_ONLY) return readonly
    if (key === FROZEN) return frozen
    if (key === HEADS) return heads
    if (key === STATE) return context;
    return valueAt(target, key)
  },

  set (target, key, val) {
    let { context, objectId, path, readonly, frozen } = target
    if (val && val[OBJECT_ID]) {
      throw new RangeError('Cannot create a reference to an existing document object')
    }
    if (key === FROZEN) {
      target.frozen = val
      return
    }
    if (key === HEADS) {
      target.heads = val
      return
    }
    let [ value, datatype ] = import_value(val)
    if (frozen) {
      throw new RangeError("Attempting to use an outdated Automerge document")
    }
    if (readonly) {
      throw new RangeError(`Object property "${key}" cannot be modified`)
    }
    switch (datatype) {
      case "list":
        const list = context.set(objectId, key, LIST)
        const proxyList = listProxy(context, list, [ ...path, key ], readonly);
        for (let i = 0; i < value.length; i++) {
          proxyList[i] = value[i]
        }
        break;
      case "text":
        const text = context.set(objectId, key, TEXT)
        const proxyText = textProxy(context, text, [ ...path, key ], readonly);
        for (let i = 0; i < value.length; i++) {
          proxyText[i] = value.get(i)
        }
        break;
      case "map":
        const map = context.set(objectId, key, MAP)
        const proxyMap = mapProxy(context, map, [ ...path, key ], readonly);
        for (const key in value) {
          proxyMap[key] = value[key]
        }
        break;
      default:
        context.set(objectId, key, value, datatype)
    }
    return true
  },

  deleteProperty (target, key) {
    const { context, objectId, path, readonly, frozen } = target
    if (readonly) {
      throw new RangeError(`Object property "${key}" cannot be modified`)
    }
    context.del(objectId, key)
    return true
  },

  has (target, key) {
    const value = this.get(target, key)
    return value !== undefined
  },

  getOwnPropertyDescriptor (target, key) {
    const { context, objectId } = target
    const value = this.get(target, key)
    if (typeof value !== 'undefined') {
      return {
        configurable: true, enumerable: true, value
      }
    }
  },

  ownKeys (target) {
    const { context, objectId, heads } = target
    return context.keys(objectId, heads)
  },
}

const ListHandler = {
  get (target, index) {
    const { context, objectId, path, readonly, frozen, heads } = target
    index = parseListIndex(index)
    if (index === Symbol.hasInstance) { return (instance) => Array.isArray(instance) }
    if (index === Symbol.toStringTag) { return target[Symbol.toStringTag] }
    if (index === OBJECT_ID) return objectId
    if (index === READ_ONLY) return readonly
    if (index === FROZEN) return frozen
    if (index === HEADS) return heads
    if (index === STATE) return context;
    if (index === 'length') return context.length(objectId, heads);
    if (index === Symbol.iterator) {
      let i = 0;
      return function *() {
        // FIXME - ugly
        let value = valueAt(target, i)
        while (value !== undefined) {
          yield value
          i += 1
          value = valueAt(target, i)
        }
      }
    }
    if (typeof index === 'number') {
      return valueAt(target, index)
    } else {
      return listMethods(target)[index]
    }
  },

  set (target, index, val) {
    let { context, objectId, path, readonly, frozen } = target
    index = parseListIndex(index)
    if (val && val[OBJECT_ID]) {
      throw new RangeError('Cannot create a reference to an existing document object')
    }
    if (index === FROZEN) {
      target.frozen = val
      return
    }
    if (index === HEADS) {
      target.heads = val
      return
    }
    if (typeof index == "string") {
      throw new RangeError('list index must be a number')
    }
    const [ value, datatype ] = import_value(val)
    if (frozen) {
      throw new RangeError("Attempting to use an outdated Automerge document")
    }
    if (readonly) {
      throw new RangeError(`Object property "${index}" cannot be modified`)
    }
    switch (datatype) {
      case "list":
        let list
        if (index >= context.length(objectId)) {
          list = context.insert(objectId, index, LIST)
        } else {
          list = context.set(objectId, index, LIST)
        }
        const proxyList = listProxy(context, list, [ ...path, index ], readonly);
        proxyList.splice(0, 0, ...value)
        break;
      case "text":
        let text
        if (index >= context.length(objectId)) {
          text = context.insert(objectId, index, TEXT)
        } else {
          text = context.set(objectId, index, TEXT)
        }
        const proxyText = textProxy(context, text, [ ...path, index ], readonly);
        proxyText.splice(0, 0, ...value)
        break;
      case "map":
        let map
        if (index >= context.length(objectId)) {
          map = context.insert(objectId, index, MAP)
        } else {
          map = context.set(objectId, index, MAP)
        }
        const proxyMap = mapProxy(context, map, [ ...path, index ], readonly);
        for (const key in value) {
          proxyMap[key] = value[key]
        }
        break;
      default:
        if (index >= context.length(objectId)) {
          context.insert(objectId, index, value, datatype)
        } else {
          context.set(objectId, index, value, datatype)
        }
    }
    return true
  },

  deleteProperty (target, index) {
    const { context, objectId } = target
    index = parseListIndex(index)
    if (context.value(objectId, index)[0] == "counter") {
      throw new TypeError('Unsupported operation: deleting a counter from a list')
    }
    context.del(objectId, index)
    return true
  },

  has (target, index) {
    const { context, objectId, heads } = target
    index = parseListIndex(index)
    if (typeof index === 'number') {
      return index < context.length(objectId, heads)
    }
    return index === 'length'
  },

  getOwnPropertyDescriptor (target, index) {
    const { context, objectId, path, readonly, frozen, heads } = target

    if (index === 'length') return { writable: true, value: context.length(objectId, heads) }
    if (index === OBJECT_ID) return { configurable: false, enumerable: false, value: objectId }

    index = parseListIndex(index)

    let value = valueAt(target, index)
    return { configurable: true, enumerable: true, value }
  },

  getPrototypeOf (target) { return Object.getPrototypeOf([]) },
  ownKeys (target) {
    const { context, objectId, heads } = target
    let keys = []
    // uncommenting this causes assert.deepEqual() to fail when comparing to a pojo array
    // but not uncommenting it causes for (i in list) {} to not enumerate values properly
    //for (let i = 0; i < target.context.length(objectId, heads); i++) { keys.push(i.toString()) }
    keys.push("length");
    return keys
  }
}

const TextHandler = Object.assign({}, ListHandler, {
  get (target, index) {
    // FIXME this is a one line change from ListHandler.get()
    const { context, objectId, path, readonly, frozen, heads } = target
    index = parseListIndex(index)
    if (index === Symbol.toStringTag) { return target[Symbol.toStringTag] }
    if (index === Symbol.hasInstance) { return (instance) => Array.isArray(instance) }
    if (index === OBJECT_ID) return objectId
    if (index === READ_ONLY) return readonly
    if (index === FROZEN) return frozen
    if (index === HEADS) return heads
    if (index === STATE) return context;
    if (index === 'length') return context.length(objectId, heads);
    if (index === Symbol.iterator) {
      let i = 0;
      return function *() {
        let value = valueAt(target, i)
        while (value !== undefined) {
          yield value
          i += 1
          value = valueAt(target, i)
        }
      }
    }
    if (typeof index === 'number') {
      return valueAt(target, index)
    } else {
      return textMethods(target)[index] || listMethods(target)[index]
    }
  },
  getPrototypeOf (target) {
    return Object.getPrototypeOf(new Text())
  },
})

function mapProxy(context, objectId, path, readonly, heads) {
  return new Proxy({ context, objectId, path, readonly: !!readonly, frozen: false, heads }, MapHandler)
}

function listProxy(context, objectId, path, readonly, heads) {
  let target = []
  Object.assign(target, { context, objectId, path, readonly: !!readonly, frozen: false, heads })
  return new Proxy(target, ListHandler)
}

function textProxy(context, objectId, path, readonly, heads) {
  let target = []
  Object.assign(target, { context, objectId, path, readonly: !!readonly, frozen: false, heads })
  return new Proxy(target, TextHandler)
}

function rootProxy(context, readonly) {
  return mapProxy(context, "_root", [], readonly, false)
}

function listMethods(target) {
  const { context, objectId, path, readonly, frozen, heads } = target
  const methods = {
    deleteAt(index, numDelete) {
      // FIXME - what about many deletes?
      if (context.value(objectId, index)[0] == "counter") {
        throw new TypeError('Unsupported operation: deleting a counter from a list')
      }
      if (typeof numDelete === 'number') {
        context.splice(objectId, index, numDelete)
      } else {
        context.del(objectId, index)
      }
      return this
    },

    fill(val, start, end) {
      // FIXME
      let list = context.getObject(objectId)
      // convert the fill value once, then write it into every index in range
      let [ value, datatype ] = import_value(val)
      for (let index = parseListIndex(start || 0); index < parseListIndex(end || list.length); index++) {
        context.set(objectId, index, value, datatype)
      }
      return this
    },

    indexOf(o, start = 0) {
      // FIXME
      const id = o[OBJECT_ID]
      if (id) {
        const list = context.getObject(objectId)
        for (let index = start; index < list.length; index++) {
          if (list[index][OBJECT_ID] === id) {
            return index
          }
        }
        return -1
      } else {
        return context.indexOf(objectId, o, start)
      }
    },

    insertAt(index, ...values) {
      this.splice(index, 0, ...values)
      return this
    },

    pop() {
      let length = context.length(objectId)
      if (length == 0) {
        return undefined
      }
      let last = valueAt(target, length - 1)
      context.del(objectId, length - 1)
      return last
    },

    push(...values) {
      let len = context.length(objectId)
      this.splice(len, 0, ...values)
      return context.length(objectId)
    },

    shift() {
      if (context.length(objectId) == 0) return
      const first = valueAt(target, 0)
      context.del(objectId, 0)
      return first
    },

    splice(index, del, ...vals) {
      index = parseListIndex(index)
      del = parseListIndex(del)
      for (let val of vals) {
        if (val && val[OBJECT_ID]) {
          throw new RangeError('Cannot create a reference to an existing document object')
        }
      }
      if (frozen) {
        throw new RangeError("Attempting to use an outdated Automerge document")
      }
      if (readonly) {
        throw new RangeError("Sequence object cannot be modified outside of a change block")
      }
      let result = []
      for (let i = 0; i < del; i++) {
        let value = valueAt(target, index)
        result.push(value)
        context.del(objectId, index)
      }
      const values = vals.map((val) => import_value(val))
      for (let [ value, datatype ] of values) {
        switch (datatype) {
          case "list":
            const list = context.insert(objectId, index, LIST)
            const proxyList = listProxy(context, list, [ ...path, index ], readonly);
            proxyList.splice(0, 0, ...value)
            break;
          case "text":
            const text = context.insert(objectId, index, TEXT)
            const proxyText = textProxy(context, text, [ ...path, index ], readonly);
            proxyText.splice(0, 0, ...value)
            break;
          case "map":
            const map = context.insert(objectId, index, MAP)
            const proxyMap = mapProxy(context, map, [ ...path, index ], readonly);
            for (const key in value) {
              proxyMap[key] = value[key]
            }
            break;
          default:
            context.insert(objectId, index, value, datatype)
        }
        index += 1
      }
      return result
    },

    unshift(...values) {
      this.splice(0, 0, ...values)
      return context.length(objectId)
    },

    entries() {
      let i = 0;
      const iterator = {
        next: () => {
          let value = valueAt(target, i)
          if (value === undefined) {
            return { value: undefined, done: true }
          } else {
            // post-increment so the cursor advances on each call
            return { value: [ i++, value ], done: false }
          }
        }
      }
      return iterator
    },

    keys() {
      let i = 0;
      let len = context.length(objectId, heads)
      const iterator = {
        next: () => {
          let value = undefined
          if (i < len) { value = i; i++ }
          return { value, done: value === undefined }
        }
      }
      return iterator
    },

    values() {
      let i = 0;
      const iterator = {
        next: () => {
          let value = valueAt(target, i)
          if (value === undefined) {
            return { value: undefined, done: true }
          } else {
            i += 1
            return { value, done: false }
          }
        }
      }
      return iterator
    }
  }

  // Read-only methods that can delegate to the JavaScript built-in implementations
  // FIXME - super slow
  for (let method of ['concat', 'every', 'filter', 'find', 'findIndex', 'forEach', 'includes',
                      'join', 'lastIndexOf', 'map', 'reduce', 'reduceRight',
                      'slice', 'some', 'toLocaleString', 'toString']) {
    methods[method] = (...args) => {
      // materialize the list into a plain array, then delegate
      const list = []
      while (true) {
        let value = valueAt(target, list.length)
        if (value == undefined) {
          break
        }
        list.push(value)
      }

      return list[method](...args)
    }
  }

  return methods
}

function textMethods(target) {
  const { context, objectId, path, readonly, frozen } = target
  const methods = {
    set (index, value) {
      return this[index] = value
    },
    get (index) {
      return this[index]
    },
    toString () {
      let str = ''
      let length = this.length
      for (let i = 0; i < length; i++) {
        const value = this.get(i)
        if (typeof value === 'string') str += value
      }
      return str
    },
    toSpans () {
      let spans = []
      let chars = ''
      let length = this.length
      for (let i = 0; i < length; i++) {
        const value = this[i]
        if (typeof value === 'string') {
          chars += value
        } else {
          if (chars.length > 0) {
            spans.push(chars)
            chars = ''
          }
          spans.push(value)
        }
      }
      if (chars.length > 0) {
        spans.push(chars)
      }
      return spans
    },
    toJSON () {
      return this.toString()
    }
  }
  return methods
}


module.exports = { rootProxy, textProxy, listProxy, mapProxy, MapHandler, ListHandler, TextHandler }
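
The handler objects above follow the standard JavaScript Proxy trap interface, with the traps routing reads and writes through the wasm context. A minimal self-contained sketch of the same pattern, purely illustrative (a plain Map stands in for the Automerge context here):

// Illustrative only: route property access through explicit traps,
// the same shape MapHandler uses above.
const store = new Map()
const handler = {
  get (target, key) { return store.get(key) },
  set (target, key, val) { store.set(key, val); return true },
  deleteProperty (target, key) { store.delete(key); return true },
  ownKeys (target) { return [...store.keys()] },
  getOwnPropertyDescriptor (target, key) {
    return { configurable: true, enumerable: true, value: store.get(key) }
  }
}
const doc = new Proxy({}, handler)
doc.hello = "world"
console.log(doc.hello)          // "world"
console.log(Object.keys(doc))   // ["hello"]
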
@ -1,132 +0,0 @@
const { OBJECT_ID } = require('./constants')
const { isObject } = require('../src/common')

class Text {
  constructor (text) {
    const instance = Object.create(Text.prototype)
    if (typeof text === 'string') {
      instance.elems = [...text]
    } else if (Array.isArray(text)) {
      instance.elems = text
    } else if (text === undefined) {
      instance.elems = []
    } else {
      throw new TypeError(`Unsupported initial value for Text: ${text}`)
    }
    return instance
  }

  get length () {
    return this.elems.length
  }

  get (index) {
    return this.elems[index]
  }

  getElemId (index) {
    return undefined
  }

  /**
   * Iterates over the text elements character by character, including any
   * inline objects.
   */
  [Symbol.iterator] () {
    let elems = this.elems, index = -1
    return {
      next () {
        index += 1
        if (index < elems.length) {
          return {done: false, value: elems[index]}
        } else {
          return {done: true}
        }
      }
    }
  }

  /**
   * Returns the content of the Text object as a simple string, ignoring any
   * non-character elements.
   */
  toString() {
    // Concatting to a string is faster than creating an array and then
    // .join()ing for small (<100KB) arrays.
    // https://jsperf.com/join-vs-loop-w-type-test
    let str = ''
    for (const elem of this.elems) {
      if (typeof elem === 'string') str += elem
    }
    return str
  }

  /**
   * Returns the content of the Text object as a sequence of strings,
   * interleaved with non-character elements.
   *
   * For example, the value ['a', 'b', {x: 3}, 'c', 'd'] has spans:
   * => ['ab', {x: 3}, 'cd']
   */
  toSpans() {
    let spans = []
    let chars = ''
    for (const elem of this.elems) {
      if (typeof elem === 'string') {
        chars += elem
      } else {
        if (chars.length > 0) {
          spans.push(chars)
          chars = ''
        }
        spans.push(elem)
      }
    }
    if (chars.length > 0) {
      spans.push(chars)
    }
    return spans
  }

  /**
   * Returns the content of the Text object as a simple string, so that the
   * JSON serialization of an Automerge document represents text nicely.
   */
  toJSON() {
    return this.toString()
  }

  /**
   * Updates the list item at position `index` to a new value `value`.
   */
  set (index, value) {
    this.elems[index] = value
  }

  /**
   * Inserts new list items `values` starting at position `index`.
   */
  insertAt(index, ...values) {
    this.elems.splice(index, 0, ...values)
  }

  /**
   * Deletes `numDelete` list items starting at position `index`.
   * If `numDelete` is not given, one item is deleted.
   */
  deleteAt(index, numDelete = 1) {
    this.elems.splice(index, numDelete)
  }
}

// Read-only methods that can delegate to the JavaScript built-in array
for (let method of ['concat', 'every', 'filter', 'find', 'findIndex', 'forEach', 'includes',
                    'indexOf', 'join', 'lastIndexOf', 'map', 'reduce', 'reduceRight',
                    'slice', 'some', 'toLocaleString']) {
  Text.prototype[method] = function (...args) {
    const array = [...this]
    return array[method](...args)
  }
}

module.exports = { Text }
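
A short sketch of the span behaviour documented in the comments above (assuming the file is required as `./text`, matching the import in the proxies file):

const { Text } = require('./text')

const t = new Text('ab')
t.insertAt(2, { x: 3 })        // an inline object, i.e. a non-character element
t.insertAt(3, 'c', 'd')

console.log(t.toString())      // "abcd"  (non-character elements are skipped)
console.log(t.toSpans())       // ["ab", { x: 3 }, "cd"]
console.log(t.join(''))        // delegated to Array.prototype.join via the loop above
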
@ -1,16 +0,0 @@
const { v4: uuid } = require('uuid')

function defaultFactory() {
  return uuid().replace(/-/g, '')
}

let factory = defaultFactory

function makeUuid() {
  return factory()
}

makeUuid.setFactory = newFactory => { factory = newFactory }
makeUuid.reset = () => { factory = defaultFactory }

module.exports = makeUuid
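
The factory hook exists so tests can make generated ids deterministic; a usage sketch, mirroring the uuid test further down (the `./uuid` require path is assumed):

const uuid = require('./uuid')

let counter = 0
uuid.setFactory(() => `custom-uuid-${counter++}`)
console.log(uuid())   // "custom-uuid-0"
console.log(uuid())   // "custom-uuid-1"

uuid.reset()                 // restore the random default
console.log(uuid().length)   // 32 hex chars, dashes stripped
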
@ -1,164 +0,0 @@

const assert = require('assert')
const util = require('util')
const Automerge = require('..')

describe('Automerge', () => {
  describe('basics', () => {
    it('should init clone and free', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.clone(doc1);
    })

    it('handle basic set and read on root object', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => {
        d.hello = "world"
        d.big = "little"
        d.zip = "zop"
        d.app = "dap"
        assert.deepEqual(d, { hello: "world", big: "little", zip: "zop", app: "dap" })
      })
      assert.deepEqual(doc2, { hello: "world", big: "little", zip: "zop", app: "dap" })
    })

    it('handle basic sets over many changes', () => {
      let doc1 = Automerge.init()
      let timestamp = new Date();
      let counter = new Automerge.Counter(100);
      let bytes = new Uint8Array([10, 11, 12]);
      let doc2 = Automerge.change(doc1, (d) => {
        d.hello = "world"
      })
      let doc3 = Automerge.change(doc2, (d) => {
        d.counter1 = counter
      })
      let doc4 = Automerge.change(doc3, (d) => {
        d.timestamp1 = timestamp
      })
      let doc5 = Automerge.change(doc4, (d) => {
        d.app = null
      })
      let doc6 = Automerge.change(doc5, (d) => {
        d.bytes1 = bytes
      })
      let doc7 = Automerge.change(doc6, (d) => {
        d.uint = new Automerge.Uint(1)
        d.int = new Automerge.Int(-1)
        d.float64 = new Automerge.Float64(5.5)
        d.number1 = 100
        d.number2 = -45.67
        d.true = true
        d.false = false
      })

      assert.deepEqual(doc7, { hello: "world", true: true, false: false, int: -1, uint: 1, float64: 5.5, number1: 100, number2: -45.67, counter1: counter, timestamp1: timestamp, bytes1: bytes, app: null })

      let changes = Automerge.getAllChanges(doc7)
      let t1 = Automerge.init()
      let [t2] = Automerge.applyChanges(t1, changes)
      assert.deepEqual(doc7, t2)
    })

    it('handle overwrites to values', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => {
        d.hello = "world1"
      })
      let doc3 = Automerge.change(doc2, (d) => {
        d.hello = "world2"
      })
      let doc4 = Automerge.change(doc3, (d) => {
        d.hello = "world3"
      })
      let doc5 = Automerge.change(doc4, (d) => {
        d.hello = "world4"
      })
      assert.deepEqual(doc5, { hello: "world4" })
    })

    it('handle set with object value', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => {
        d.subobj = { hello: "world", subsubobj: { zip: "zop" } }
      })
      assert.deepEqual(doc2, { subobj: { hello: "world", subsubobj: { zip: "zop" } } })
    })

    it('handle simple list creation', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => d.list = [])
      assert.deepEqual(doc2, { list: [] })
    })

    it('handle simple lists', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => {
        d.list = [ 1, 2, 3 ]
      })
      assert.deepEqual(doc2.list.length, 3)
      assert.deepEqual(doc2.list[0], 1)
      assert.deepEqual(doc2.list[1], 2)
      assert.deepEqual(doc2.list[2], 3)
      assert.deepEqual(doc2, { list: [1, 2, 3] })
      // assert.deepStrictEqual(Automerge.toJS(doc2), { list: [1,2,3] })

      let doc3 = Automerge.change(doc2, (d) => {
        d.list[1] = "a"
      })

      assert.deepEqual(doc3.list.length, 3)
      assert.deepEqual(doc3.list[0], 1)
      assert.deepEqual(doc3.list[1], "a")
      assert.deepEqual(doc3.list[2], 3)
      assert.deepEqual(doc3, { list: [1, "a", 3] })
    })

    it('handle simple lists over a change round trip', () => {
      let doc1 = Automerge.init()
      let doc2 = Automerge.change(doc1, (d) => {
        d.list = [ 1, 2, 3 ]
      })
      let changes = Automerge.getChanges(doc1, doc2)
      let docB1 = Automerge.init()
      let [docB2] = Automerge.applyChanges(docB1, changes)
      assert.deepEqual(docB2, doc2);
    })

    it('handle text', () => {
      let doc1 = Automerge.init()
      let tmp = new Automerge.Text("hello")
      let doc2 = Automerge.change(doc1, (d) => {
        d.list = new Automerge.Text("hello")
        d.list.insertAt(2, "Z")
      })
      let changes = Automerge.getChanges(doc1, doc2)
      let docB1 = Automerge.init()
      let [docB2] = Automerge.applyChanges(docB1, changes)
      assert.deepEqual(docB2, doc2);
    })

    it('have many list methods', () => {
      let doc1 = Automerge.from({ list: [1, 2, 3] })
      assert.deepEqual(doc1, { list: [1, 2, 3] });
      let doc2 = Automerge.change(doc1, (d) => {
        d.list.splice(1, 1, 9, 10)
      })
      assert.deepEqual(doc2, { list: [1, 9, 10, 3] });
      let doc3 = Automerge.change(doc2, (d) => {
        d.list.push(11, 12)
      })
      assert.deepEqual(doc3, { list: [1, 9, 10, 3, 11, 12] });
      let doc4 = Automerge.change(doc3, (d) => {
        d.list.unshift(2, 2)
      })
      assert.deepEqual(doc4, { list: [2, 2, 1, 9, 10, 3, 11, 12] });
      let doc5 = Automerge.change(doc4, (d) => {
        d.list.shift()
      })
      assert.deepEqual(doc5, { list: [2, 1, 9, 10, 3, 11, 12] });
      let doc6 = Automerge.change(doc5, (d) => {
        d.list.insertAt(3, 100, 101)
      })
      assert.deepEqual(doc6, { list: [2, 1, 9, 100, 101, 10, 3, 11, 12] });
    })
  })
})
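
The round-trip pattern these tests exercise repeatedly, condensed into one sketch (using the same top-level API the tests import):

const Automerge = require('..')   // same import as the tests above

// Peer A makes some edits
let a = Automerge.change(Automerge.init(), d => { d.list = [1, 2, 3] })

// Peer B catches up by applying A's full change history
const changes = Automerge.getAllChanges(a)
const [b] = Automerge.applyChanges(Automerge.init(), changes)

console.log(JSON.stringify(b))   // {"list":[1,2,3]}
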
@ -1,97 +0,0 @@
const assert = require('assert')
const { checkEncoded } = require('./helpers')
const Automerge = require('..')
const { encodeChange, decodeChange } = Automerge

describe('change encoding', () => {
  it('should encode text edits', () => {
    /*
    const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: '', deps: [], ops: [
      {action: 'makeText', obj: '_root', key: 'text', insert: false, pred: []},
      {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []},
      {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', insert: false, pred: ['2@aaaa']},
      {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []},
      {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []}
    ]}
    */
    const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: null, deps: [], ops: [
      {action: 'makeText', obj: '_root', key: 'text', pred: []},
      {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []},
      {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', pred: ['2@aaaa']},
      {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []},
      {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []}
    ]}
    checkEncoded(encodeChange(change1), [
      0x85, 0x6f, 0x4a, 0x83, // magic bytes
      0xe2, 0xbd, 0xfb, 0xf5, // checksum
      1, 94, 0, 2, 0xaa, 0xaa, // chunkType: change, length, deps, actor 'aaaa'
      1, 1, 9, 0, 0, // seq, startOp, time, message, actor list
      12, 0x01, 4, 0x02, 4, // column count, objActor, objCtr
      0x11, 8, 0x13, 7, 0x15, 8, // keyActor, keyCtr, keyStr
      0x34, 4, 0x42, 6, // insert, action
      0x56, 6, 0x57, 3, // valLen, valRaw
      0x70, 6, 0x71, 2, 0x73, 2, // predNum, predActor, predCtr
      0, 1, 4, 0, // objActor column: null, 0, 0, 0, 0
      0, 1, 4, 1, // objCtr column: null, 1, 1, 1, 1
      0, 2, 0x7f, 0, 0, 1, 0x7f, 0, // keyActor column: null, null, 0, null, 0
      0, 1, 0x7c, 0, 2, 0x7e, 4, // keyCtr column: null, 0, 2, 0, 4
      0x7f, 4, 0x74, 0x65, 0x78, 0x74, 0, 4, // keyStr column: 'text', null, null, null, null
      1, 1, 1, 2, // insert column: false, true, false, true, true
      0x7d, 4, 1, 3, 2, 1, // action column: makeText, set, del, set, set
      0x7d, 0, 0x16, 0, 2, 0x16, // valLen column: 0, 0x16, 0, 0x16, 0x16
      0x68, 0x48, 0x69, // valRaw column: 'h', 'H', 'i'
      2, 0, 0x7f, 1, 2, 0, // predNum column: 0, 0, 1, 0, 0
      0x7f, 0, // predActor column: 0
      0x7f, 2 // predCtr column: 2
    ])
    const decoded = decodeChange(encodeChange(change1))
    assert.deepStrictEqual(decoded, Object.assign({hash: decoded.hash}, change1))
  })

  // FIXME - skipping this because it was never implemented in the Rust impl and isn't trivial
  /*
  it.skip('should require strict ordering of preds', () => {
    const change = new Uint8Array([
      133, 111, 74, 131, 31, 229, 112, 44, 1, 105, 1, 58, 30, 190, 100, 253, 180, 180, 66, 49, 126,
      81, 142, 10, 3, 35, 140, 189, 231, 34, 145, 57, 66, 23, 224, 149, 64, 97, 88, 140, 168, 194,
      229, 4, 244, 209, 58, 138, 67, 140, 1, 152, 236, 250, 2, 0, 1, 4, 55, 234, 66, 242, 8, 21, 11,
      52, 1, 66, 2, 86, 3, 87, 10, 112, 2, 113, 3, 115, 4, 127, 9, 99, 111, 109, 109, 111, 110, 86,
      97, 114, 1, 127, 1, 127, 166, 1, 52, 48, 57, 49, 52, 57, 52, 53, 56, 50, 127, 2, 126, 0, 1,
      126, 139, 1, 0
    ])
    assert.throws(() => { decodeChange(change) }, /operation IDs are not in ascending order/)
  })
  */

  describe('with trailing bytes', () => {
    let change = new Uint8Array([
      0x85, 0x6f, 0x4a, 0x83, // magic bytes
      0xb2, 0x98, 0x9e, 0xa9, // checksum
      1, 61, 0, 2, 0x12, 0x34, // chunkType: change, length, deps, actor '1234'
      1, 1, 252, 250, 220, 255, 5, // seq, startOp, time
      14, 73, 110, 105, 116, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, // message: 'Initialization'
      0, 6, // actor list, column count
      0x15, 3, 0x34, 1, 0x42, 2, // keyStr, insert, action
      0x56, 2, 0x57, 1, 0x70, 2, // valLen, valRaw, predNum
      0x7f, 1, 0x78, // keyStr: 'x'
      1, // insert: false
      0x7f, 1, // action: set
      0x7f, 19, // valLen: 1 byte of type uint
      1, // valRaw: 1
      0x7f, 0, // predNum: 0
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9 // 10 trailing bytes
    ])

    it('should allow decoding and re-encoding', () => {
      // NOTE: This calls the JavaScript encoding and decoding functions, even when the WebAssembly
      // backend is loaded. Should the wasm backend export its own functions for testing?
      checkEncoded(change, encodeChange(decodeChange(change)))
    })

    it('should be preserved in document encoding', () => {
      const [doc] = Automerge.applyChanges(Automerge.init(), [change])
      const [reconstructed] = Automerge.getAllChanges(Automerge.load(Automerge.save(doc)))
      checkEncoded(change, reconstructed)
    })
  })
})
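
As both test vectors above show, every encoded change begins with the magic bytes 0x85 0x6f 0x4a 0x83 followed by a 4-byte checksum. A small hedged sketch of a sanity check built on just that framing (the change object passed to `encodeChange`, including whether an empty ops array is accepted, is an assumption for illustration):

const { encodeChange } = require('..')   // same export the tests use

function looksLikeChange(bytes) {
  // Magic bytes taken from the test vectors above: 0x85 0x6f 0x4a 0x83
  const MAGIC = [0x85, 0x6f, 0x4a, 0x83]
  return bytes.length > 8 && MAGIC.every((b, i) => bytes[i] === b)
}

const encoded = encodeChange({ actor: 'aaaa', seq: 1, startOp: 1, time: 0,
                               message: null, deps: [], ops: [] })   // hypothetical minimal change
console.log(looksLikeChange(encoded))   // true
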
@ -1,697 +0,0 @@
const assert = require('assert')
const Automerge = require('..')
const { assertEqualsOneOf } = require('./helpers')

function attributeStateToAttributes(accumulatedAttributes) {
  const attributes = {}
  Object.entries(accumulatedAttributes).forEach(([key, values]) => {
    if (values.length && values[0] !== null) {
      attributes[key] = values[0]
    }
  })
  return attributes
}

function isEquivalent(a, b) {
  const aProps = Object.getOwnPropertyNames(a)
  const bProps = Object.getOwnPropertyNames(b)

  if (aProps.length != bProps.length) {
    return false
  }

  for (let i = 0; i < aProps.length; i++) {
    const propName = aProps[i]
    if (a[propName] !== b[propName]) {
      return false
    }
  }

  return true
}

function isControlMarker(pseudoCharacter) {
  return typeof pseudoCharacter === 'object' && pseudoCharacter.attributes
}

function opFrom(text, attributes) {
  let op = { insert: text }
  if (Object.keys(attributes).length > 0) {
    op.attributes = attributes
  }
  return op
}

function accumulateAttributes(span, accumulatedAttributes) {
  Object.entries(span).forEach(([key, value]) => {
    if (!accumulatedAttributes[key]) {
      accumulatedAttributes[key] = []
    }
    if (value === null) {
      if (accumulatedAttributes[key].length === 0 || accumulatedAttributes[key] === null) {
        accumulatedAttributes[key].unshift(null)
      } else {
        accumulatedAttributes[key].shift()
      }
    } else {
      if (accumulatedAttributes[key][0] === null) {
        accumulatedAttributes[key].shift()
      } else {
        accumulatedAttributes[key].unshift(value)
      }
    }
  })
  return accumulatedAttributes
}

function automergeTextToDeltaDoc(text) {
  let ops = []
  let controlState = {}
  let currentString = ""
  let attributes = {}
  text.toSpans().forEach((span) => {
    if (isControlMarker(span)) {
      controlState = accumulateAttributes(span.attributes, controlState)
    } else {
      let next = attributeStateToAttributes(controlState)

      // If the next span has the same calculated attributes as the current span,
      // don't bother outputting it as a separate span; just let it ride.
      if (typeof span === 'string' && isEquivalent(next, attributes)) {
        currentString = currentString + span
        return
      }

      if (currentString) {
        ops.push(opFrom(currentString, attributes))
      }

      // If we've got a string, we might be able to concatenate it to another
      // same-attributed string, so remember it and go to the next iteration.
      if (typeof span === 'string') {
        currentString = span
        attributes = next
      } else {
        // Otherwise we have an embed "character" and should output it immediately;
        // embeds are always one "character" in length.
        ops.push(opFrom(span, next))
        currentString = ''
        attributes = {}
      }
    }
  })

  // At the end, flush any accumulated string out
  if (currentString) {
    ops.push(opFrom(currentString, attributes))
  }

  return ops
}

function inverseAttributes(attributes) {
  let invertedAttributes = {}
  Object.keys(attributes).forEach((key) => {
    invertedAttributes[key] = null
  })
  return invertedAttributes
}

function applyDeleteOp(text, offset, op) {
  let length = op.delete
  while (length > 0) {
    if (isControlMarker(text.get(offset))) {
      offset += 1
    } else {
      // We must not delete control characters, but we do delete embed characters
      text.deleteAt(offset, 1)
      length -= 1
    }
  }
  return [text, offset]
}

function applyRetainOp(text, offset, op) {
  let length = op.retain

  if (op.attributes) {
    text.insertAt(offset, { attributes: op.attributes })
    offset += 1
  }

  while (length > 0) {
    const char = text.get(offset)
    offset += 1
    if (!isControlMarker(char)) {
      length -= 1
    }
  }

  if (op.attributes) {
    text.insertAt(offset, { attributes: inverseAttributes(op.attributes) })
    offset += 1
  }

  return [text, offset]
}

function applyInsertOp(text, offset, op) {
  let originalOffset = offset

  if (typeof op.insert === 'string') {
    text.insertAt(offset, ...op.insert.split(''))
    offset += op.insert.length
  } else {
    // we have an embed or something similar
    text.insertAt(offset, op.insert)
    offset += 1
  }

  if (op.attributes) {
    text.insertAt(originalOffset, { attributes: op.attributes })
    offset += 1
  }
  if (op.attributes) {
    text.insertAt(offset, { attributes: inverseAttributes(op.attributes) })
    offset += 1
  }
  return [text, offset]
}

// XXX: why can't we pass the text object in directly?
function applyDeltaDocToAutomergeText(delta, doc) {
  let offset = 0

  delta.forEach(op => {
    if (op.retain) {
      [, offset] = applyRetainOp(doc.text, offset, op)
    } else if (op.delete) {
      [, offset] = applyDeleteOp(doc.text, offset, op)
    } else if (op.insert) {
      [, offset] = applyInsertOp(doc.text, offset, op)
    }
  })
}

describe('Automerge.Text', () => {
  let s1, s2
  beforeEach(() => {
    s1 = Automerge.change(Automerge.init(), doc => doc.text = new Automerge.Text())
    s2 = Automerge.merge(Automerge.init(), s1)
  })

  it('should support insertion', () => {
    s1 = Automerge.change(s1, doc => doc.text.insertAt(0, 'a'))
    assert.strictEqual(s1.text.length, 1)
    assert.strictEqual(s1.text.get(0), 'a')
    assert.strictEqual(s1.text.toString(), 'a')
    //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`)
  })

  it('should support deletion', () => {
    s1 = Automerge.change(s1, doc => doc.text.insertAt(0, 'a', 'b', 'c'))
    s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 1))
    assert.strictEqual(s1.text.length, 2)
    assert.strictEqual(s1.text.get(0), 'a')
    assert.strictEqual(s1.text.get(1), 'c')
    assert.strictEqual(s1.text.toString(), 'ac')
  })

  it("should support implicit and explicit deletion", () => {
    s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c"))
    s1 = Automerge.change(s1, doc => doc.text.deleteAt(1))
    s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 0))
    assert.strictEqual(s1.text.length, 2)
    assert.strictEqual(s1.text.get(0), "a")
    assert.strictEqual(s1.text.get(1), "c")
    assert.strictEqual(s1.text.toString(), "ac")
  })

  it('should handle concurrent insertion', () => {
    s1 = Automerge.change(s1, doc => doc.text.insertAt(0, 'a', 'b', 'c'))
    s2 = Automerge.change(s2, doc => doc.text.insertAt(0, 'x', 'y', 'z'))
    s1 = Automerge.merge(s1, s2)
    assert.strictEqual(s1.text.length, 6)
    assertEqualsOneOf(s1.text.toString(), 'abcxyz', 'xyzabc')
    assertEqualsOneOf(s1.text.join(''), 'abcxyz', 'xyzabc')
  })

  it('should handle text and other ops in the same change', () => {
    s1 = Automerge.change(s1, doc => {
      doc.foo = 'bar'
      doc.text.insertAt(0, 'a')
    })
    assert.strictEqual(s1.foo, 'bar')
    assert.strictEqual(s1.text.toString(), 'a')
    assert.strictEqual(s1.text.join(''), 'a')
  })

  it('should serialize to JSON as a simple string', () => {
    s1 = Automerge.change(s1, doc => doc.text.insertAt(0, 'a', '"', 'b'))
    assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}')
  })

  it('should allow modification before an object is assigned to a document', () => {
    s1 = Automerge.change(Automerge.init(), doc => {
      const text = new Automerge.Text()
      text.insertAt(0, 'a', 'b', 'c', 'd')
      text.deleteAt(2)
      doc.text = text
      assert.strictEqual(doc.text.toString(), 'abd')
      assert.strictEqual(doc.text.join(''), 'abd')
    })
    assert.strictEqual(s1.text.toString(), 'abd')
    assert.strictEqual(s1.text.join(''), 'abd')
  })

  it('should allow modification after an object is assigned to a document', () => {
    s1 = Automerge.change(Automerge.init(), doc => {
      const text = new Automerge.Text()
      doc.text = text
      doc.text.insertAt(0, 'a', 'b', 'c', 'd')
      doc.text.deleteAt(2)
      assert.strictEqual(doc.text.toString(), 'abd')
      assert.strictEqual(doc.text.join(''), 'abd')
    })
    assert.strictEqual(s1.text.join(''), 'abd')
  })

  it('should not allow modification outside of a change callback', () => {
    assert.throws(() => s1.text.insertAt(0, 'a'), /object cannot be modified outside of a change block/)
  })

  describe('with initial value', () => {
    it('should accept a string as initial value', () => {
      let s1 = Automerge.change(Automerge.init(), doc => doc.text = new Automerge.Text('init'))
      assert.strictEqual(s1.text.length, 4)
      assert.strictEqual(s1.text.get(0), 'i')
      assert.strictEqual(s1.text.get(1), 'n')
      assert.strictEqual(s1.text.get(2), 'i')
      assert.strictEqual(s1.text.get(3), 't')
      assert.strictEqual(s1.text.toString(), 'init')
    })

    it('should accept an array as initial value', () => {
      let s1 = Automerge.change(Automerge.init(), doc => doc.text = new Automerge.Text(['i', 'n', 'i', 't']))
      assert.strictEqual(s1.text.length, 4)
      assert.strictEqual(s1.text.get(0), 'i')
      assert.strictEqual(s1.text.get(1), 'n')
      assert.strictEqual(s1.text.get(2), 'i')
      assert.strictEqual(s1.text.get(3), 't')
      assert.strictEqual(s1.text.toString(), 'init')
    })

    it('should initialize text in Automerge.from()', () => {
      let s1 = Automerge.from({text: new Automerge.Text('init')})
      assert.strictEqual(s1.text.length, 4)
      assert.strictEqual(s1.text.get(0), 'i')
      assert.strictEqual(s1.text.get(1), 'n')
      assert.strictEqual(s1.text.get(2), 'i')
      assert.strictEqual(s1.text.get(3), 't')
      assert.strictEqual(s1.text.toString(), 'init')
    })

    it('should encode the initial value as a change', () => {
      const s1 = Automerge.from({text: new Automerge.Text('init')})
      const changes = Automerge.getAllChanges(s1)
      assert.strictEqual(changes.length, 1)
      const [s2] = Automerge.applyChanges(Automerge.init(), changes)
      assert.strictEqual(s2.text instanceof Automerge.Text, true)
      assert.strictEqual(s2.text.toString(), 'init')
      assert.strictEqual(s2.text.join(''), 'init')
    })

    it('should allow immediate access to the value', () => {
      Automerge.change(Automerge.init(), doc => {
        const text = new Automerge.Text('init')
        assert.strictEqual(text.length, 4)
        assert.strictEqual(text.get(0), 'i')
        assert.strictEqual(text.toString(), 'init')
        doc.text = text
        assert.strictEqual(doc.text.length, 4)
        assert.strictEqual(doc.text.get(0), 'i')
        assert.strictEqual(doc.text.toString(), 'init')
      })
    })

    it('should allow pre-assignment modification of the initial value', () => {
      let s1 = Automerge.change(Automerge.init(), doc => {
        const text = new Automerge.Text('init')
        text.deleteAt(3)
        assert.strictEqual(text.join(''), 'ini')
        doc.text = text
        assert.strictEqual(doc.text.join(''), 'ini')
        assert.strictEqual(doc.text.toString(), 'ini')
      })
      assert.strictEqual(s1.text.toString(), 'ini')
      assert.strictEqual(s1.text.join(''), 'ini')
    })

    it('should allow post-assignment modification of the initial value', () => {
      let s1 = Automerge.change(Automerge.init(), doc => {
        const text = new Automerge.Text('init')
        doc.text = text
        doc.text.deleteAt(0)
        doc.text.insertAt(0, 'I')
        assert.strictEqual(doc.text.join(''), 'Init')
        assert.strictEqual(doc.text.toString(), 'Init')
      })
      assert.strictEqual(s1.text.join(''), 'Init')
      assert.strictEqual(s1.text.toString(), 'Init')
    })
  })

  describe('non-textual control characters', () => {
    let s1
    beforeEach(() => {
      s1 = Automerge.change(Automerge.init(), doc => {
        doc.text = new Automerge.Text()
        doc.text.insertAt(0, 'a')
        doc.text.insertAt(1, { attribute: 'bold' })
      })
    })

    it('should allow fetching non-textual characters', () => {
      assert.deepEqual(s1.text.get(1), { attribute: 'bold' })
      //assert.strictEqual(s1.text.getElemId(1), `3@${Automerge.getActorId(s1)}`)
    })

    it('should include control characters in string length', () => {
      assert.strictEqual(s1.text.length, 2)
      assert.strictEqual(s1.text.get(0), 'a')
    })

    it('should exclude control characters from toString()', () => {
      assert.strictEqual(s1.text.toString(), 'a')
    })

    it('should allow control characters to be updated', () => {
      const s2 = Automerge.change(s1, doc => doc.text.get(1).attribute = 'italic')
      const s3 = Automerge.load(Automerge.save(s2))
      assert.strictEqual(s1.text.get(1).attribute, 'bold')
      assert.strictEqual(s2.text.get(1).attribute, 'italic')
      assert.strictEqual(s3.text.get(1).attribute, 'italic')
    })

    describe('spans interface to Text', () => {
      it('should return a simple string as a single span', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('hello world')
        })
        assert.deepEqual(s1.text.toSpans(), ['hello world'])
      })
      it('should return an empty string as an empty array', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text()
        })
        assert.deepEqual(s1.text.toSpans(), [])
      })
      it('should split a span at a control character', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('hello world')
          doc.text.insertAt(5, { attributes: { bold: true } })
        })
        assert.deepEqual(s1.text.toSpans(),
          ['hello', { attributes: { bold: true } }, ' world'])
      })
      it('should allow consecutive control characters', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('hello world')
          doc.text.insertAt(5, { attributes: { bold: true } })
          doc.text.insertAt(6, { attributes: { italic: true } })
        })
        assert.deepEqual(s1.text.toSpans(),
          ['hello',
           { attributes: { bold: true } },
           { attributes: { italic: true } },
           ' world'
          ])
      })
      it('should allow non-consecutive control characters', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('hello world')
          doc.text.insertAt(5, { attributes: { bold: true } })
          doc.text.insertAt(12, { attributes: { italic: true } })
        })
        assert.deepEqual(s1.text.toSpans(),
          ['hello',
           { attributes: { bold: true } },
           ' world',
           { attributes: { italic: true } }
          ])
      })

      it('should be convertible into a Quill delta', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Gandalf the Grey')
          doc.text.insertAt(0, { attributes: { bold: true } })
          doc.text.insertAt(7 + 1, { attributes: { bold: null } })
          doc.text.insertAt(12 + 2, { attributes: { color: '#cccccc' } })
        })

        let deltaDoc = automergeTextToDeltaDoc(s1.text)

        // From https://quilljs.com/docs/delta/
        let expectedDoc = [
          { insert: 'Gandalf', attributes: { bold: true } },
          { insert: ' the ' },
          { insert: 'Grey', attributes: { color: '#cccccc' } }
        ]

        assert.deepEqual(deltaDoc, expectedDoc)
      })

      it('should support embeds', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('')
          doc.text.insertAt(0, { attributes: { link: 'https://quilljs.com' } })
          doc.text.insertAt(1, {
            image: 'https://quilljs.com/assets/images/icon.png'
          })
          doc.text.insertAt(2, { attributes: { link: null } })
        })

        let deltaDoc = automergeTextToDeltaDoc(s1.text)

        // From https://quilljs.com/docs/delta/
        let expectedDoc = [{
          // An image link
          insert: {
            image: 'https://quilljs.com/assets/images/icon.png'
          },
          attributes: {
            link: 'https://quilljs.com'
          }
        }]

        assert.deepEqual(deltaDoc, expectedDoc)
      })

      it('should handle concurrent overlapping spans', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Gandalf the Grey')
        })

        let s2 = Automerge.merge(Automerge.init(), s1)

        let s3 = Automerge.change(s1, doc => {
          doc.text.insertAt(8, { attributes: { bold: true } })
          doc.text.insertAt(16 + 1, { attributes: { bold: null } })
        })

        let s4 = Automerge.change(s2, doc => {
          doc.text.insertAt(0, { attributes: { bold: true } })
          doc.text.insertAt(11 + 1, { attributes: { bold: null } })
        })

        let merged = Automerge.merge(s3, s4)

        let deltaDoc = automergeTextToDeltaDoc(merged.text)

        // From https://quilljs.com/docs/delta/
        let expectedDoc = [
          { insert: 'Gandalf the Grey', attributes: { bold: true } },
        ]

        assert.deepEqual(deltaDoc, expectedDoc)
      })

      it('should handle debolding spans', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Gandalf the Grey')
        })

        let s2 = Automerge.merge(Automerge.init(), s1)

        let s3 = Automerge.change(s1, doc => {
          doc.text.insertAt(0, { attributes: { bold: true } })
          doc.text.insertAt(16 + 1, { attributes: { bold: null } })
        })

        let s4 = Automerge.change(s2, doc => {
          doc.text.insertAt(8, { attributes: { bold: null } })
          doc.text.insertAt(11 + 1, { attributes: { bold: true } })
        })

        let merged = Automerge.merge(s3, s4)

        let deltaDoc = automergeTextToDeltaDoc(merged.text)

        // From https://quilljs.com/docs/delta/
        let expectedDoc = [
          { insert: 'Gandalf ', attributes: { bold: true } },
          { insert: 'the' },
          { insert: ' Grey', attributes: { bold: true } },
        ]

        assert.deepEqual(deltaDoc, expectedDoc)
      })

      // XXX: how would this work for colors?
      it('should handle destyling across destyled spans', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Gandalf the Grey')
        })

        let s2 = Automerge.merge(Automerge.init(), s1)

        let s3 = Automerge.change(s1, doc => {
          doc.text.insertAt(0, { attributes: { bold: true } })
          doc.text.insertAt(16 + 1, { attributes: { bold: null } })
        })

        let s4 = Automerge.change(s2, doc => {
          doc.text.insertAt(8, { attributes: { bold: null } })
          doc.text.insertAt(11 + 1, { attributes: { bold: true } })
        })

        let merged = Automerge.merge(s3, s4)

        let final = Automerge.change(merged, doc => {
          doc.text.insertAt(3 + 1, { attributes: { bold: null } })
          doc.text.insertAt(doc.text.length, { attributes: { bold: true } })
        })

        let deltaDoc = automergeTextToDeltaDoc(final.text)

        // From https://quilljs.com/docs/delta/
        let expectedDoc = [
          { insert: 'Gan', attributes: { bold: true } },
          { insert: 'dalf the Grey' },
        ]

        assert.deepEqual(deltaDoc, expectedDoc)
      })

      it('should apply an insert', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Hello world')
        })

        const delta = [
          { retain: 6 },
          { insert: 'reader' },
          { delete: 5 }
        ]

        let s2 = Automerge.change(s1, doc => {
          applyDeltaDocToAutomergeText(delta, doc)
        })

        assert.strictEqual(s2.text.join(''), 'Hello reader')
      })

      it('should apply an insert with control characters', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Hello world')
        })

        const delta = [
          { retain: 6 },
          { insert: 'reader', attributes: { bold: true } },
          { delete: 5 },
          { insert: '!' }
        ]

        let s2 = Automerge.change(s1, doc => {
          applyDeltaDocToAutomergeText(delta, doc)
        })

        assert.strictEqual(s2.text.toString(), 'Hello reader!')
        assert.deepEqual(s2.text.toSpans(), [
          "Hello ",
          { attributes: { bold: true } },
          "reader",
          { attributes: { bold: null } },
          "!"
        ])
      })

      it('should account for control characters in retain/delete lengths', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('Hello world')
          doc.text.insertAt(4, { attributes: { color: '#ccc' } })
          doc.text.insertAt(10, { attributes: { color: '#f00' } })
        })

        const delta = [
          { retain: 6 },
          { insert: 'reader', attributes: { bold: true } },
          { delete: 5 },
          { insert: '!' }
        ]

        let s2 = Automerge.change(s1, doc => {
          applyDeltaDocToAutomergeText(delta, doc)
        })

        assert.strictEqual(s2.text.toString(), 'Hello reader!')
        assert.deepEqual(s2.text.toSpans(), [
          "Hell",
          { attributes: { color: '#ccc'} },
          "o ",
          { attributes: { bold: true } },
          "reader",
          { attributes: { bold: null } },
          { attributes: { color: '#f00'} },
          "!"
        ])
      })

      it('should support embeds in applied deltas', () => {
        let s1 = Automerge.change(Automerge.init(), doc => {
          doc.text = new Automerge.Text('')
        })

        let deltaDoc = [{
          // An image link
          insert: {
            image: 'https://quilljs.com/assets/images/icon.png'
          },
          attributes: {
            link: 'https://quilljs.com'
          }
        }]

        let s2 = Automerge.change(s1, doc => {
          applyDeltaDocToAutomergeText(deltaDoc, doc)
        })

        assert.deepEqual(s2.text.toSpans(), [
          { attributes: { link: 'https://quilljs.com' } },
          { image: 'https://quilljs.com/assets/images/icon.png'},
          { attributes: { link: null } },
        ])
      })
    })
  })

  it('should support unicode when creating text', () => {
    s1 = Automerge.from({
      text: new Automerge.Text('🐦')
    })
    assert.strictEqual(s1.text.get(0), '🐦')
  })
})
@ -1,32 +0,0 @@
|
|||
const assert = require('assert')
|
||||
const Automerge = require('..')
|
||||
|
||||
const uuid = Automerge.uuid
|
||||
|
||||
describe('uuid', () => {
|
||||
afterEach(() => {
|
||||
uuid.reset()
|
||||
})
|
||||
|
||||
describe('default implementation', () => {
|
||||
it('generates unique values', () => {
|
||||
assert.notEqual(uuid(), uuid())
|
||||
})
|
||||
})
|
||||
|
||||
describe('custom implementation', () => {
|
||||
let counter
|
||||
|
||||
function customUuid() {
|
||||
return `custom-uuid-${counter++}`
|
||||
}
|
||||
|
||||
before(() => uuid.setFactory(customUuid))
|
||||
beforeEach(() => counter = 0)
|
||||
|
||||
it('invokes the custom factory', () => {
|
||||
assert.equal(uuid(), 'custom-uuid-0')
|
||||
assert.equal(uuid(), 'custom-uuid-1')
|
||||
})
|
||||
})
|
||||
})
@ -1,30 +0,0 @@

{
  "collaborators": [
    "Orion Henry <orion@inkandswitch.com>",
    "Alex Good <alex@memoryandthought.me>",
    "Martin Kleppmann"
  ],
  "name": "automerge-wasm",
  "description": "wasm-bindgen bindings to the automerge rust implementation",
  "version": "0.1.0",
  "license": "MIT",
  "files": [
    "README.md",
    "LICENSE",
    "package.json",
    "automerge_wasm_bg.wasm",
    "automerge_wasm.js"
  ],
  "main": "./dev/index.js",
  "scripts": {
    "build": "rm -rf dev && wasm-pack build --target nodejs --dev --out-name index -d dev",
    "release": "rm -rf dev && wasm-pack build --target nodejs --release --out-name index -d dev && yarn opt",
    "prof": "rm -rf dev && wasm-pack build --target nodejs --profiling --out-name index -d dev",
    "opt": "wasm-opt -Oz dev/index_bg.wasm -o tmp.wasm && mv tmp.wasm dev/index_bg.wasm",
    "test": "yarn build && mocha --bail --full-trace"
  },
  "dependencies": {},
  "devDependencies": {
    "mocha": "^9.1.3"
  }
}
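
Given the "main" entry above, the wasm-pack output in dev/ is what require('..') resolves to inside the repo's own tests. A consumer of the published package would look the same, assuming it is installed under the package name above:

// Assumes the published package name; within this repo the tests use require('..')
const Automerge = require('automerge-wasm')

const doc = Automerge.init()
doc.set('_root', 'hello', 'world')
console.log(doc.value('_root', 'hello')) // -> ['str', 'world']
doc.free()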
@ -1,822 +0,0 @@

extern crate web_sys;
use automerge as am;
use automerge::{Change, ChangeHash, Prop, Value};
use js_sys::{Array, Object, Reflect, Uint8Array};
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::convert::TryInto;
use std::fmt::Display;
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;

#[allow(unused_macros)]
macro_rules! log {
    ( $( $t:tt )* ) => {
        web_sys::console::log_1(&format!( $( $t )* ).into());
    };
}

#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

fn datatype(s: &am::ScalarValue) -> String {
    match s {
        am::ScalarValue::Bytes(_) => "bytes".into(),
        am::ScalarValue::Str(_) => "str".into(),
        am::ScalarValue::Int(_) => "int".into(),
        am::ScalarValue::Uint(_) => "uint".into(),
        am::ScalarValue::F64(_) => "f64".into(),
        am::ScalarValue::Counter(_) => "counter".into(),
        am::ScalarValue::Timestamp(_) => "timestamp".into(),
        am::ScalarValue::Boolean(_) => "boolean".into(),
        am::ScalarValue::Null => "null".into(),
    }
}

#[derive(Debug)]
pub struct ScalarValue(am::ScalarValue);

impl From<ScalarValue> for JsValue {
    fn from(val: ScalarValue) -> Self {
        match &val.0 {
            am::ScalarValue::Bytes(v) => Uint8Array::from(v.as_slice()).into(),
            am::ScalarValue::Str(v) => v.to_string().into(),
            am::ScalarValue::Int(v) => (*v as f64).into(),
            am::ScalarValue::Uint(v) => (*v as f64).into(),
            am::ScalarValue::F64(v) => (*v).into(),
            am::ScalarValue::Counter(v) => (*v as f64).into(),
            am::ScalarValue::Timestamp(v) => (*v as f64).into(),
            am::ScalarValue::Boolean(v) => (*v).into(),
            am::ScalarValue::Null => JsValue::null(),
        }
    }
}

#[wasm_bindgen]
#[derive(Debug)]
pub struct Automerge(automerge::Automerge);

#[wasm_bindgen]
#[derive(Debug)]
pub struct SyncState(am::SyncState);

#[wasm_bindgen]
impl SyncState {
    #[wasm_bindgen(getter, js_name = sharedHeads)]
    pub fn shared_heads(&self) -> JsValue {
        rust_to_js(&self.0.shared_heads).unwrap()
    }

    #[wasm_bindgen(getter, js_name = lastSentHeads)]
    pub fn last_sent_heads(&self) -> JsValue {
        rust_to_js(self.0.last_sent_heads.as_ref()).unwrap()
    }

    #[wasm_bindgen(setter, js_name = lastSentHeads)]
    pub fn set_last_sent_heads(&mut self, heads: JsValue) {
        let heads: Option<Vec<ChangeHash>> = js_to_rust(&heads).unwrap();
        self.0.last_sent_heads = heads
    }

    #[wasm_bindgen(setter, js_name = sentHashes)]
    pub fn set_sent_hashes(&mut self, hashes: JsValue) {
        let hashes_map: HashMap<ChangeHash, bool> = js_to_rust(&hashes).unwrap();
        let hashes_set: HashSet<ChangeHash> = hashes_map.keys().cloned().collect();
        self.0.sent_hashes = hashes_set
    }

    fn decode(data: Uint8Array) -> Result<SyncState, JsValue> {
        let data = data.to_vec();
        let s = am::SyncState::decode(&data);
        let s = s.map_err(to_js_err)?;
        Ok(SyncState(s))
    }
}

#[derive(Debug)]
pub struct JsErr(String);

impl From<JsErr> for JsValue {
    fn from(err: JsErr) -> Self {
        js_sys::Error::new(&std::format!("{}", err.0)).into()
    }
}

impl<'a> From<&'a str> for JsErr {
    fn from(s: &'a str) -> Self {
        JsErr(s.to_owned())
    }
}

#[wasm_bindgen]
impl Automerge {
    pub fn new(actor: JsValue) -> Result<Automerge, JsValue> {
        let mut automerge = automerge::Automerge::new();
        if let Some(a) = actor.as_string() {
            let a = automerge::ActorId::from(hex::decode(a).map_err(to_js_err)?.to_vec());
            automerge.set_actor(a);
        }
        Ok(Automerge(automerge))
    }

    #[allow(clippy::should_implement_trait)]
    pub fn clone(&self) -> Self {
        Automerge(self.0.clone())
    }

    pub fn free(self) {}

    pub fn pending_ops(&self) -> JsValue {
        (self.0.pending_ops() as u32).into()
    }

    pub fn commit(&mut self, message: JsValue, time: JsValue) -> Array {
        let message = message.as_string();
        let time = time.as_f64().map(|v| v as i64);
        let heads = self.0.commit(message, time);
        let heads: Array = heads
            .iter()
            .map(|h| JsValue::from_str(&hex::encode(&h.0)))
            .collect();
        heads
    }

    pub fn rollback(&mut self) -> JsValue {
        self.0.rollback().into()
    }

    pub fn keys(&mut self, obj: JsValue, heads: JsValue) -> Result<Array, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let result = if let Some(heads) = get_heads(heads) {
            self.0.keys_at(obj, &heads)
        } else {
            self.0.keys(obj)
        }
        .iter()
        .map(|s| JsValue::from_str(s))
        .collect();
        Ok(result)
    }

    pub fn text(&mut self, obj: JsValue, heads: JsValue) -> Result<JsValue, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        if let Some(heads) = get_heads(heads) {
            self.0.text_at(obj, &heads)
        } else {
            self.0.text(obj)
        }
        .map_err(to_js_err)
        .map(|t| t.into())
    }

    pub fn splice(
        &mut self,
        obj: JsValue,
        start: JsValue,
        delete_count: JsValue,
        text: JsValue,
    ) -> Result<(), JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let start = to_usize(start, "start")?;
        let delete_count = to_usize(delete_count, "deleteCount")?;
        let mut vals = vec![];
        if let Some(t) = text.as_string() {
            self.0
                .splice_text(obj, start, delete_count, &t)
                .map_err(to_js_err)?;
        } else {
            if let Ok(array) = text.dyn_into::<Array>() {
                for i in array.iter() {
                    if let Some(t) = i.as_string() {
                        vals.push(t.into());
                    } else if let Ok(array) = i.dyn_into::<Array>() {
                        let value = array.get(1);
                        let datatype = array.get(2);
                        let value = self.import_value(value, datatype)?;
                        vals.push(value);
                    }
                }
            }
            self.0
                .splice(obj, start, delete_count, vals)
                .map_err(to_js_err)?;
        }
        Ok(())
    }

    pub fn insert(
        &mut self,
        obj: JsValue,
        index: JsValue,
        value: JsValue,
        datatype: JsValue,
    ) -> Result<JsValue, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        //let key = self.insert_pos_for_index(&obj, prop)?;
        let index: Result<_, JsValue> = index
            .as_f64()
            .ok_or_else(|| "insert index must be a number".into());
        let index = index?;
        let value = self.import_value(value, datatype)?;
        let opid = self
            .0
            .insert(obj, index as usize, value)
            .map_err(to_js_err)?;
        Ok(self.export(opid))
    }

    pub fn set(
        &mut self,
        obj: JsValue,
        prop: JsValue,
        value: JsValue,
        datatype: JsValue,
    ) -> Result<JsValue, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let prop = self.import_prop(prop)?;
        let value = self.import_value(value, datatype)?;
        let opid = self.0.set(obj, prop, value).map_err(to_js_err)?;
        match opid {
            Some(opid) => Ok(self.export(opid)),
            None => Ok(JsValue::null()),
        }
    }

    pub fn inc(&mut self, obj: JsValue, prop: JsValue, value: JsValue) -> Result<(), JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let prop = self.import_prop(prop)?;
        let value: f64 = value
            .as_f64()
            .ok_or("inc needs a numeric value")
            .map_err(to_js_err)?;
        self.0.inc(obj, prop, value as i64).map_err(to_js_err)?;
        Ok(())
    }

    pub fn value(&mut self, obj: JsValue, prop: JsValue, heads: JsValue) -> Result<Array, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let result = Array::new();
        let prop = to_prop(prop);
        let heads = get_heads(heads);
        if let Ok(prop) = prop {
            let value = if let Some(h) = heads {
                self.0.value_at(obj, prop, &h)
            } else {
                self.0.value(obj, prop)
            }
            .map_err(to_js_err)?;
            match value {
                Some((Value::Object(obj_type), obj_id)) => {
                    result.push(&obj_type.to_string().into());
                    result.push(&self.export(obj_id));
                }
                Some((Value::Scalar(value), _)) => {
                    result.push(&datatype(&value).into());
                    result.push(&ScalarValue(value).into());
                }
                None => {}
            }
        }
        Ok(result)
    }

    pub fn values(&mut self, obj: JsValue, arg: JsValue, heads: JsValue) -> Result<Array, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let result = Array::new();
        let prop = to_prop(arg);
        if let Ok(prop) = prop {
            let values = if let Some(heads) = get_heads(heads) {
                self.0.values_at(obj, prop, &heads)
            } else {
                self.0.values(obj, prop)
            }
            .map_err(to_js_err)?;
            for value in values {
                match value {
                    (Value::Object(obj_type), obj_id) => {
                        let sub = Array::new();
                        sub.push(&obj_type.to_string().into());
                        sub.push(&self.export(obj_id));
                        result.push(&sub.into());
                    }
                    (Value::Scalar(value), id) => {
                        let sub = Array::new();
                        sub.push(&datatype(&value).into());
                        sub.push(&ScalarValue(value).into());
                        sub.push(&self.export(id));
                        result.push(&sub.into());
                    }
                }
            }
        }
        Ok(result)
    }

    pub fn length(&mut self, obj: JsValue, heads: JsValue) -> Result<JsValue, JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        if let Some(heads) = get_heads(heads) {
            Ok((self.0.length_at(obj, &heads) as f64).into())
        } else {
            Ok((self.0.length(obj) as f64).into())
        }
    }

    pub fn del(&mut self, obj: JsValue, prop: JsValue) -> Result<(), JsValue> {
        let obj: automerge::ObjId = self.import(obj)?;
        let prop = to_prop(prop)?;
        self.0.del(obj, prop).map_err(to_js_err)?;
        Ok(())
    }

    pub fn save(&mut self) -> Result<Uint8Array, JsValue> {
        self.0
            .save()
            .map(|v| Uint8Array::from(v.as_slice()))
            .map_err(to_js_err)
    }

    #[wasm_bindgen(js_name = saveIncremental)]
    pub fn save_incremental(&mut self) -> JsValue {
        let bytes = self.0.save_incremental();
        Uint8Array::from(bytes.as_slice()).into()
    }

    #[wasm_bindgen(js_name = loadIncremental)]
    pub fn load_incremental(&mut self, data: Uint8Array) -> Result<JsValue, JsValue> {
        let data = data.to_vec();
        let len = self.0.load_incremental(&data).map_err(to_js_err)?;
        Ok(len.into())
    }

    #[wasm_bindgen(js_name = applyChanges)]
    pub fn apply_changes(&mut self, changes: JsValue) -> Result<(), JsValue> {
        let changes: Vec<_> = JS(changes).try_into()?;
        self.0.apply_changes(&changes).map_err(to_js_err)?;
        Ok(())
    }

    #[wasm_bindgen(js_name = getChanges)]
    pub fn get_changes(&mut self, have_deps: JsValue) -> Result<Array, JsValue> {
        let deps: Vec<_> = JS(have_deps).try_into()?;
        let changes = self.0.get_changes(&deps);
        let changes: Array = changes
            .iter()
            .map(|c| Uint8Array::from(c.raw_bytes()))
            .collect();
        Ok(changes)
    }

    #[wasm_bindgen(js_name = getChangesAdded)]
    pub fn get_changes_added(&mut self, other: &Automerge) -> Result<Array, JsValue> {
        let changes = self.0.get_changes_added(&other.0);
        let changes: Array = changes
            .iter()
            .map(|c| Uint8Array::from(c.raw_bytes()))
            .collect();
        Ok(changes)
    }

    #[wasm_bindgen(js_name = getHeads)]
    pub fn get_heads(&mut self) -> Result<Array, JsValue> {
        let heads = self.0.get_heads();
        let heads: Array = heads
            .iter()
            .map(|h| JsValue::from_str(&hex::encode(&h.0)))
            .collect();
        Ok(heads)
    }

    #[wasm_bindgen(js_name = getActorId)]
    pub fn get_actor_id(&mut self) -> Result<JsValue, JsValue> {
        let actor = self.0.get_actor();
        Ok(actor.to_string().into())
    }

    #[wasm_bindgen(js_name = getLastLocalChange)]
    pub fn get_last_local_change(&mut self) -> Result<JsValue, JsValue> {
        if let Some(change) = self.0.get_last_local_change() {
            Ok(Uint8Array::from(change.raw_bytes()).into())
        } else {
            Ok(JsValue::null())
        }
    }

    pub fn dump(&self) {
        self.0.dump()
    }

    #[wasm_bindgen(js_name = getMissingDeps)]
    pub fn get_missing_deps(&mut self, heads: JsValue) -> Result<Array, JsValue> {
        let heads: Vec<_> = JS(heads).try_into()?;
        let deps = self.0.get_missing_deps(&heads);
        let deps: Array = deps
            .iter()
            .map(|h| JsValue::from_str(&hex::encode(&h.0)))
            .collect();
        Ok(deps)
    }

    #[wasm_bindgen(js_name = receiveSyncMessage)]
    pub fn receive_sync_message(
        &mut self,
        state: &mut SyncState,
        message: Uint8Array,
    ) -> Result<(), JsValue> {
        let message = message.to_vec();
        let message = am::SyncMessage::decode(message.as_slice()).map_err(to_js_err)?;
        self.0
            .receive_sync_message(&mut state.0, message)
            .map_err(to_js_err)?;
        Ok(())
    }

    #[wasm_bindgen(js_name = generateSyncMessage)]
    pub fn generate_sync_message(&mut self, state: &mut SyncState) -> Result<JsValue, JsValue> {
        if let Some(message) = self.0.generate_sync_message(&mut state.0) {
            Ok(Uint8Array::from(message.encode().map_err(to_js_err)?.as_slice()).into())
        } else {
            Ok(JsValue::null())
        }
    }

    fn export<D: std::fmt::Display>(&self, val: D) -> JsValue {
        val.to_string().into()
    }

    fn import<F: FromStr>(&self, id: JsValue) -> Result<F, JsValue>
    where
        F::Err: std::fmt::Display,
    {
        id.as_string()
            .ok_or("invalid opid/objid/elemid")?
            .parse::<F>()
            .map_err(to_js_err)
    }

    fn import_prop(&mut self, prop: JsValue) -> Result<Prop, JsValue> {
        if let Some(s) = prop.as_string() {
            Ok(s.into())
        } else if let Some(n) = prop.as_f64() {
            Ok((n as usize).into())
        } else {
            Err(format!("invalid prop {:?}", prop).into())
        }
    }

    fn import_value(&mut self, value: JsValue, datatype: JsValue) -> Result<Value, JsValue> {
        let datatype = datatype.as_string();
        match datatype.as_deref() {
            Some("boolean") => value
                .as_bool()
                .ok_or_else(|| "value must be a bool".into())
                .map(|v| am::ScalarValue::Boolean(v).into()),
            Some("int") => value
                .as_f64()
                .ok_or_else(|| "value must be a number".into())
                .map(|v| am::ScalarValue::Int(v as i64).into()),
            Some("uint") => value
                .as_f64()
                .ok_or_else(|| "value must be a number".into())
                .map(|v| am::ScalarValue::Uint(v as u64).into()),
            Some("f64") => value
                .as_f64()
                .ok_or_else(|| "value must be a number".into())
                .map(|n| am::ScalarValue::F64(n).into()),
            Some("bytes") => {
                Ok(am::ScalarValue::Bytes(value.dyn_into::<Uint8Array>().unwrap().to_vec()).into())
            }
            Some("counter") => value
                .as_f64()
                .ok_or_else(|| "value must be a number".into())
                .map(|v| am::ScalarValue::Counter(v as i64).into()),
            Some("timestamp") => value
                .as_f64()
                .ok_or_else(|| "value must be a number".into())
                .map(|v| am::ScalarValue::Timestamp(v as i64).into()),
            /*
            Some("bytes") => unimplemented!(),
            Some("cursor") => unimplemented!(),
            */
            Some("null") => Ok(am::ScalarValue::Null.into()),
            Some(_) => Err(format!("unknown datatype {:?}", datatype).into()),
            None => {
                if value.is_null() {
                    Ok(am::ScalarValue::Null.into())
                } else if let Some(b) = value.as_bool() {
                    Ok(am::ScalarValue::Boolean(b).into())
                } else if let Some(s) = value.as_string() {
                    // FIXME - we need to detect str vs int vs float vs bool here :/
                    Ok(am::ScalarValue::Str(s.into()).into())
                } else if let Some(n) = value.as_f64() {
                    if (n.round() - n).abs() < f64::EPSILON {
                        Ok(am::ScalarValue::Int(n as i64).into())
                    } else {
                        Ok(am::ScalarValue::F64(n).into())
                    }
                } else if let Some(o) = to_objtype(&value) {
                    Ok(o.into())
                } else if let Ok(o) = &value.dyn_into::<Uint8Array>() {
                    Ok(am::ScalarValue::Bytes(o.to_vec()).into())
                } else {
                    Err("value is invalid".into())
                }
            }
        }
    }
}

pub fn to_usize(val: JsValue, name: &str) -> Result<usize, JsValue> {
    match val.as_f64() {
        Some(n) => Ok(n as usize),
        None => Err(format!("{} must be a number", name).into()),
    }
}

pub fn to_prop(p: JsValue) -> Result<Prop, JsValue> {
    if let Some(s) = p.as_string() {
        Ok(Prop::Map(s))
    } else if let Some(n) = p.as_f64() {
        Ok(Prop::Seq(n as usize))
    } else {
        Err("prop must be a string or number".into())
    }
}

fn to_objtype(a: &JsValue) -> Option<am::ObjType> {
    if !a.is_function() {
        return None;
    }
    let f: js_sys::Function = a.clone().try_into().unwrap();
    let f = f.to_string();
    if f.starts_with("class MAP", 0) {
        Some(am::ObjType::Map)
    } else if f.starts_with("class LIST", 0) {
        Some(am::ObjType::List)
    } else if f.starts_with("class TEXT", 0) {
        Some(am::ObjType::Text)
    } else if f.starts_with("class TABLE", 0) {
        Some(am::ObjType::Table)
    } else {
        None
    }
}

struct ObjType(am::ObjType);

impl TryFrom<JsValue> for ObjType {
    type Error = JsValue;

    fn try_from(val: JsValue) -> Result<Self, Self::Error> {
        match &val.as_string() {
            Some(o) if o == "map" => Ok(ObjType(am::ObjType::Map)),
            Some(o) if o == "list" => Ok(ObjType(am::ObjType::List)),
            Some(o) => Err(format!("unknown obj type {}", o).into()),
            _ => Err("obj type must be a string".into()),
        }
    }
}

#[wasm_bindgen]
pub fn init(actor: JsValue) -> Result<Automerge, JsValue> {
    console_error_panic_hook::set_once();
    Automerge::new(actor)
}

#[wasm_bindgen]
pub fn load(data: Uint8Array, actor: JsValue) -> Result<Automerge, JsValue> {
    let data = data.to_vec();
    let mut automerge = am::Automerge::load(&data).map_err(to_js_err)?;
    if let Some(s) = actor.as_string() {
        let actor = automerge::ActorId::from(hex::decode(s).map_err(to_js_err)?.to_vec());
        automerge.set_actor(actor)
    }
    Ok(Automerge(automerge))
}

#[wasm_bindgen(js_name = encodeChange)]
pub fn encode_change(change: JsValue) -> Result<Uint8Array, JsValue> {
    let change: am::ExpandedChange = change.into_serde().map_err(to_js_err)?;
    let change: Change = change.into();
    Ok(Uint8Array::from(change.raw_bytes()))
}

#[wasm_bindgen(js_name = decodeChange)]
pub fn decode_change(change: Uint8Array) -> Result<JsValue, JsValue> {
    let change = Change::from_bytes(change.to_vec()).map_err(to_js_err)?;
    let change: am::ExpandedChange = change.decode();
    JsValue::from_serde(&change).map_err(to_js_err)
}

#[wasm_bindgen(js_name = initSyncState)]
pub fn init_sync_state() -> SyncState {
    SyncState(Default::default())
}

#[wasm_bindgen(js_name = encodeSyncMessage)]
pub fn encode_sync_message(message: JsValue) -> Result<Uint8Array, JsValue> {
    let heads = get(&message, "heads")?.try_into()?;
    let need = get(&message, "need")?.try_into()?;
    let changes = get(&message, "changes")?.try_into()?;
    let have = get(&message, "have")?.try_into()?;
    Ok(Uint8Array::from(
        am::SyncMessage {
            heads,
            need,
            have,
            changes,
        }
        .encode()
        .unwrap()
        .as_slice(),
    ))
}

#[wasm_bindgen(js_name = decodeSyncMessage)]
pub fn decode_sync_message(msg: Uint8Array) -> Result<JsValue, JsValue> {
    let data = msg.to_vec();
    let msg = am::SyncMessage::decode(&data).map_err(to_js_err)?;
    let heads: Array = VH(&msg.heads).into();
    let need: Array = VH(&msg.need).into();
    let changes: Array = VC(&msg.changes).into();
    let have: Array = VSH(&msg.have).try_into()?;
    let obj = Object::new().into();
    set(&obj, "heads", heads)?;
    set(&obj, "need", need)?;
    set(&obj, "have", have)?;
    set(&obj, "changes", changes)?;
    Ok(obj)
}

#[wasm_bindgen(js_name = encodeSyncState)]
pub fn encode_sync_state(state: SyncState) -> Result<Uint8Array, JsValue> {
    Ok(Uint8Array::from(
        state.0.encode().map_err(to_js_err)?.as_slice(),
    ))
}

#[wasm_bindgen(js_name = decodeSyncState)]
pub fn decode_sync_state(state: Uint8Array) -> Result<SyncState, JsValue> {
    SyncState::decode(state)
}

#[wasm_bindgen(js_name = MAP)]
pub struct Map {}

#[wasm_bindgen(js_name = LIST)]
pub struct List {}

#[wasm_bindgen(js_name = TEXT)]
pub struct Text {}

#[wasm_bindgen(js_name = TABLE)]
pub struct Table {}

fn to_js_err<T: Display>(err: T) -> JsValue {
    js_sys::Error::new(&std::format!("{}", err)).into()
}

fn get(obj: &JsValue, prop: &str) -> Result<JS, JsValue> {
    Ok(JS(Reflect::get(obj, &prop.into())?))
}

fn set<V: Into<JsValue>>(obj: &JsValue, prop: &str, val: V) -> Result<bool, JsValue> {
    Reflect::set(obj, &prop.into(), &val.into())
}

struct JS(JsValue);

impl TryFrom<JS> for Vec<ChangeHash> {
    type Error = JsValue;

    fn try_from(value: JS) -> Result<Self, Self::Error> {
        let value = value.0.dyn_into::<Array>()?;
        let value: Result<Vec<ChangeHash>, _> = value.iter().map(|j| j.into_serde()).collect();
        let value = value.map_err(to_js_err)?;
        Ok(value)
    }
}

impl From<JS> for Option<Vec<ChangeHash>> {
    fn from(value: JS) -> Self {
        let value = value.0.dyn_into::<Array>().ok()?;
        let value: Result<Vec<ChangeHash>, _> = value.iter().map(|j| j.into_serde()).collect();
        let value = value.ok()?;
        Some(value)
    }
}

impl TryFrom<JS> for Vec<Change> {
    type Error = JsValue;

    fn try_from(value: JS) -> Result<Self, Self::Error> {
        let value = value.0.dyn_into::<Array>()?;
        let changes: Result<Vec<Uint8Array>, _> = value.iter().map(|j| j.dyn_into()).collect();
        let changes = changes?;
        let changes: Result<Vec<Change>, _> = changes
            .iter()
            .map(|a| am::decode_change(a.to_vec()))
            .collect();
        let changes = changes.map_err(to_js_err)?;
        Ok(changes)
    }
}

impl TryFrom<JS> for Vec<am::SyncHave> {
    type Error = JsValue;

    fn try_from(value: JS) -> Result<Self, Self::Error> {
        let value = value.0.dyn_into::<Array>()?;
        let have: Result<Vec<am::SyncHave>, JsValue> = value
            .iter()
            .map(|s| {
                let last_sync = get(&s, "lastSync")?.try_into()?;
                let bloom = get(&s, "bloom")?.try_into()?;
                Ok(am::SyncHave { last_sync, bloom })
            })
            .collect();
        let have = have?;
        Ok(have)
    }
}

impl TryFrom<JS> for am::BloomFilter {
    type Error = JsValue;

    fn try_from(value: JS) -> Result<Self, Self::Error> {
        let value: Uint8Array = value.0.dyn_into()?;
        let value = value.to_vec();
        let value = value.as_slice().try_into().map_err(to_js_err)?;
        Ok(value)
    }
}

struct VH<'a>(&'a [ChangeHash]);

impl<'a> From<VH<'a>> for Array {
    fn from(value: VH<'a>) -> Self {
        let heads: Array = value
            .0
            .iter()
            .map(|h| JsValue::from_str(&hex::encode(&h.0)))
            .collect();
        heads
    }
}

struct VC<'a>(&'a [Change]);

impl<'a> From<VC<'a>> for Array {
    fn from(value: VC<'a>) -> Self {
        let changes: Array = value
            .0
            .iter()
            .map(|c| Uint8Array::from(c.raw_bytes()))
            .collect();
        changes
    }
}

#[allow(clippy::upper_case_acronyms)]
struct VSH<'a>(&'a [am::SyncHave]);

impl<'a> TryFrom<VSH<'a>> for Array {
    type Error = JsValue;

    fn try_from(value: VSH<'a>) -> Result<Self, Self::Error> {
        let have: Result<Array, JsValue> = value
            .0
            .iter()
            .map(|have| {
                let last_sync: Array = have
                    .last_sync
                    .iter()
                    .map(|h| JsValue::from_str(&hex::encode(&h.0)))
                    .collect();
                // FIXME - the clone and the unwrap here shouldn't be needed - look at into_bytes()
                let bloom = Uint8Array::from(have.bloom.clone().into_bytes().unwrap().as_slice());
                let obj: JsValue = Object::new().into();
                Reflect::set(&obj, &"lastSync".into(), &last_sync.into())?;
                Reflect::set(&obj, &"bloom".into(), &bloom.into())?;
                Ok(obj)
            })
            .collect();
        let have = have?;
        Ok(have)
    }
}

fn rust_to_js<T: Serialize>(value: T) -> Result<JsValue, JsValue> {
    JsValue::from_serde(&value).map_err(to_js_err)
}

fn js_to_rust<T: DeserializeOwned>(value: &JsValue) -> Result<T, JsValue> {
    value.into_serde().map_err(to_js_err)
}

fn get_heads(heads: JsValue) -> Option<Vec<ChangeHash>> {
    JS(heads).into()
}
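
Taken together, the bindings above expose a low-level, string-addressed API: objects are identified by exported op ids, every read comes back as a [datatype, value] pair, and save/saveIncremental/load round-trip the binary format. A minimal sketch of driving it from JavaScript, mirroring the tests later in this diff:

const Automerge = require('..') // the wasm-pack output

let doc = Automerge.init()
doc.set('_root', 'count', 10, 'counter') // explicit datatype tag
doc.inc('_root', 'count', 5)
console.log(doc.value('_root', 'count')) // -> ['counter', 15]

const bytes = doc.save()                 // Uint8Array in the document format
let copy = Automerge.load(bytes)
console.log(copy.keys('_root'))          // -> ['count']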
@ -1,284 +0,0 @@

const assert = require('assert')
const util = require('util')
const Automerge = require('..')
const { MAP, LIST, TEXT } = Automerge

// str to uint8array
function en(str) {
  return new TextEncoder().encode(str)
}
// uint8array to str
function de(bytes) {
  return new TextDecoder('utf8').decode(bytes);
}

describe('Automerge', () => {
  describe('basics', () => {
    it('should init clone and free', () => {
      let doc1 = Automerge.init()
      let doc2 = doc1.clone()
      doc1.free()
      doc2.free()
    })

    it('should be able to start and commit', () => {
      let doc = Automerge.init()
      doc.commit()
    })

    it('getting a nonexistent prop does not throw an error', () => {
      let doc = Automerge.init()
      let root = "_root"
      let result = doc.value(root, "hello")
      assert.deepEqual(result, [])
    })

    it('should be able to set and get a simple value', () => {
      let doc = Automerge.init()
      let root = "_root"
      let result

      doc.set(root, "hello", "world")
      doc.set(root, "number1", 5, "uint")
      doc.set(root, "number2", 5)
      doc.set(root, "number3", 5.5)
      doc.set(root, "number4", 5.5, "f64")
      doc.set(root, "number5", 5.5, "int")
      doc.set(root, "bool", true)

      result = doc.value(root, "hello")
      assert.deepEqual(result, ["str", "world"])

      result = doc.value(root, "number1")
      assert.deepEqual(result, ["uint", 5])

      result = doc.value(root, "number2")
      assert.deepEqual(result, ["int", 5])

      result = doc.value(root, "number3")
      assert.deepEqual(result, ["f64", 5.5])

      result = doc.value(root, "number4")
      assert.deepEqual(result, ["f64", 5.5])

      result = doc.value(root, "number5")
      assert.deepEqual(result, ["int", 5])

      result = doc.value(root, "bool")
      assert.deepEqual(result, ["boolean", true])

      doc.set(root, "bool", false, "boolean")

      result = doc.value(root, "bool")
      assert.deepEqual(result, ["boolean", false])
    })

    it('should be able to use bytes', () => {
      let doc = Automerge.init()
      doc.set("_root", "data1", new Uint8Array([10, 11, 12]));
      doc.set("_root", "data2", new Uint8Array([13, 14, 15]), "bytes");
      let value1 = doc.value("_root", "data1")
      assert.deepEqual(value1, ["bytes", new Uint8Array([10, 11, 12])]);
      let value2 = doc.value("_root", "data2")
      assert.deepEqual(value2, ["bytes", new Uint8Array([13, 14, 15])]);
    })

    it('should be able to make sub objects', () => {
      let doc = Automerge.init()
      let root = "_root"
      let result

      let submap = doc.set(root, "submap", MAP)
      doc.set(submap, "number", 6, "uint")
      assert.strictEqual(doc.pending_ops(), 2)

      result = doc.value(root, "submap")
      assert.deepEqual(result, ["map", submap])

      result = doc.value(submap, "number")
      assert.deepEqual(result, ["uint", 6])
    })

    it('should be able to make lists', () => {
      let doc = Automerge.init()
      let root = "_root"

      let submap = doc.set(root, "numbers", LIST)
      doc.insert(submap, 0, "a");
      doc.insert(submap, 1, "b");
      doc.insert(submap, 2, "c");
      doc.insert(submap, 0, "z");

      assert.deepEqual(doc.value(submap, 0), ["str", "z"])
      assert.deepEqual(doc.value(submap, 1), ["str", "a"])
      assert.deepEqual(doc.value(submap, 2), ["str", "b"])
      assert.deepEqual(doc.value(submap, 3), ["str", "c"])
      assert.deepEqual(doc.length(submap), 4)

      doc.set(submap, 2, "b v2");

      assert.deepEqual(doc.value(submap, 2), ["str", "b v2"])
      assert.deepEqual(doc.length(submap), 4)
    })

    it('should be able to delete non-existent props', () => {
      let doc = Automerge.init()

      doc.set("_root", "foo", "bar")
      doc.set("_root", "bip", "bap")
      let heads1 = doc.commit()

      assert.deepEqual(doc.keys("_root"), ["bip", "foo"])

      doc.del("_root", "foo")
      doc.del("_root", "baz")
      let heads2 = doc.commit()

      assert.deepEqual(doc.keys("_root"), ["bip"])
      assert.deepEqual(doc.keys("_root", heads1), ["bip", "foo"])
      assert.deepEqual(doc.keys("_root", heads2), ["bip"])
    })

    it('should be able to del', () => {
      let doc = Automerge.init()
      let root = "_root"

      doc.set(root, "xxx", "xxx");
      assert.deepEqual(doc.value(root, "xxx"), ["str", "xxx"])
      doc.del(root, "xxx");
      assert.deepEqual(doc.value(root, "xxx"), [])
    })

    it('should be able to use counters', () => {
      let doc = Automerge.init()
      let root = "_root"

      doc.set(root, "counter", 10, "counter");
      assert.deepEqual(doc.value(root, "counter"), ["counter", 10])
      doc.inc(root, "counter", 10);
      assert.deepEqual(doc.value(root, "counter"), ["counter", 20])
      doc.inc(root, "counter", -5);
      assert.deepEqual(doc.value(root, "counter"), ["counter", 15])
    })

    it('should be able to splice text', () => {
      let doc = Automerge.init()
      let root = "_root";

      let text = doc.set(root, "text", Automerge.TEXT);
      doc.splice(text, 0, 0, "hello ")
      doc.splice(text, 6, 0, ["w", "o", "r", "l", "d"])
      doc.splice(text, 11, 0, [["str", "!"], ["str", "?"]])
      assert.deepEqual(doc.value(text, 0), ["str", "h"])
      assert.deepEqual(doc.value(text, 1), ["str", "e"])
      assert.deepEqual(doc.value(text, 9), ["str", "l"])
      assert.deepEqual(doc.value(text, 10), ["str", "d"])
      assert.deepEqual(doc.value(text, 11), ["str", "!"])
      assert.deepEqual(doc.value(text, 12), ["str", "?"])
    })

    it('should be able to save all or incrementally', () => {
      let doc = Automerge.init()

      doc.set("_root", "foo", 1)

      let save1 = doc.save()

      doc.set("_root", "bar", 2)

      let saveMidway = doc.clone().save();

      let save2 = doc.saveIncremental();

      doc.set("_root", "baz", 3);

      let save3 = doc.saveIncremental();

      let saveA = doc.save();
      let saveB = new Uint8Array([...save1, ...save2, ...save3]);

      assert.notDeepEqual(saveA, saveB);

      let docA = Automerge.load(saveA);
      let docB = Automerge.load(saveB);
      let docC = Automerge.load(saveMidway)
      docC.loadIncremental(save3)

      assert.deepEqual(docA.keys("_root"), docB.keys("_root"));
      assert.deepEqual(docA.save(), docB.save());
      assert.deepEqual(docA.save(), docC.save());
    })

    it('should be able to query text at old heads', () => {
      let doc = Automerge.init()
      let text = doc.set("_root", "text", TEXT);
      doc.splice(text, 0, 0, "hello world");
      let heads1 = doc.commit();
      doc.splice(text, 6, 0, "big bad ");
      let heads2 = doc.commit();
      assert.strictEqual(doc.text(text), "hello big bad world")
      assert.strictEqual(doc.length(text), 19)
      assert.strictEqual(doc.text(text, heads1), "hello world")
      assert.strictEqual(doc.length(text, heads1), 11)
      assert.strictEqual(doc.text(text, heads2), "hello big bad world")
      assert.strictEqual(doc.length(text, heads2), 19)
    })

    it('local inc increments all visible counters in a map', () => {
      let doc1 = Automerge.init("aaaa")
      doc1.set("_root", "hello", "world")
      let doc2 = Automerge.load(doc1.save(), "bbbb");
      let doc3 = Automerge.load(doc1.save(), "cccc");
      doc1.set("_root", "cnt", 20)
      doc2.set("_root", "cnt", 0, "counter")
      doc3.set("_root", "cnt", 10, "counter")
      doc1.applyChanges(doc2.getChanges(doc1.getHeads()))
      doc1.applyChanges(doc3.getChanges(doc1.getHeads()))
      let result = doc1.values("_root", "cnt")
      assert.deepEqual(result, [
        ['counter', 10, '2@cccc'],
        ['counter', 0, '2@bbbb'],
        ['int', 20, '2@aaaa']
      ])
      doc1.inc("_root", "cnt", 5)
      result = doc1.values("_root", "cnt")
      assert.deepEqual(result, [
        ['counter', 15, '2@cccc'], ['counter', 5, '2@bbbb']
      ])

      let save1 = doc1.save()
      let doc4 = Automerge.load(save1)
      assert.deepEqual(doc4.save(), save1);
    })

    it('local inc increments all visible counters in a sequence', () => {
      let doc1 = Automerge.init("aaaa")
      let seq = doc1.set("_root", "seq", LIST)
      doc1.insert(seq, 0, "hello")
      let doc2 = Automerge.load(doc1.save(), "bbbb");
      let doc3 = Automerge.load(doc1.save(), "cccc");
      doc1.set(seq, 0, 20)
      doc2.set(seq, 0, 0, "counter")
      doc3.set(seq, 0, 10, "counter")
      doc1.applyChanges(doc2.getChanges(doc1.getHeads()))
      doc1.applyChanges(doc3.getChanges(doc1.getHeads()))
      let result = doc1.values(seq, 0)
      assert.deepEqual(result, [
        ['counter', 10, '3@cccc'],
        ['counter', 0, '3@bbbb'],
        ['int', 20, '3@aaaa']
      ])
      doc1.inc(seq, 0, 5)
      result = doc1.values(seq, 0)
      assert.deepEqual(result, [
        ['counter', 15, '3@cccc'], ['counter', 5, '3@bbbb']
      ])

      let save = doc1.save()
      let doc4 = Automerge.load(save)
      assert.deepEqual(doc4.save(), save);
    })

  })
})
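
The suite above exercises the data API but not the sync entry points (initSyncState, generateSyncMessage, receiveSyncMessage) defined in the bindings. A sketch of a two-peer exchange using those bindings, looping until neither side has anything left to send:

const Automerge = require('..')

function sync(a, b) {
  let sa = Automerge.initSyncState()
  let sb = Automerge.initSyncState()
  // keep exchanging messages until both sides are quiescent
  for (;;) {
    const ma = a.generateSyncMessage(sa)
    const mb = b.generateSyncMessage(sb)
    if (ma === null && mb === null) break
    if (ma !== null) b.receiveSyncMessage(sb, ma)
    if (mb !== null) a.receiveSyncMessage(sa, mb)
  }
}

let doc1 = Automerge.init('aaaa')
doc1.set('_root', 'hello', 'world')
doc1.commit()
let doc2 = Automerge.init('bbbb')
sync(doc1, doc2)
console.log(doc2.value('_root', 'hello')) // -> ['str', 'world']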
@ -1,38 +0,0 @@

[package]
name = "automerge"
version = "0.1.0"
edition = "2018"
license = "MIT"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
optree-visualisation = ["dot"]

[dependencies]
hex = "^0.4.3"
leb128 = "^0.2.5"
sha2 = "^0.10.0"
rand = { version = "^0.8.4" }
thiserror = "^1.0.16"
itertools = "^0.10.3"
flate2 = "^1.0.22"
nonzero_ext = "^0.2.0"
uuid = { version = "^0.8.2", features=["v4", "wasm-bindgen", "serde"] }
smol_str = "^0.1.21"
tracing = { version = "^0.1.29", features = ["log"] }
fxhash = "^0.2.1"
tinyvec = { version = "^1.5.1", features = ["alloc"] }
unicode-segmentation = "1.7.1"
serde = { version = "^1.0", features=["derive"] }
dot = { version = "0.1.4", optional = true }

[dependencies.web-sys]
version = "^0.3.55"
features = ["console"]

[dev-dependencies]
pretty_assertions = "1.0.0"
proptest = { version = "^1.0.0", default-features = false, features = ["std"] }
serde_json = { version = "^1.0.73", features=["float_roundtrip"], default-features=true }
maplit = { version = "^1.0" }
@ -1,18 +0,0 @@

counters -> Visibility

fast load

values at clock
length at clock
keys at clock
text at clock

extra tests
counters in lists -> inserts with tombstones

ergonomics

set(obj, prop, val) vs mapset(obj, str, val) and seqset(obj, usize, val)
value() -> (id, value)
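
Several of these notes ("values at clock", "length at clock", "keys at clock", "text at clock") correspond to the heads-taking overloads already visible in the wasm bindings in this diff; from JavaScript they read as time-travel queries:

let doc = Automerge.init()
doc.set('_root', 'foo', 'bar')
let heads = doc.commit()          // heads naming this version of the doc
doc.set('_root', 'foo', 'baz')
doc.commit()

console.log(doc.value('_root', 'foo'))        // -> ['str', 'baz']
console.log(doc.value('_root', 'foo', heads)) // -> ['str', 'bar'] at the old heads
console.log(doc.keys('_root', heads))         // -> ['foo']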
|
|
@ -1,916 +0,0 @@
|
|||
use crate::columnar::{
|
||||
ChangeEncoder, ChangeIterator, ColumnEncoder, DepsIterator, DocChange, DocOp, DocOpEncoder,
|
||||
DocOpIterator, OperationIterator, COLUMN_TYPE_DEFLATE,
|
||||
};
|
||||
use crate::decoding;
|
||||
use crate::decoding::{Decodable, InvalidChangeError};
|
||||
use crate::encoding::{Encodable, DEFLATE_MIN_SIZE};
|
||||
use crate::legacy as amp;
|
||||
use crate::{
|
||||
types::{ObjId, OpId},
|
||||
ActorId, AutomergeError, ElemId, IndexedCache, Key, Op, OpType, Transaction, HEAD,
|
||||
};
|
||||
use core::ops::Range;
|
||||
use flate2::{
|
||||
bufread::{DeflateDecoder, DeflateEncoder},
|
||||
Compression,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use sha2::Digest;
|
||||
use sha2::Sha256;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::convert::TryInto;
|
||||
use std::fmt::Debug;
|
||||
use std::io::{Read, Write};
|
||||
use tracing::instrument;
|
||||
|
||||
const MAGIC_BYTES: [u8; 4] = [0x85, 0x6f, 0x4a, 0x83];
|
||||
const PREAMBLE_BYTES: usize = 8;
|
||||
const HEADER_BYTES: usize = PREAMBLE_BYTES + 1;
|
||||
|
||||
const HASH_BYTES: usize = 32;
|
||||
const BLOCK_TYPE_DOC: u8 = 0;
|
||||
const BLOCK_TYPE_CHANGE: u8 = 1;
|
||||
const BLOCK_TYPE_DEFLATE: u8 = 2;
|
||||
const CHUNK_START: usize = 8;
|
||||
const HASH_RANGE: Range<usize> = 4..8;
|
||||
|
||||
fn get_heads(changes: &[amp::Change]) -> HashSet<amp::ChangeHash> {
|
||||
changes.iter().fold(HashSet::new(), |mut acc, c| {
|
||||
if let Some(h) = c.hash {
|
||||
acc.insert(h);
|
||||
}
|
||||
for dep in &c.deps {
|
||||
acc.remove(dep);
|
||||
}
|
||||
acc
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn encode_document(
|
||||
changes: &[amp::Change],
|
||||
doc_ops: &[Op],
|
||||
actors_index: &IndexedCache<ActorId>,
|
||||
props: &[String],
|
||||
) -> Result<Vec<u8>, AutomergeError> {
|
||||
let mut bytes: Vec<u8> = Vec::new();
|
||||
|
||||
let heads = get_heads(changes);
|
||||
|
||||
let actors_map = actors_index.encode_index();
|
||||
let actors = actors_index.sorted();
|
||||
|
||||
/*
|
||||
// this assumes that all actor_ids referenced are seen in changes.actor_id which is true
|
||||
// so long as we have a full history
|
||||
let mut actors: Vec<_> = changes
|
||||
.iter()
|
||||
.map(|c| &c.actor)
|
||||
.unique()
|
||||
.sorted()
|
||||
.cloned()
|
||||
.collect();
|
||||
*/
|
||||
|
||||
let (change_bytes, change_info) = ChangeEncoder::encode_changes(changes, &actors);
|
||||
|
||||
//let doc_ops = group_doc_ops(changes, &actors);
|
||||
|
||||
let (ops_bytes, ops_info) = DocOpEncoder::encode_doc_ops(doc_ops, &actors_map, props);
|
||||
|
||||
bytes.extend(&MAGIC_BYTES);
|
||||
bytes.extend(vec![0, 0, 0, 0]); // we dont know the hash yet so fill in a fake
|
||||
bytes.push(BLOCK_TYPE_DOC);
|
||||
|
||||
let mut chunk = Vec::new();
|
||||
|
||||
actors.len().encode(&mut chunk)?;
|
||||
|
||||
for a in actors.into_iter() {
|
||||
a.to_bytes().encode(&mut chunk)?;
|
||||
}
|
||||
|
||||
heads.len().encode(&mut chunk)?;
|
||||
for head in heads.iter().sorted() {
|
||||
chunk.write_all(&head.0).unwrap();
|
||||
}
|
||||
|
||||
chunk.extend(change_info);
|
||||
chunk.extend(ops_info);
|
||||
|
||||
chunk.extend(change_bytes);
|
||||
chunk.extend(ops_bytes);
|
||||
|
||||
leb128::write::unsigned(&mut bytes, chunk.len() as u64).unwrap();
|
||||
|
||||
bytes.extend(&chunk);
|
||||
|
||||
let hash_result = Sha256::digest(&bytes[CHUNK_START..bytes.len()]);
|
||||
|
||||
bytes.splice(HASH_RANGE, hash_result[0..4].iter().copied());
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
impl From<amp::Change> for Change {
|
||||
fn from(value: amp::Change) -> Self {
|
||||
encode(&value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&::Change> for Change {
|
||||
fn from(value: &::Change) -> Self {
|
||||
encode(value)
|
||||
}
|
||||
}
|
||||
|
||||
fn encode(change: &::Change) -> Change {
|
||||
let mut deps = change.deps.clone();
|
||||
deps.sort_unstable();
|
||||
|
||||
let mut chunk = encode_chunk(change, &deps);
|
||||
|
||||
let mut bytes = Vec::with_capacity(MAGIC_BYTES.len() + 4 + chunk.bytes.len());
|
||||
|
||||
bytes.extend(&MAGIC_BYTES);
|
||||
|
||||
bytes.extend(vec![0, 0, 0, 0]); // we dont know the hash yet so fill in a fake
|
||||
|
||||
bytes.push(BLOCK_TYPE_CHANGE);
|
||||
|
||||
leb128::write::unsigned(&mut bytes, chunk.bytes.len() as u64).unwrap();
|
||||
|
||||
let body_start = bytes.len();
|
||||
|
||||
increment_range(&mut chunk.body, bytes.len());
|
||||
increment_range(&mut chunk.message, bytes.len());
|
||||
increment_range(&mut chunk.extra_bytes, bytes.len());
|
||||
increment_range_map(&mut chunk.ops, bytes.len());
|
||||
|
||||
bytes.extend(&chunk.bytes);
|
||||
|
||||
let hash_result = Sha256::digest(&bytes[CHUNK_START..bytes.len()]);
|
||||
let hash: amp::ChangeHash = hash_result[..].try_into().unwrap();
|
||||
|
||||
bytes.splice(HASH_RANGE, hash_result[0..4].iter().copied());
|
||||
|
||||
// any time I make changes to the encoder decoder its a good idea
|
||||
// to run it through a round trip to detect errors the tests might not
|
||||
// catch
|
||||
// let c0 = Change::from_bytes(bytes.clone()).unwrap();
|
||||
// std::assert_eq!(c1, c0);
|
||||
// perhaps we should add something like this to the test suite
|
||||
|
||||
let bytes = ChangeBytes::Uncompressed(bytes);
|
||||
|
||||
Change {
|
||||
bytes,
|
||||
body_start,
|
||||
hash,
|
||||
seq: change.seq,
|
||||
start_op: change.start_op,
|
||||
time: change.time,
|
||||
actors: chunk.actors,
|
||||
message: chunk.message,
|
||||
deps,
|
||||
ops: chunk.ops,
|
||||
extra_bytes: chunk.extra_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
struct ChunkIntermediate {
|
||||
bytes: Vec<u8>,
|
||||
body: Range<usize>,
|
||||
actors: Vec<ActorId>,
|
||||
message: Range<usize>,
|
||||
ops: HashMap<u32, Range<usize>>,
|
||||
extra_bytes: Range<usize>,
|
||||
}
|
||||
|
||||
fn encode_chunk(change: &::Change, deps: &[amp::ChangeHash]) -> ChunkIntermediate {
|
||||
let mut bytes = Vec::new();
|
||||
|
||||
// All these unwraps are okay because we're writing to an in memory buffer so io erros should
|
||||
// not happen
|
||||
|
||||
// encode deps
|
||||
deps.len().encode(&mut bytes).unwrap();
|
||||
for hash in deps.iter() {
|
||||
bytes.write_all(&hash.0).unwrap();
|
||||
}
|
||||
|
||||
// encode first actor
|
||||
let mut actors = vec![change.actor_id.clone()];
|
||||
change.actor_id.to_bytes().encode(&mut bytes).unwrap();
|
||||
|
||||
// encode seq, start_op, time, message
|
||||
change.seq.encode(&mut bytes).unwrap();
|
||||
change.start_op.encode(&mut bytes).unwrap();
|
||||
change.time.encode(&mut bytes).unwrap();
|
||||
let message = bytes.len() + 1;
|
||||
change.message.encode(&mut bytes).unwrap();
|
||||
let message = message..bytes.len();
|
||||
|
||||
// encode ops into a side buffer - collect all other actors
|
||||
let (ops_buf, mut ops) = ColumnEncoder::encode_ops(&change.operations, &mut actors);
|
||||
|
||||
// encode all other actors
|
||||
actors[1..].encode(&mut bytes).unwrap();
|
||||
|
||||
// now we know how many bytes ops are offset by so we can adjust the ranges
|
||||
increment_range_map(&mut ops, bytes.len());
|
||||
|
||||
// write out the ops
|
||||
|
||||
bytes.write_all(&ops_buf).unwrap();
|
||||
|
||||
// write out the extra bytes
|
||||
let extra_bytes = bytes.len()..(bytes.len() + change.extra_bytes.len());
|
||||
bytes.write_all(&change.extra_bytes).unwrap();
|
||||
let body = 0..bytes.len();
|
||||
|
||||
ChunkIntermediate {
|
||||
bytes,
|
||||
body,
|
||||
actors,
|
||||
message,
|
||||
ops,
|
||||
extra_bytes,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
enum ChangeBytes {
|
||||
Compressed {
|
||||
compressed: Vec<u8>,
|
||||
uncompressed: Vec<u8>,
|
||||
},
|
||||
Uncompressed(Vec<u8>),
|
||||
}
|
||||
|
||||
impl ChangeBytes {
|
||||
fn uncompressed(&self) -> &[u8] {
|
||||
match self {
|
||||
ChangeBytes::Compressed { uncompressed, .. } => &uncompressed[..],
|
||||
ChangeBytes::Uncompressed(b) => &b[..],
|
||||
}
|
||||
}
|
||||
|
||||
fn compress(&mut self, body_start: usize) {
|
||||
match self {
|
||||
ChangeBytes::Compressed { .. } => {}
|
||||
ChangeBytes::Uncompressed(uncompressed) => {
|
||||
if uncompressed.len() > DEFLATE_MIN_SIZE {
|
||||
let mut result = Vec::with_capacity(uncompressed.len());
|
||||
result.extend(&uncompressed[0..8]);
|
||||
result.push(BLOCK_TYPE_DEFLATE);
|
||||
let mut deflater =
|
||||
DeflateEncoder::new(&uncompressed[body_start..], Compression::default());
|
||||
let mut deflated = Vec::new();
|
||||
let deflated_len = deflater.read_to_end(&mut deflated).unwrap();
|
||||
leb128::write::unsigned(&mut result, deflated_len as u64).unwrap();
|
||||
result.extend(&deflated[..]);
|
||||
*self = ChangeBytes::Compressed {
|
||||
compressed: result,
|
||||
uncompressed: std::mem::take(uncompressed),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn raw(&self) -> &[u8] {
|
||||
match self {
|
||||
ChangeBytes::Compressed { compressed, .. } => &compressed[..],
|
||||
ChangeBytes::Uncompressed(b) => &b[..],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
pub struct Change {
|
||||
bytes: ChangeBytes,
|
||||
body_start: usize,
|
||||
pub hash: amp::ChangeHash,
|
||||
pub seq: u64,
|
||||
pub start_op: u64,
|
||||
pub time: i64,
|
||||
message: Range<usize>,
|
||||
actors: Vec<ActorId>,
|
||||
pub deps: Vec<amp::ChangeHash>,
|
||||
ops: HashMap<u32, Range<usize>>,
|
||||
extra_bytes: Range<usize>,
|
||||
}
|
||||
|
||||
impl Change {
|
||||
pub fn actor_id(&self) -> &ActorId {
|
||||
&self.actors[0]
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(bytes))]
|
||||
pub fn load_document(bytes: &[u8]) -> Result<Vec<Change>, AutomergeError> {
|
||||
load_blocks(bytes)
|
||||
}
|
||||
|
||||
pub fn from_bytes(bytes: Vec<u8>) -> Result<Change, decoding::Error> {
|
||||
decode_change(bytes)
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
// TODO - this could be a lot more efficient
|
||||
self.iter_ops().count()
|
||||
}
|
||||
|
||||
pub fn max_op(&self) -> u64 {
|
||||
self.start_op + (self.len() as u64) - 1
|
||||
}
|
||||
|
||||
fn message(&self) -> Option<String> {
|
||||
let m = &self.bytes.uncompressed()[self.message.clone()];
|
||||
if m.is_empty() {
|
||||
None
|
||||
} else {
|
||||
std::str::from_utf8(m).map(ToString::to_string).ok()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode(&self) -> amp::Change {
|
||||
amp::Change {
|
||||
start_op: self.start_op,
|
||||
seq: self.seq,
|
||||
time: self.time,
|
||||
hash: Some(self.hash),
|
||||
message: self.message(),
|
||||
actor_id: self.actors[0].clone(),
|
||||
deps: self.deps.clone(),
|
||||
operations: self
|
||||
.iter_ops()
|
||||
.map(|op| amp::Op {
|
||||
action: op.action.clone(),
|
||||
obj: op.obj.clone(),
|
||||
key: op.key.clone(),
|
||||
pred: op.pred.clone(),
|
||||
insert: op.insert,
|
||||
})
|
||||
.collect(),
|
||||
extra_bytes: self.extra_bytes().into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn iter_ops(&self) -> OperationIterator {
|
||||
OperationIterator::new(self.bytes.uncompressed(), self.actors.as_slice(), &self.ops)
|
||||
}
|
||||
|
||||
pub fn extra_bytes(&self) -> &[u8] {
|
||||
&self.bytes.uncompressed()[self.extra_bytes.clone()]
|
||||
}
|
||||
|
||||
pub fn compress(&mut self) {
|
||||
self.bytes.compress(self.body_start);
|
||||
}
|
||||
|
||||
pub fn raw_bytes(&self) -> &[u8] {
|
||||
self.bytes.raw()
|
||||
}
|
||||
}
|
||||
|
||||
fn read_leb128(bytes: &mut &[u8]) -> Result<(usize, usize), decoding::Error> {
|
||||
let mut buf = &bytes[..];
|
||||
let val = leb128::read::unsigned(&mut buf)? as usize;
|
||||
let leb128_bytes = bytes.len() - buf.len();
|
||||
Ok((val, leb128_bytes))
|
||||
}
|
||||
|
||||
fn read_slice<T: Decodable + Debug>(
|
||||
bytes: &[u8],
|
||||
cursor: &mut Range<usize>,
|
||||
) -> Result<T, decoding::Error> {
|
||||
let mut view = &bytes[cursor.clone()];
|
||||
let init_len = view.len();
|
||||
let val = T::decode::<&[u8]>(&mut view).ok_or(decoding::Error::NoDecodedValue);
|
||||
let bytes_read = init_len - view.len();
|
||||
*cursor = (cursor.start + bytes_read)..cursor.end;
|
||||
val
|
||||
}
|
||||
|
||||
fn slice_bytes(bytes: &[u8], cursor: &mut Range<usize>) -> Result<Range<usize>, decoding::Error> {
|
||||
let (val, len) = read_leb128(&mut &bytes[cursor.clone()])?;
|
||||
let start = cursor.start + len;
|
||||
let end = start + val;
|
||||
*cursor = end..cursor.end;
|
||||
Ok(start..end)
|
||||
}
|
||||
|
||||
fn increment_range(range: &mut Range<usize>, len: usize) {
|
||||
range.end += len;
|
||||
range.start += len;
|
||||
}
|
||||
|
||||
fn increment_range_map(ranges: &mut HashMap<u32, Range<usize>>, len: usize) {
|
||||
for range in ranges.values_mut() {
|
||||
increment_range(range, len);
|
||||
}
|
||||
}
|
||||
|
||||
fn export_objid(id: &ObjId, actors: &IndexedCache<ActorId>) -> amp::ObjectId {
|
||||
match id {
|
||||
ObjId::Root => amp::ObjectId::Root,
|
||||
ObjId::Op(op) => export_opid(op, actors).into()
|
||||
}
|
||||
}
|
||||
|
||||
fn export_elemid(id: &ElemId, actors: &IndexedCache<ActorId>) -> amp::ElementId {
|
||||
if id == &HEAD {
|
||||
amp::ElementId::Head
|
||||
} else {
|
||||
export_opid(&id.0, actors).into()
|
||||
}
|
||||
}
|
||||
|
||||
fn export_opid(id: &OpId, actors: &IndexedCache<ActorId>) -> amp::OpId {
|
||||
amp::OpId(id.counter(), actors.get(id.actor()).clone())
|
||||
}
|
||||
|
||||
fn export_op(op: &Op, actors: &IndexedCache<ActorId>, props: &IndexedCache<String>) -> amp::Op {
|
||||
let action = op.action.clone();
|
||||
let key = match &op.key {
|
||||
Key::Map(n) => amp::Key::Map(props.get(*n).clone().into()),
|
||||
Key::Seq(id) => amp::Key::Seq(export_elemid(id, actors)),
|
||||
};
|
||||
let obj = export_objid(&op.obj, actors);
|
||||
let pred = op.pred.iter().map(|id| export_opid(id, actors)).collect();
|
||||
amp::Op {
|
||||
action,
|
||||
obj,
|
||||
insert: op.insert,
|
||||
pred,
|
||||
key,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn export_change(
|
||||
change: &Transaction,
|
||||
actors: &IndexedCache<ActorId>,
|
||||
props: &IndexedCache<String>,
|
||||
) -> Change {
|
||||
amp::Change {
|
||||
actor_id: actors.get(change.actor).clone(),
|
||||
seq: change.seq,
|
||||
start_op: change.start_op,
|
||||
time: change.time,
|
||||
deps: change.deps.clone(),
|
||||
message: change.message.clone(),
|
||||
hash: change.hash,
|
||||
operations: change
|
||||
.operations
|
||||
.iter()
|
||||
.map(|op| export_op(op, actors, props))
|
||||
.collect(),
|
||||
extra_bytes: change.extra_bytes.clone(),
|
||||
}
|
||||
.into()
|
||||
}
|
||||
|
||||
pub fn decode_change(bytes: Vec<u8>) -> Result<Change, decoding::Error> {
|
||||
let (chunktype, body) = decode_header_without_hash(&bytes)?;
|
||||
let bytes = if chunktype == BLOCK_TYPE_DEFLATE {
|
||||
decompress_chunk(0..PREAMBLE_BYTES, body, bytes)?
|
||||
} else {
|
||||
ChangeBytes::Uncompressed(bytes)
|
||||
};
|
||||
|
||||
let (chunktype, hash, body) = decode_header(bytes.uncompressed())?;
|
||||
|
||||
if chunktype != BLOCK_TYPE_CHANGE {
|
||||
return Err(decoding::Error::WrongType {
|
||||
expected_one_of: vec![BLOCK_TYPE_CHANGE],
|
||||
found: chunktype,
|
||||
});
|
||||
}
|
||||
|
||||
let body_start = body.start;
|
||||
let mut cursor = body;
|
||||
|
||||
let deps = decode_hashes(bytes.uncompressed(), &mut cursor)?;
|
||||
|
||||
let actor =
|
||||
ActorId::from(&bytes.uncompressed()[slice_bytes(bytes.uncompressed(), &mut cursor)?]);
|
||||
let seq = read_slice(bytes.uncompressed(), &mut cursor)?;
|
||||
let start_op = read_slice(bytes.uncompressed(), &mut cursor)?;
|
||||
let time = read_slice(bytes.uncompressed(), &mut cursor)?;
|
||||
let message = slice_bytes(bytes.uncompressed(), &mut cursor)?;
|
||||
|
||||
let actors = decode_actors(bytes.uncompressed(), &mut cursor, Some(actor))?;
|
||||
|
||||
let ops_info = decode_column_info(bytes.uncompressed(), &mut cursor, false)?;
|
||||
let ops = decode_columns(&mut cursor, &ops_info);
|
||||
|
||||
Ok(Change {
|
||||
bytes,
|
||||
body_start,
|
||||
hash,
|
||||
seq,
|
||||
start_op,
|
||||
time,
|
||||
actors,
|
||||
message,
|
||||
deps,
|
||||
ops,
|
||||
extra_bytes: cursor,
|
||||
})
|
||||
}
fn decompress_chunk(
    preamble: Range<usize>,
    body: Range<usize>,
    compressed: Vec<u8>,
) -> Result<ChangeBytes, decoding::Error> {
    let mut decoder = DeflateDecoder::new(&compressed[body]);
    let mut decompressed = Vec::new();
    decoder.read_to_end(&mut decompressed)?;
    let mut result = Vec::with_capacity(decompressed.len() + preamble.len());
    result.extend(&compressed[preamble]);
    result.push(BLOCK_TYPE_CHANGE);
    leb128::write::unsigned::<Vec<u8>>(&mut result, decompressed.len() as u64).unwrap();
    result.extend(decompressed);
    Ok(ChangeBytes::Compressed {
        uncompressed: result,
        compressed,
    })
}
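// Note on the function above: after inflating the body it rebuilds a full
// chunk in `result` -- the preamble, then BLOCK_TYPE_CHANGE in place of the
// deflate type byte, then the new body length -- so downstream code can
// treat the decompressed bytes exactly like an uncompressed change chunk.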
fn decode_hashes(
    bytes: &[u8],
    cursor: &mut Range<usize>,
) -> Result<Vec<amp::ChangeHash>, decoding::Error> {
    let num_hashes = read_slice(bytes, cursor)?;
    let mut hashes = Vec::with_capacity(num_hashes);
    for _ in 0..num_hashes {
        let hash = cursor.start..(cursor.start + HASH_BYTES);
        *cursor = hash.end..cursor.end;
        hashes.push(
            bytes
                .get(hash)
                .ok_or(decoding::Error::NotEnoughBytes)?
                .try_into()
                .map_err(InvalidChangeError::from)?,
        );
    }
    Ok(hashes)
}

fn decode_actors(
    bytes: &[u8],
    cursor: &mut Range<usize>,
    first: Option<ActorId>,
) -> Result<Vec<ActorId>, decoding::Error> {
    let num_actors: usize = read_slice(bytes, cursor)?;
    let mut actors = Vec::with_capacity(num_actors + 1);
    if let Some(actor) = first {
        actors.push(actor);
    }
    for _ in 0..num_actors {
        actors.push(ActorId::from(
            bytes
                .get(slice_bytes(bytes, cursor)?)
                .ok_or(decoding::Error::NotEnoughBytes)?,
        ));
    }
    Ok(actors)
}

fn decode_column_info(
    bytes: &[u8],
    cursor: &mut Range<usize>,
    allow_compressed_column: bool,
) -> Result<Vec<(u32, usize)>, decoding::Error> {
    let num_columns = read_slice(bytes, cursor)?;
    let mut columns = Vec::with_capacity(num_columns);
    let mut last_id = 0;
    for _ in 0..num_columns {
        let id: u32 = read_slice(bytes, cursor)?;
        if (id & !COLUMN_TYPE_DEFLATE) <= (last_id & !COLUMN_TYPE_DEFLATE) {
            return Err(decoding::Error::ColumnsNotInAscendingOrder {
                last: last_id,
                found: id,
            });
        }
        if id & COLUMN_TYPE_DEFLATE != 0 && !allow_compressed_column {
            return Err(decoding::Error::ChangeContainedCompressedColumns);
        }
        last_id = id;
        let length = read_slice(bytes, cursor)?;
        columns.push((id, length));
    }
    Ok(columns)
}

fn decode_columns(
    cursor: &mut Range<usize>,
    columns: &[(u32, usize)],
) -> HashMap<u32, Range<usize>> {
    let mut ops = HashMap::new();
    for (id, length) in columns {
        let start = cursor.start;
        let end = start + length;
        *cursor = end..cursor.end;
        ops.insert(*id, start..end);
    }
    ops
}

fn decode_header(bytes: &[u8]) -> Result<(u8, amp::ChangeHash, Range<usize>), decoding::Error> {
    let (chunktype, body) = decode_header_without_hash(bytes)?;

    let calculated_hash = Sha256::digest(&bytes[PREAMBLE_BYTES..]);

    let checksum = &bytes[4..8];
    if checksum != &calculated_hash[0..4] {
        return Err(decoding::Error::InvalidChecksum {
            found: checksum.try_into().unwrap(),
            calculated: calculated_hash[0..4].try_into().unwrap(),
        });
    }

    let hash = calculated_hash[..]
        .try_into()
        .map_err(InvalidChangeError::from)?;

    Ok((chunktype, hash, body))
}
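// A minimal sketch of the checksum rule enforced above, assuming the same
// `sha2` crate: the four bytes after the magic number must equal the first
// four bytes of the SHA-256 digest of everything from the chunk-type byte
// onward. `expected_checksum` is a hypothetical helper for illustration,
// not part of this module.
fn expected_checksum(bytes: &[u8]) -> [u8; 4] {
    let digest = Sha256::digest(&bytes[PREAMBLE_BYTES..]);
    [digest[0], digest[1], digest[2], digest[3]]
}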
fn decode_header_without_hash(bytes: &[u8]) -> Result<(u8, Range<usize>), decoding::Error> {
    if bytes.len() <= HEADER_BYTES {
        return Err(decoding::Error::NotEnoughBytes);
    }

    if bytes[0..4] != MAGIC_BYTES {
        return Err(decoding::Error::WrongMagicBytes);
    }

    let (val, len) = read_leb128(&mut &bytes[HEADER_BYTES..])?;
    let body = (HEADER_BYTES + len)..(HEADER_BYTES + len + val);
    if bytes.len() != body.end {
        return Err(decoding::Error::WrongByteLength {
            expected: body.end,
            found: bytes.len(),
        });
    }

    let chunktype = bytes[PREAMBLE_BYTES];

    Ok((chunktype, body))
}

fn load_blocks(bytes: &[u8]) -> Result<Vec<Change>, AutomergeError> {
    let mut changes = Vec::new();
    for slice in split_blocks(bytes)? {
        decode_block(slice, &mut changes)?;
    }
    Ok(changes)
}

fn split_blocks(bytes: &[u8]) -> Result<Vec<&[u8]>, decoding::Error> {
    // split off all valid blocks - ignore the rest if it's corrupted or truncated
    let mut blocks = Vec::new();
    let mut cursor = bytes;
    while let Some(block) = pop_block(cursor)? {
        blocks.push(&cursor[block.clone()]);
        if cursor.len() <= block.end {
            break;
        }
        cursor = &cursor[block.end..];
    }
    Ok(blocks)
}

fn pop_block(bytes: &[u8]) -> Result<Option<Range<usize>>, decoding::Error> {
    if bytes.len() < 4 || bytes[0..4] != MAGIC_BYTES {
        // not reporting an error here - the file may simply be corrupted
        return Ok(None);
    }
    let (val, len) = read_leb128(
        &mut bytes
            .get(HEADER_BYTES..)
            .ok_or(decoding::Error::NotEnoughBytes)?,
    )?;
    // val is arbitrary, so the addition could overflow
    let end = (HEADER_BYTES + len)
        .checked_add(val)
        .ok_or(decoding::Error::Overflow)?;
    if end > bytes.len() {
        // not reporting an error here - the file may simply be truncated
        return Ok(None);
    }
    Ok(Some(0..end))
}

fn decode_block(bytes: &[u8], changes: &mut Vec<Change>) -> Result<(), decoding::Error> {
    match bytes[PREAMBLE_BYTES] {
        BLOCK_TYPE_DOC => {
            changes.extend(decode_document(bytes)?);
            Ok(())
        }
        BLOCK_TYPE_CHANGE | BLOCK_TYPE_DEFLATE => {
            changes.push(decode_change(bytes.to_vec())?);
            Ok(())
        }
        found => Err(decoding::Error::WrongType {
            expected_one_of: vec![BLOCK_TYPE_DOC, BLOCK_TYPE_CHANGE, BLOCK_TYPE_DEFLATE],
            found,
        }),
    }
}

fn decode_document(bytes: &[u8]) -> Result<Vec<Change>, decoding::Error> {
    let (chunktype, _hash, mut cursor) = decode_header(bytes)?;

    // chunktype == 0 is a document, chunktype == 1 is a change
    if chunktype > 0 {
        return Err(decoding::Error::WrongType {
            expected_one_of: vec![0],
            found: chunktype,
        });
    }

    let actors = decode_actors(bytes, &mut cursor, None)?;

    let heads = decode_hashes(bytes, &mut cursor)?;

    let changes_info = decode_column_info(bytes, &mut cursor, true)?;
    let ops_info = decode_column_info(bytes, &mut cursor, true)?;

    let changes_data = decode_columns(&mut cursor, &changes_info);
    let mut doc_changes = ChangeIterator::new(bytes, &changes_data).collect::<Vec<_>>();
    let doc_changes_deps = DepsIterator::new(bytes, &changes_data);

    let doc_changes_len = doc_changes.len();

    let ops_data = decode_columns(&mut cursor, &ops_info);
    let doc_ops: Vec<_> = DocOpIterator::new(bytes, &actors, &ops_data).collect();

    group_doc_change_and_doc_ops(&mut doc_changes, doc_ops, &actors)?;

    let uncompressed_changes =
        doc_changes_to_uncompressed_changes(doc_changes.into_iter(), &actors);

    let changes = compress_doc_changes(uncompressed_changes, doc_changes_deps, doc_changes_len)
        .ok_or(decoding::Error::NoDocChanges)?;

    let mut calculated_heads = HashSet::new();
    for change in &changes {
        for dep in &change.deps {
            calculated_heads.remove(dep);
        }
        calculated_heads.insert(change.hash);
    }

    if calculated_heads != heads.into_iter().collect::<HashSet<_>>() {
        return Err(decoding::Error::MismatchedHeads);
    }

    Ok(changes)
}

fn compress_doc_changes(
    uncompressed_changes: impl Iterator<Item = amp::Change>,
    doc_changes_deps: impl Iterator<Item = Vec<usize>>,
    num_changes: usize,
) -> Option<Vec<Change>> {
    let mut changes: Vec<Change> = Vec::with_capacity(num_changes);

    // fill out the hashes as we go
    for (deps, mut uncompressed_change) in doc_changes_deps.zip_eq(uncompressed_changes) {
        for idx in deps {
            uncompressed_change.deps.push(changes.get(idx)?.hash);
        }
        changes.push(uncompressed_change.into());
    }

    Some(changes)
}

fn group_doc_change_and_doc_ops(
    changes: &mut [DocChange],
    mut ops: Vec<DocOp>,
    actors: &[ActorId],
) -> Result<(), decoding::Error> {
    let mut changes_by_actor: HashMap<usize, Vec<usize>> = HashMap::new();

    for (i, change) in changes.iter().enumerate() {
        let actor_change_index = changes_by_actor.entry(change.actor).or_default();
        if change.seq != (actor_change_index.len() + 1) as u64 {
            return Err(decoding::Error::ChangeDecompressFailed(
                "Doc Seq Invalid".into(),
            ));
        }
        if change.actor >= actors.len() {
            return Err(decoding::Error::ChangeDecompressFailed(
                "Doc Actor Invalid".into(),
            ));
        }
        actor_change_index.push(i);
    }

    let mut op_by_id = HashMap::new();
    ops.iter().enumerate().for_each(|(i, op)| {
        op_by_id.insert((op.ctr, op.actor), i);
    });

    for i in 0..ops.len() {
        let op = ops[i].clone(); // clone to avoid holding a borrow of `ops` while we mutate it
        for succ in &op.succ {
            if let Some(index) = op_by_id.get(succ) {
                ops[*index].pred.push((op.ctr, op.actor));
            } else {
                let key = if op.insert {
                    amp::OpId(op.ctr, actors[op.actor].clone()).into()
                } else {
                    op.key.clone()
                };
                let del = DocOp {
                    actor: succ.1,
                    ctr: succ.0,
                    action: OpType::Del,
                    obj: op.obj.clone(),
                    key,
                    succ: Vec::new(),
                    pred: vec![(op.ctr, op.actor)],
                    insert: false,
                };
                op_by_id.insert(*succ, ops.len());
                ops.push(del);
            }
        }
    }

    for op in ops {
        // binary search for the change this op belongs to
        let actor_change_index = changes_by_actor.entry(op.actor).or_default();
        let mut left = 0;
        let mut right = actor_change_index.len();
        while left < right {
            let seq = (left + right) / 2;
            if changes[actor_change_index[seq]].max_op < op.ctr {
                left = seq + 1;
            } else {
                right = seq;
            }
        }
        if left >= actor_change_index.len() {
            return Err(decoding::Error::ChangeDecompressFailed(
                "Doc MaxOp Invalid".into(),
            ));
        }
        changes[actor_change_index[left]].ops.push(op);
    }

    changes
        .iter_mut()
        .for_each(|change| change.ops.sort_unstable());

    Ok(())
}

fn doc_changes_to_uncompressed_changes<'a>(
    changes: impl Iterator<Item = DocChange> + 'a,
    actors: &'a [ActorId],
) -> impl Iterator<Item = amp::Change> + 'a {
    changes.map(move |change| amp::Change {
        // we've already confirmed that every change.actor is valid
        actor_id: actors[change.actor].clone(),
        seq: change.seq,
        time: change.time,
        start_op: change.max_op - change.ops.len() as u64 + 1,
        hash: None,
        message: change.message,
        operations: change
            .ops
            .into_iter()
            .map(|op| amp::Op {
                action: op.action.clone(),
                insert: op.insert,
                key: op.key,
                obj: op.obj,
                // we've already confirmed that every op.actor is valid
                pred: pred_into(op.pred.into_iter(), actors),
            })
            .collect(),
        deps: Vec::new(),
        extra_bytes: change.extra_bytes,
    })
}

fn pred_into(
    pred: impl Iterator<Item = (u64, usize)>,
    actors: &[ActorId],
) -> amp::SortedVec<amp::OpId> {
    pred.map(|(ctr, actor)| amp::OpId(ctr, actors[actor].clone()))
        .collect()
}
@@ -1,52 +0,0 @@
use crate::types::OpId;
use fxhash::FxBuildHasher;
use std::cmp;
use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Clock(HashMap<usize, u64, FxBuildHasher>);

impl Clock {
    pub fn new() -> Self {
        Clock(Default::default())
    }

    pub fn include(&mut self, key: usize, n: u64) {
        self.0
            .entry(key)
            .and_modify(|m| *m = cmp::max(n, *m))
            .or_insert(n);
    }

    pub fn covers(&self, id: &OpId) -> bool {
        if let Some(val) = self.0.get(&id.actor()) {
            val >= &id.counter()
        } else {
            false
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn covers() {
        let mut clock = Clock::new();

        clock.include(1, 20);
        clock.include(2, 10);

        assert!(clock.covers(&OpId::new(10, 1)));
        assert!(clock.covers(&OpId::new(20, 1)));
        assert!(!clock.covers(&OpId::new(30, 1)));

        assert!(clock.covers(&OpId::new(5, 2)));
        assert!(clock.covers(&OpId::new(10, 2)));
        assert!(!clock.covers(&OpId::new(15, 2)));

        assert!(!clock.covers(&OpId::new(1, 3)));
        assert!(!clock.covers(&OpId::new(100, 3)));
    }
}
@@ -1,376 +0,0 @@
use core::fmt::Debug;
use std::{
    io,
    io::{Read, Write},
    mem,
};

use flate2::{bufread::DeflateEncoder, Compression};
use smol_str::SmolStr;

use crate::columnar::COLUMN_TYPE_DEFLATE;
use crate::ActorId;

pub(crate) const DEFLATE_MIN_SIZE: usize = 256;

/// The error type for encoding operations.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error(transparent)]
    Io(#[from] io::Error),
}

/// Encodes booleans by storing the count of the same value.
///
/// The sequence of numbers describes the count of false values on even indices (0-indexed) and the
/// count of true values on odd indices (0-indexed).
///
/// Counts are encoded as usize.
pub(crate) struct BooleanEncoder {
    buf: Vec<u8>,
    last: bool,
    count: usize,
}

impl BooleanEncoder {
    pub fn new() -> BooleanEncoder {
        BooleanEncoder {
            buf: Vec::new(),
            last: false,
            count: 0,
        }
    }

    pub fn append(&mut self, value: bool) {
        if value == self.last {
            self.count += 1;
        } else {
            self.count.encode(&mut self.buf).ok();
            self.last = value;
            self.count = 1;
        }
    }

    pub fn finish(mut self, col: u32) -> ColData {
        if self.count > 0 {
            self.count.encode(&mut self.buf).ok();
        }
        ColData::new(col, self.buf)
    }
}
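// A minimal sketch of the run layout described above, written as a test
// against this module's own types; the column id 0 passed to `finish` is
// arbitrary and only for illustration. Three `false`s followed by two
// `true`s become the LEB128-encoded counts [3, 2].
#[cfg(test)]
mod boolean_encoder_example {
    use super::*;

    #[test]
    fn false_then_true_runs() {
        let mut enc = BooleanEncoder::new();
        for v in vec![false, false, false, true, true] {
            enc.append(v);
        }
        // counts this small fit in single LEB128 bytes
        assert_eq!(enc.finish(0).data, vec![3, 2]);
    }
}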
/// Encodes integers as the change since the previous value.
///
/// The initial value is 0 encoded as u64. Deltas are encoded as i64.
///
/// Run length encoding is then applied to the resulting sequence.
pub(crate) struct DeltaEncoder {
    rle: RleEncoder<i64>,
    absolute_value: u64,
}

impl DeltaEncoder {
    pub fn new() -> DeltaEncoder {
        DeltaEncoder {
            rle: RleEncoder::new(),
            absolute_value: 0,
        }
    }

    pub fn append_value(&mut self, value: u64) {
        self.rle
            .append_value(value as i64 - self.absolute_value as i64);
        self.absolute_value = value;
    }

    pub fn append_null(&mut self) {
        self.rle.append_null();
    }

    pub fn finish(self, col: u32) -> ColData {
        self.rle.finish(col)
    }
}
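// A quick sketch of the delta transform: the ascending counters
// [10, 11, 11, 12] turn into the deltas [10, 1, 0, 1] before run-length
// encoding. The assertion spells out the RLE framing byte by byte (a
// literal run of length 4, i.e. LEB128 -4 = 0x7c, then the four deltas);
// the bytes were worked out by hand, so treat this as illustrative.
#[cfg(test)]
mod delta_encoder_example {
    use super::*;

    #[test]
    fn deltas_from_counters() {
        let mut enc = DeltaEncoder::new();
        for v in vec![10u64, 11, 11, 12] {
            enc.append_value(v);
        }
        assert_eq!(enc.finish(0).data, vec![0x7c, 10, 1, 0, 1]);
    }
}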
enum RleState<T> {
    Empty,
    NullRun(usize),
    LiteralRun(T, Vec<T>),
    LoneVal(T),
    Run(T, usize),
}

/// Encodes data in run length encoding format. This is very efficient for long repeats of data
///
/// There are 3 types of 'run' in this encoder:
/// - a normal run (compresses repeated values)
/// - a null run (compresses repeated nulls)
/// - a literal run (no compression)
///
/// A normal run consists of the length of the run (encoded as an i64) followed by the encoded value that this run contains.
///
/// A null run consists of a zero value (encoded as an i64) followed by the length of the null run (encoded as a usize).
///
/// A literal run consists of the **negative** length of the run (encoded as an i64) followed by the values in the run.
///
/// Therefore all the types start with an encoded i64, the value of which determines the type of the following data.
pub(crate) struct RleEncoder<T>
where
    T: Encodable + PartialEq + Clone,
{
    buf: Vec<u8>,
    state: RleState<T>,
}

impl<T> RleEncoder<T>
where
    T: Encodable + PartialEq + Clone,
{
    pub fn new() -> RleEncoder<T> {
        RleEncoder {
            buf: Vec::new(),
            state: RleState::Empty,
        }
    }

    pub fn finish(mut self, col: u32) -> ColData {
        match self.take_state() {
            // this covers the only-nulls case
            RleState::NullRun(size) => {
                if !self.buf.is_empty() {
                    self.flush_null_run(size);
                }
            }
            RleState::LoneVal(value) => self.flush_lit_run(vec![value]),
            RleState::Run(value, len) => self.flush_run(&value, len),
            RleState::LiteralRun(last, mut run) => {
                run.push(last);
                self.flush_lit_run(run);
            }
            RleState::Empty => {}
        }
        ColData::new(col, self.buf)
    }

    fn flush_run(&mut self, val: &T, len: usize) {
        self.encode(&(len as i64));
        self.encode(val);
    }

    fn flush_null_run(&mut self, len: usize) {
        self.encode::<i64>(&0);
        self.encode(&len);
    }

    fn flush_lit_run(&mut self, run: Vec<T>) {
        self.encode(&-(run.len() as i64));
        for val in run {
            self.encode(&val);
        }
    }

    fn take_state(&mut self) -> RleState<T> {
        let mut state = RleState::Empty;
        mem::swap(&mut self.state, &mut state);
        state
    }

    pub fn append_null(&mut self) {
        self.state = match self.take_state() {
            RleState::Empty => RleState::NullRun(1),
            RleState::NullRun(size) => RleState::NullRun(size + 1),
            RleState::LoneVal(other) => {
                self.flush_lit_run(vec![other]);
                RleState::NullRun(1)
            }
            RleState::Run(other, len) => {
                self.flush_run(&other, len);
                RleState::NullRun(1)
            }
            RleState::LiteralRun(last, mut run) => {
                run.push(last);
                self.flush_lit_run(run);
                RleState::NullRun(1)
            }
        }
    }

    pub fn append_value(&mut self, value: T) {
        self.state = match self.take_state() {
            RleState::Empty => RleState::LoneVal(value),
            RleState::LoneVal(other) => {
                if other == value {
                    RleState::Run(value, 2)
                } else {
                    let mut v = Vec::with_capacity(2);
                    v.push(other);
                    RleState::LiteralRun(value, v)
                }
            }
            RleState::Run(other, len) => {
                if other == value {
                    RleState::Run(other, len + 1)
                } else {
                    self.flush_run(&other, len);
                    RleState::LoneVal(value)
                }
            }
            RleState::LiteralRun(last, mut run) => {
                if last == value {
                    self.flush_lit_run(run);
                    RleState::Run(value, 2)
                } else {
                    run.push(last);
                    RleState::LiteralRun(value, run)
                }
            }
            RleState::NullRun(size) => {
                self.flush_null_run(size);
                RleState::LoneVal(value)
            }
        }
    }

    fn encode<V>(&mut self, val: &V)
    where
        V: Encodable,
    {
        val.encode(&mut self.buf).ok();
    }
}
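// A sketch of two of the run types framed exactly as the doc comment above
// describes; the expected byte vectors were worked out by hand from the
// LEB128 rules and are illustrative only.
#[cfg(test)]
mod rle_encoder_example {
    use super::*;

    #[test]
    fn run_framing() {
        // Normal run: length 4 as a signed LEB128 i64, then the value 7.
        let mut enc: RleEncoder<i64> = RleEncoder::new();
        for _ in 0..4 {
            enc.append_value(7);
        }
        assert_eq!(enc.finish(0).data, vec![4, 7]);

        // Literal run: negative length (-2 = 0x7e in signed LEB128),
        // then each value verbatim.
        let mut enc: RleEncoder<i64> = RleEncoder::new();
        enc.append_value(1);
        enc.append_value(2);
        assert_eq!(enc.finish(0).data, vec![0x7e, 1, 2]);
    }
}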
pub(crate) trait Encodable {
    fn encode_with_actors_to_vec(&self, actors: &mut Vec<ActorId>) -> io::Result<Vec<u8>> {
        let mut buf = Vec::new();
        self.encode_with_actors(&mut buf, actors)?;
        Ok(buf)
    }

    fn encode_with_actors<R: Write>(
        &self,
        buf: &mut R,
        _actors: &mut Vec<ActorId>,
    ) -> io::Result<usize> {
        self.encode(buf)
    }

    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize>;
}

impl Encodable for SmolStr {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        let bytes = self.as_bytes();
        let head = bytes.len().encode(buf)?;
        buf.write_all(bytes)?;
        Ok(head + bytes.len())
    }
}

impl Encodable for String {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        let bytes = self.as_bytes();
        let head = bytes.len().encode(buf)?;
        buf.write_all(bytes)?;
        Ok(head + bytes.len())
    }
}

impl Encodable for Option<String> {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        if let Some(s) = self {
            s.encode(buf)
        } else {
            0.encode(buf)
        }
    }
}

impl Encodable for u64 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        leb128::write::unsigned(buf, *self)
    }
}

impl Encodable for f64 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        let bytes = self.to_le_bytes();
        buf.write_all(&bytes)?;
        Ok(bytes.len())
    }
}

impl Encodable for f32 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        let bytes = self.to_le_bytes();
        buf.write_all(&bytes)?;
        Ok(bytes.len())
    }
}

impl Encodable for i64 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        leb128::write::signed(buf, *self)
    }
}

impl Encodable for usize {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        (*self as u64).encode(buf)
    }
}

impl Encodable for u32 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        u64::from(*self).encode(buf)
    }
}

impl Encodable for i32 {
    fn encode<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        i64::from(*self).encode(buf)
    }
}

#[derive(Debug)]
pub(crate) struct ColData {
    pub col: u32,
    pub data: Vec<u8>,
    #[cfg(debug_assertions)]
    has_been_deflated: bool,
}

impl ColData {
    pub fn new(col_id: u32, data: Vec<u8>) -> ColData {
        ColData {
            col: col_id,
            data,
            #[cfg(debug_assertions)]
            has_been_deflated: false,
        }
    }

    pub fn encode_col_len<R: Write>(&self, buf: &mut R) -> io::Result<usize> {
        let mut len = 0;
        if !self.data.is_empty() {
            len += self.col.encode(buf)?;
            len += self.data.len().encode(buf)?;
        }
        Ok(len)
    }

    pub fn deflate(&mut self) {
        #[cfg(debug_assertions)]
        {
            debug_assert!(!self.has_been_deflated);
            self.has_been_deflated = true;
        }
        if self.data.len() > DEFLATE_MIN_SIZE {
            let mut deflated = Vec::new();
            let mut deflater = DeflateEncoder::new(&self.data[..], Compression::default());
            // this unwrap should be okay as we're reading and writing to in-memory buffers
            deflater.read_to_end(&mut deflated).unwrap();
            self.col |= COLUMN_TYPE_DEFLATE;
            self.data = deflated;
        }
    }
}
@@ -1,61 +0,0 @@
use crate::decoding;
use crate::value::DataType;
use crate::ScalarValue;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum AutomergeError {
    #[error("invalid opid format `{0}`")]
    InvalidOpId(String),
    #[error("there was an encoding problem")]
    Encoding,
    #[error("there was a decoding problem")]
    Decoding,
    #[error("key must not be an empty string")]
    EmptyStringKey,
    #[error("invalid seq {0}")]
    InvalidSeq(u64),
    #[error("index {0} is out of bounds")]
    InvalidIndex(usize),
}

impl From<std::io::Error> for AutomergeError {
    fn from(_: std::io::Error) -> Self {
        AutomergeError::Encoding
    }
}

impl From<decoding::Error> for AutomergeError {
    fn from(_: decoding::Error) -> Self {
        AutomergeError::Decoding
    }
}

#[derive(Error, Debug)]
#[error("Invalid actor ID: {0}")]
pub struct InvalidActorId(pub String);

#[derive(Error, Debug, PartialEq)]
#[error("Invalid scalar value, expected {expected} but received {unexpected}")]
pub(crate) struct InvalidScalarValue {
    pub raw_value: ScalarValue,
    pub datatype: DataType,
    pub unexpected: String,
    pub expected: String,
}

#[derive(Error, Debug, PartialEq)]
#[error("Invalid change hash slice: {0:?}")]
pub struct InvalidChangeHashSlice(pub Vec<u8>);

#[derive(Error, Debug, PartialEq)]
#[error("Invalid object ID: {0}")]
pub struct InvalidObjectId(pub String);

#[derive(Error, Debug)]
#[error("Invalid element ID: {0}")]
pub struct InvalidElementId(pub String);

#[derive(Error, Debug)]
#[error("Invalid OpID: {0}")]
pub struct InvalidOpId(pub String);
@@ -1,109 +0,0 @@
use std::{borrow::Cow, fmt::Display, str::FromStr};

use crate::{op_tree::OpSetMetadata, types::OpId, ActorId};

const ROOT_STR: &str = "_root";

#[derive(Debug, PartialEq, Clone, Hash, Eq)]
pub struct ExternalOpId {
    counter: u64,
    actor: ActorId,
}

impl ExternalOpId {
    pub(crate) fn from_internal(opid: &OpId, metadata: &OpSetMetadata) -> Option<ExternalOpId> {
        metadata
            .actors
            .get_safe(opid.actor())
            .map(|actor| ExternalOpId {
                counter: opid.counter(),
                actor: actor.clone(),
            })
    }

    pub(crate) fn counter(&self) -> u64 {
        self.counter
    }

    pub(crate) fn actor(&self) -> &ActorId {
        &self.actor
    }
}

#[derive(Debug, PartialEq, Clone, Hash, Eq)]
pub enum ExternalObjId<'a> {
    Root,
    Op(Cow<'a, ExternalOpId>),
}

impl<'a> ExternalObjId<'a> {
    pub fn into_owned(self) -> ExternalObjId<'static> {
        match self {
            Self::Root => ExternalObjId::Root,
            Self::Op(cow) => ExternalObjId::Op(Cow::<'static, _>::Owned(cow.into_owned().into())),
        }
    }
}

impl<'a> From<&'a ExternalOpId> for ExternalObjId<'a> {
    fn from(op: &'a ExternalOpId) -> Self {
        ExternalObjId::Op(Cow::Borrowed(op))
    }
}

impl From<ExternalOpId> for ExternalObjId<'static> {
    fn from(op: ExternalOpId) -> Self {
        ExternalObjId::Op(Cow::Owned(op))
    }
}

#[derive(thiserror::Error, Debug)]
pub enum ParseError {
    #[error("op IDs should have the format <counter>@<hex encoded actor>")]
    BadFormat,
    #[error("the counter of an opid should be a positive integer")]
    InvalidCounter,
    #[error("the actor of an opid should be valid hex encoded bytes")]
    InvalidActor,
}

impl FromStr for ExternalOpId {
    type Err = ParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split('@');
        let first_part = parts.next().ok_or(ParseError::BadFormat)?;
        let second_part = parts.next().ok_or(ParseError::BadFormat)?;
        let counter: u64 = first_part.parse().map_err(|_| ParseError::InvalidCounter)?;
        let actor: ActorId = second_part.parse().map_err(|_| ParseError::InvalidActor)?;
        Ok(ExternalOpId { counter, actor })
    }
}

impl FromStr for ExternalObjId<'static> {
    type Err = ParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == ROOT_STR {
            Ok(ExternalObjId::Root)
        } else {
            let op = s.parse::<ExternalOpId>()?.into();
            Ok(ExternalObjId::Op(Cow::Owned(op)))
        }
    }
}

impl Display for ExternalOpId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}@{}", self.counter, self.actor)
    }
}

impl<'a> Display for ExternalObjId<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Root => write!(f, "{}", ROOT_STR),
            Self::Op(op) => write!(f, "{}", op),
        }
    }
}
@@ -1,84 +0,0 @@
use itertools::Itertools;
use std::collections::HashMap;
use std::hash::Hash;
use std::ops::Index;

#[derive(Debug, Clone)]
pub(crate) struct IndexedCache<T> {
    pub cache: Vec<T>,
    lookup: HashMap<T, usize>,
}

impl<T> IndexedCache<T>
where
    T: Clone + Eq + Hash + Ord,
{
    pub fn new() -> Self {
        IndexedCache {
            cache: Default::default(),
            lookup: Default::default(),
        }
    }

    pub fn cache(&mut self, item: T) -> usize {
        if let Some(n) = self.lookup.get(&item) {
            *n
        } else {
            let n = self.cache.len();
            self.cache.push(item.clone());
            self.lookup.insert(item, n);
            n
        }
    }

    pub fn lookup(&self, item: T) -> Option<usize> {
        self.lookup.get(&item).cloned()
    }

    pub fn len(&self) -> usize {
        self.cache.len()
    }

    pub fn get(&self, index: usize) -> &T {
        &self.cache[index]
    }

    // Todo replace all uses of `get` with this
    pub fn get_safe(&self, index: usize) -> Option<&T> {
        self.cache.get(index)
    }

    pub fn sorted(&self) -> IndexedCache<T> {
        let mut sorted = Self::new();
        self.cache.iter().sorted().cloned().for_each(|item| {
            let n = sorted.cache.len();
            sorted.cache.push(item.clone());
            sorted.lookup.insert(item, n);
        });
        sorted
    }

    pub fn encode_index(&self) -> Vec<usize> {
        let sorted: Vec<_> = self.cache.iter().sorted().cloned().collect();
        self.cache
            .iter()
            .map(|a| sorted.iter().position(|r| r == a).unwrap())
            .collect()
    }
}
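// A small usage sketch: indices are handed out in first-seen order and are
// stable, while `encode_index` maps each slot to its rank in sorted order.
// Illustrative only.
#[cfg(test)]
mod indexed_cache_example {
    use super::*;

    #[test]
    fn dedup_and_sorted_ranks() {
        let mut c: IndexedCache<String> = IndexedCache::new();
        assert_eq!(c.cache("b".to_string()), 0);
        assert_eq!(c.cache("a".to_string()), 1);
        assert_eq!(c.cache("b".to_string()), 0); // already cached
        assert_eq!(c.lookup("a".to_string()), Some(1));
        // "b" sorts after "a", so slot 0 maps to rank 1 and slot 1 to rank 0
        assert_eq!(c.encode_index(), vec![1, 0]);
    }
}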
impl<T> IntoIterator for IndexedCache<T> {
    type Item = T;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        self.cache.into_iter()
    }
}

impl<T> Index<usize> for IndexedCache<T> {
    type Output = T;
    fn index(&self, i: usize) -> &T {
        &self.cache[i]
    }
}
@@ -1,57 +0,0 @@
use std::fmt;

use smol_str::SmolStr;

use crate::legacy::ScalarValue;

impl From<&str> for ScalarValue {
    fn from(s: &str) -> Self {
        ScalarValue::Str(s.into())
    }
}

impl From<i64> for ScalarValue {
    fn from(n: i64) -> Self {
        ScalarValue::Int(n)
    }
}

impl From<u64> for ScalarValue {
    fn from(n: u64) -> Self {
        ScalarValue::Uint(n)
    }
}

impl From<i32> for ScalarValue {
    fn from(n: i32) -> Self {
        ScalarValue::Int(n as i64)
    }
}

impl From<bool> for ScalarValue {
    fn from(b: bool) -> Self {
        ScalarValue::Boolean(b)
    }
}

impl From<char> for ScalarValue {
    fn from(c: char) -> Self {
        ScalarValue::Str(SmolStr::new(c.to_string()))
    }
}

impl fmt::Display for ScalarValue {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ScalarValue::Bytes(b) => write!(f, "\"{:?}\"", b),
            ScalarValue::Str(s) => write!(f, "\"{}\"", s),
            ScalarValue::Int(i) => write!(f, "{}", i),
            ScalarValue::Uint(i) => write!(f, "{}", i),
            ScalarValue::F64(n) => write!(f, "{:.324}", n),
            ScalarValue::Counter(c) => write!(f, "Counter: {}", c),
            ScalarValue::Timestamp(i) => write!(f, "Timestamp: {}", i),
            ScalarValue::Boolean(b) => write!(f, "{}", b),
            ScalarValue::Null => write!(f, "null"),
        }
    }
}
automerge/src/lib.rs (1432 changed lines)
@@ -1,224 +0,0 @@
use crate::op_tree::OpTreeInternal;
use crate::query::TreeQuery;
use crate::{ActorId, IndexedCache, Key, types::{ObjId, OpId}, Op};
use crate::external_types::ExternalOpId;
use fxhash::FxBuildHasher;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::rc::Rc;
use std::cell::RefCell;
use std::fmt::Debug;

pub(crate) type OpSet = OpSetInternal<16>;

#[derive(Debug, Clone)]
pub(crate) struct OpSetInternal<const B: usize> {
    trees: HashMap<ObjId, OpTreeInternal<B>, FxBuildHasher>,
    objs: Vec<ObjId>,
    length: usize,
    pub m: Rc<RefCell<OpSetMetadata>>,
}

impl<const B: usize> OpSetInternal<B> {
    pub fn new() -> Self {
        OpSetInternal {
            trees: Default::default(),
            objs: Default::default(),
            length: 0,
            m: Rc::new(RefCell::new(OpSetMetadata {
                actors: IndexedCache::new(),
                props: IndexedCache::new(),
                last_opid: None,
            })),
        }
    }

    pub fn iter(&self) -> Iter<'_, B> {
        Iter {
            inner: self,
            index: 0,
            sub_index: 0,
        }
    }

    pub fn search<Q>(&self, obj: ObjId, query: Q) -> Q
    where
        Q: TreeQuery<B>,
    {
        if let Some(tree) = self.trees.get(&obj) {
            tree.search(query, &*self.m.borrow())
        } else {
            query
        }
    }

    pub fn replace<F>(&mut self, obj: ObjId, index: usize, f: F) -> Option<Op>
    where
        F: FnMut(&mut Op),
    {
        if let Some(tree) = self.trees.get_mut(&obj) {
            tree.replace(index, f)
        } else {
            None
        }
    }

    pub fn remove(&mut self, obj: ObjId, index: usize) -> Op {
        let tree = self.trees.get_mut(&obj).unwrap();
        self.length -= 1;
        let op = tree.remove(index);
        if tree.is_empty() {
            self.trees.remove(&obj);
        }
        op
    }

    pub fn len(&self) -> usize {
        self.length
    }

    pub fn insert(&mut self, index: usize, element: Op) {
        let Self {
            ref mut trees,
            ref mut objs,
            ref mut m,
            ..
        } = self;
        trees
            .entry(element.obj)
            .or_insert_with(|| {
                let pos = objs
                    .binary_search_by(|probe| m.borrow().lamport_cmp(probe, &element.obj))
                    .unwrap_err();
                objs.insert(pos, element.obj);
                Default::default()
            })
            .insert(index, element);
        self.length += 1;
    }

    #[cfg(feature = "optree-visualisation")]
    pub fn visualise(&self) -> String {
        let mut out = Vec::new();
        let graph = super::visualisation::GraphVisualisation::construct(&self.trees, &self.m);
        dot::render(&graph, &mut out).unwrap();
        String::from_utf8_lossy(&out[..]).to_string()
    }
}

impl<const B: usize> Default for OpSetInternal<B> {
    fn default() -> Self {
        Self::new()
    }
}

impl<'a, const B: usize> IntoIterator for &'a OpSetInternal<B> {
    type Item = &'a Op;

    type IntoIter = Iter<'a, B>;

    fn into_iter(self) -> Self::IntoIter {
        Iter {
            inner: self,
            index: 0,
            sub_index: 0,
        }
    }
}

pub(crate) struct Iter<'a, const B: usize> {
    inner: &'a OpSetInternal<B>,
    index: usize,
    sub_index: usize,
}

impl<'a, const B: usize> Iterator for Iter<'a, B> {
    type Item = &'a Op;

    fn next(&mut self) -> Option<Self::Item> {
        let obj = self.inner.objs.get(self.index)?;
        let tree = self.inner.trees.get(obj)?;
        self.sub_index += 1;
        if let Some(op) = tree.get(self.sub_index - 1) {
            Some(op)
        } else {
            self.index += 1;
            self.sub_index = 1;
            // FIXME is it possible that a rolled-back transaction could break the iterator by
            // leaving an empty tree?
            let obj = self.inner.objs.get(self.index)?;
            let tree = self.inner.trees.get(obj)?;
            tree.get(self.sub_index - 1)
        }
    }
}

#[derive(Debug, Clone)]
pub(crate) struct OpSetMetadata {
    pub actors: IndexedCache<ActorId>,
    pub props: IndexedCache<String>,
    // For the common case of many subsequent operations on the same object we cache the last
    // opid we looked up
    last_opid: Option<(ExternalOpId, OpId)>,
}

impl OpSetMetadata {
    pub fn key_cmp(&self, left: &Key, right: &Key) -> Ordering {
        match (left, right) {
            (Key::Map(a), Key::Map(b)) => self.props[*a].cmp(&self.props[*b]),
            _ => panic!("can only compare map keys"),
        }
    }

    pub fn lamport_cmp<S: SuccinctLamport>(&self, left: S, right: S) -> Ordering {
        S::cmp(self, left, right)
    }

    pub fn import_opid(&mut self, ext_opid: &ExternalOpId) -> OpId {
        if let Some((last_ext, last_int)) = &self.last_opid {
            if last_ext == ext_opid {
                return *last_int;
            }
        }
        let actor = self.actors.cache(ext_opid.actor().clone());
        let opid = OpId::new(ext_opid.counter(), actor);
        self.last_opid = Some((ext_opid.clone(), opid));
        opid
    }
}

/// Lamport timestamps which don't contain their actor ID directly and therefore need access to
/// some metadata to compare their actor ID parts
pub(crate) trait SuccinctLamport {
    fn cmp(m: &OpSetMetadata, left: Self, right: Self) -> Ordering;
}

impl SuccinctLamport for OpId {
    fn cmp(m: &OpSetMetadata, left: Self, right: Self) -> Ordering {
        match (left.counter(), right.counter()) {
            (0, 0) => Ordering::Equal,
            (0, _) => Ordering::Less,
            (_, 0) => Ordering::Greater,
            (a, b) if a == b => m.actors[right.actor()].cmp(&m.actors[left.actor()]),
            (a, b) => a.cmp(&b),
        }
    }
}

impl SuccinctLamport for ObjId {
    fn cmp(m: &OpSetMetadata, left: Self, right: Self) -> Ordering {
        match (left, right) {
            (ObjId::Root, ObjId::Root) => Ordering::Equal,
            (ObjId::Root, ObjId::Op(_)) => Ordering::Less,
            (ObjId::Op(_), ObjId::Root) => Ordering::Greater,
            (ObjId::Op(left_op), ObjId::Op(right_op)) => {
                <OpId as SuccinctLamport>::cmp(m, left_op, right_op)
            }
        }
    }
}

impl SuccinctLamport for &ObjId {
    fn cmp(m: &OpSetMetadata, left: Self, right: Self) -> Ordering {
        <ObjId as SuccinctLamport>::cmp(m, *left, *right)
    }
}
@@ -1,361 +0,0 @@
use crate::op_tree::{OpSetMetadata, OpTreeNode};
use crate::{Clock, ElemId, Op, ScalarValue, types::{OpId, OpType}};
use fxhash::FxBuildHasher;
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;

mod insert;
mod keys;
mod keys_at;
mod len;
mod len_at;
mod list_vals;
mod list_vals_at;
mod nth;
mod nth_at;
mod prop;
mod prop_at;
mod seek_op;

pub(crate) use insert::InsertNth;
pub(crate) use keys::Keys;
pub(crate) use keys_at::KeysAt;
pub(crate) use len::Len;
pub(crate) use len_at::LenAt;
pub(crate) use list_vals::ListVals;
pub(crate) use list_vals_at::ListValsAt;
pub(crate) use nth::Nth;
pub(crate) use nth_at::NthAt;
pub(crate) use prop::Prop;
pub(crate) use prop_at::PropAt;
pub(crate) use seek_op::SeekOp;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct CounterData {
    pos: usize,
    val: i64,
    succ: HashSet<OpId>,
    op: Op,
}

pub(crate) trait TreeQuery<const B: usize> {
    #[inline(always)]
    fn query_node_with_metadata(
        &mut self,
        child: &OpTreeNode<B>,
        _m: &OpSetMetadata,
    ) -> QueryResult {
        self.query_node(child)
    }

    fn query_node(&mut self, _child: &OpTreeNode<B>) -> QueryResult {
        QueryResult::Decend
    }

    #[inline(always)]
    fn query_element_with_metadata(&mut self, element: &Op, _m: &OpSetMetadata) -> QueryResult {
        self.query_element(element)
    }

    fn query_element(&mut self, _element: &Op) -> QueryResult {
        panic!("invalid element query")
    }
}

#[derive(Debug, Clone, PartialEq)]
pub(crate) enum QueryResult {
    Next,
    Decend,
    Finish,
}

#[derive(Clone, Debug, PartialEq)]
pub(crate) struct Index {
    pub len: usize,
    pub visible: HashMap<ElemId, usize, FxBuildHasher>,
    pub ops: HashSet<OpId, FxBuildHasher>,
}

impl Index {
    pub fn new() -> Self {
        Index {
            len: 0,
            visible: Default::default(),
            ops: Default::default(),
        }
    }

    pub fn has(&self, e: &Option<ElemId>) -> bool {
        if let Some(seen) = e {
            self.visible.contains_key(seen)
        } else {
            false
        }
    }

    pub fn replace(&mut self, old: &Op, new: &Op) {
        if old.id != new.id {
            self.ops.remove(&old.id);
            self.ops.insert(new.id);
        }

        assert!(new.key == old.key);

        match (new.succ.is_empty(), old.succ.is_empty(), new.elemid()) {
            (false, true, Some(elem)) => match self.visible.get(&elem).copied() {
                Some(n) if n == 1 => {
                    self.len -= 1;
                    self.visible.remove(&elem);
                }
                Some(n) => {
                    self.visible.insert(elem, n - 1);
                }
                None => panic!("remove overun in index"),
            },
            (true, false, Some(elem)) => match self.visible.get(&elem).copied() {
                Some(n) => {
                    self.visible.insert(elem, n + 1);
                }
                None => {
                    self.len += 1;
                    self.visible.insert(elem, 1);
                }
            },
            _ => {}
        }
    }

    pub fn insert(&mut self, op: &Op) {
        self.ops.insert(op.id);
        if op.succ.is_empty() {
            if let Some(elem) = op.elemid() {
                match self.visible.get(&elem).copied() {
                    Some(n) => {
                        self.visible.insert(elem, n + 1);
                    }
                    None => {
                        self.len += 1;
                        self.visible.insert(elem, 1);
                    }
                }
            }
        }
    }

    pub fn remove(&mut self, op: &Op) {
        self.ops.remove(&op.id);
        if op.succ.is_empty() {
            if let Some(elem) = op.elemid() {
                match self.visible.get(&elem).copied() {
                    Some(n) if n == 1 => {
                        self.len -= 1;
                        self.visible.remove(&elem);
                    }
                    Some(n) => {
                        self.visible.insert(elem, n - 1);
                    }
                    None => panic!("remove overun in index"),
                }
            }
        }
    }

    pub fn merge(&mut self, other: &Index) {
        for id in &other.ops {
            self.ops.insert(*id);
        }
        for (elem, n) in other.visible.iter() {
            match self.visible.get(elem).cloned() {
                None => {
                    self.visible.insert(*elem, 1);
                    self.len += 1;
                }
                Some(m) => {
                    self.visible.insert(*elem, m + n);
                }
            }
        }
    }
}

impl Default for Index {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug, Clone, PartialEq, Default)]
pub(crate) struct VisWindow {
    counters: HashMap<OpId, CounterData>,
}

impl VisWindow {
    fn visible(&mut self, op: &Op, pos: usize) -> bool {
        let mut visible = false;
        match op.action {
            OpType::Set(ScalarValue::Counter(val)) => {
                self.counters.insert(
                    op.id,
                    CounterData {
                        pos,
                        val,
                        succ: op.succ.iter().cloned().collect(),
                        op: op.clone(),
                    },
                );
                if op.succ.is_empty() {
                    visible = true;
                }
            }
            OpType::Inc(inc_val) => {
                for id in &op.pred {
                    if let Some(mut entry) = self.counters.get_mut(id) {
                        entry.succ.remove(&op.id);
                        entry.val += inc_val;
                        entry.op.action = OpType::Set(ScalarValue::Counter(entry.val));
                        if entry.succ.is_empty() {
                            visible = true;
                        }
                    }
                }
            }
            _ => {
                if op.succ.is_empty() {
                    visible = true;
                }
            }
        };
        visible
    }

    fn visible_at(&mut self, op: &Op, pos: usize, clock: &Clock) -> bool {
        if !clock.covers(&op.id) {
            return false;
        }

        let mut visible = false;
        match op.action {
            OpType::Set(ScalarValue::Counter(val)) => {
                self.counters.insert(
                    op.id,
                    CounterData {
                        pos,
                        val,
                        succ: op.succ.iter().cloned().collect(),
                        op: op.clone(),
                    },
                );
                if !op.succ.iter().any(|i| clock.covers(i)) {
                    visible = true;
                }
            }
            OpType::Inc(inc_val) => {
                for id in &op.pred {
                    // pred is always before op.id so we can see them
                    if let Some(mut entry) = self.counters.get_mut(id) {
                        entry.succ.remove(&op.id);
                        entry.val += inc_val;
                        entry.op.action = OpType::Set(ScalarValue::Counter(entry.val));
                        if !entry.succ.iter().any(|i| clock.covers(i)) {
                            visible = true;
                        }
                    }
                }
            }
            _ => {
                if !op.succ.iter().any(|i| clock.covers(i)) {
                    visible = true;
                }
            }
        };
        visible
    }

    pub fn seen_op(&self, op: &Op, pos: usize) -> Vec<(usize, Op)> {
        let mut result = vec![];
        for pred in &op.pred {
            if let Some(entry) = self.counters.get(pred) {
                result.push((entry.pos, entry.op.clone()));
            }
        }
        if result.is_empty() {
            vec![(pos, op.clone())]
        } else {
            result
        }
    }
}

pub(crate) fn is_visible(op: &Op, pos: usize, counters: &mut HashMap<OpId, CounterData>) -> bool {
    let mut visible = false;
    match op.action {
        OpType::Set(ScalarValue::Counter(val)) => {
            counters.insert(
                op.id,
                CounterData {
                    pos,
                    val,
                    succ: op.succ.iter().cloned().collect(),
                    op: op.clone(),
                },
            );
            if op.succ.is_empty() {
                visible = true;
            }
        }
        OpType::Inc(inc_val) => {
            for id in &op.pred {
                if let Some(mut entry) = counters.get_mut(id) {
                    entry.succ.remove(&op.id);
                    entry.val += inc_val;
                    entry.op.action = OpType::Set(ScalarValue::Counter(entry.val));
                    if entry.succ.is_empty() {
                        visible = true;
                    }
                }
            }
        }
        _ => {
            if op.succ.is_empty() {
                visible = true;
            }
        }
    };
    visible
}

pub(crate) fn visible_op(
    op: &Op,
    pos: usize,
    counters: &HashMap<OpId, CounterData>,
) -> Vec<(usize, Op)> {
    let mut result = vec![];
    for pred in &op.pred {
        if let Some(entry) = counters.get(pred) {
            result.push((entry.pos, entry.op.clone()));
        }
    }
    if result.is_empty() {
        vec![(pos, op.clone())]
    } else {
        result
    }
}

pub(crate) fn binary_search_by<F, const B: usize>(node: &OpTreeNode<B>, f: F) -> usize
where
    F: Fn(&Op) -> Ordering,
{
    let mut right = node.len();
    let mut left = 0;
    while left < right {
        let seq = (left + right) / 2;
        if f(node.get(seq).unwrap()) == Ordering::Less {
            left = seq + 1;
        } else {
            right = seq;
        }
    }
    left
}
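// The same lower-bound loop as `binary_search_by`, restated over a plain
// sorted slice for intuition: it returns the first index whose element is
// not `Less` than the probe (illustrative only, not used by this module).
#[cfg(test)]
mod binary_search_example {
    #[test]
    fn lower_bound() {
        fn lower_bound(xs: &[i64], target: i64) -> usize {
            let (mut left, mut right) = (0, xs.len());
            while left < right {
                let mid = (left + right) / 2;
                if xs[mid] < target {
                    left = mid + 1;
                } else {
                    right = mid;
                }
            }
            left
        }
        assert_eq!(lower_bound(&[1, 3, 3, 7], 3), 1); // first 3
        assert_eq!(lower_bound(&[1, 3, 3, 7], 4), 3); // insertion point
    }
}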
@@ -1,80 +0,0 @@
use crate::op_tree::OpTreeNode;
use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::{AutomergeError, ElemId, Key, Op, HEAD};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct InsertNth<const B: usize> {
    target: usize,
    seen: usize,
    pub pos: usize,
    last_seen: Option<ElemId>,
    last_insert: Option<ElemId>,
    window: VisWindow,
}

impl<const B: usize> InsertNth<B> {
    pub fn new(target: usize) -> Self {
        InsertNth {
            target,
            seen: 0,
            pos: 0,
            last_seen: None,
            last_insert: None,
            window: Default::default(),
        }
    }

    pub fn key(&self) -> Result<Key, AutomergeError> {
        if self.target == 0 {
            Ok(HEAD.into())
        } else if self.seen == self.target && self.last_insert.is_some() {
            Ok(Key::Seq(self.last_insert.unwrap()))
        } else {
            Err(AutomergeError::InvalidIndex(self.target))
        }
    }
}

impl<const B: usize> TreeQuery<B> for InsertNth<B> {
    fn query_node(&mut self, child: &OpTreeNode<B>) -> QueryResult {
        if self.target == 0 {
            // inserting at the start of the object; all inserts sort lower because this is local
            self.pos = 0;
            return QueryResult::Finish;
        }
        let mut num_vis = child.index.len;
        if num_vis > 0 {
            if child.index.has(&self.last_seen) {
                num_vis -= 1;
            }
            if self.seen + num_vis >= self.target {
                QueryResult::Decend
            } else {
                self.pos += child.len();
                self.seen += num_vis;
                self.last_seen = child.last().elemid();
                QueryResult::Next
            }
        } else {
            self.pos += child.len();
            QueryResult::Next
        }
    }

    fn query_element(&mut self, element: &Op) -> QueryResult {
        if element.insert {
            if self.seen >= self.target {
                return QueryResult::Finish;
            };
            self.last_seen = None;
            self.last_insert = element.elemid();
        }
        if self.last_seen.is_none() && self.window.visible(element, self.pos) {
            self.seen += 1;
            self.last_seen = element.elemid()
        }
        self.pos += 1;
        QueryResult::Next
    }
}
@@ -1,34 +0,0 @@
use crate::op_tree::OpTreeNode;
use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::Key;
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Keys<const B: usize> {
    pub keys: Vec<Key>,
    window: VisWindow,
}

impl<const B: usize> Keys<B> {
    pub fn new() -> Self {
        Keys {
            keys: vec![],
            window: Default::default(),
        }
    }
}

impl<const B: usize> TreeQuery<B> for Keys<B> {
    fn query_node(&mut self, child: &OpTreeNode<B>) -> QueryResult {
        let mut last = None;
        for i in 0..child.len() {
            let op = child.get(i).unwrap();
            let visible = self.window.visible(op, i);
            if Some(op.key) != last && visible {
                self.keys.push(op.key);
                last = Some(op.key);
            }
        }
        QueryResult::Finish
    }
}
@@ -1,36 +0,0 @@
use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::{Clock, Key, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct KeysAt<const B: usize> {
    clock: Clock,
    pub keys: Vec<Key>,
    last: Option<Key>,
    window: VisWindow,
    pos: usize,
}

impl<const B: usize> KeysAt<B> {
    pub fn new(clock: Clock) -> Self {
        KeysAt {
            clock,
            pos: 0,
            last: None,
            keys: vec![],
            window: Default::default(),
        }
    }
}

impl<const B: usize> TreeQuery<B> for KeysAt<B> {
    fn query_element(&mut self, op: &Op) -> QueryResult {
        let visible = self.window.visible_at(op, self.pos, &self.clock);
        if Some(op.key) != self.last && visible {
            self.keys.push(op.key);
            self.last = Some(op.key);
        }
        self.pos += 1;
        QueryResult::Next
    }
}
@@ -1,23 +0,0 @@
use crate::op_tree::OpTreeNode;
use crate::query::{QueryResult, TreeQuery};
use crate::types::ObjId;
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Len<const B: usize> {
    obj: ObjId,
    pub len: usize,
}

impl<const B: usize> Len<B> {
    pub fn new(obj: ObjId) -> Self {
        Len { obj, len: 0 }
    }
}

impl<const B: usize> TreeQuery<B> for Len<B> {
    fn query_node(&mut self, child: &OpTreeNode<B>) -> QueryResult {
        self.len = child.index.len;
        QueryResult::Finish
    }
}
@@ -1,48 +0,0 @@
use crate::op_tree::{OpSetMetadata, OpTreeNode};
use crate::query::{binary_search_by, is_visible, visible_op, QueryResult, TreeQuery};
use crate::{ElemId, types::ObjId, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct ListVals {
    obj: ObjId,
    last_elem: Option<ElemId>,
    pub ops: Vec<Op>,
}

impl ListVals {
    pub fn new(obj: ObjId) -> Self {
        ListVals {
            obj,
            last_elem: None,
            ops: vec![],
        }
    }
}

impl<const B: usize> TreeQuery<B> for ListVals {
    fn query_node_with_metadata(
        &mut self,
        child: &OpTreeNode<B>,
        m: &OpSetMetadata,
    ) -> QueryResult {
        let start = binary_search_by(child, |op| m.lamport_cmp(op.obj, self.obj));
        let mut counters = Default::default();
        for pos in start..child.len() {
            let op = child.get(pos).unwrap();
            if op.obj != self.obj {
                break;
            }
            if op.insert {
                self.last_elem = None;
            }
            if self.last_elem.is_none() && is_visible(op, pos, &mut counters) {
                for (_, vop) in visible_op(op, pos, &counters) {
                    self.last_elem = vop.elemid();
                    self.ops.push(vop);
                }
            }
        }
        QueryResult::Finish
    }
}
@@ -1,40 +0,0 @@
use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::{Clock, ElemId, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct ListValsAt {
    clock: Clock,
    last_elem: Option<ElemId>,
    pub ops: Vec<Op>,
    window: VisWindow,
    pos: usize,
}

impl ListValsAt {
    pub fn new(clock: Clock) -> Self {
        ListValsAt {
            clock,
            last_elem: None,
            ops: vec![],
            window: Default::default(),
            pos: 0,
        }
    }
}

impl<const B: usize> TreeQuery<B> for ListValsAt {
    fn query_element(&mut self, op: &Op) -> QueryResult {
        if op.insert {
            self.last_elem = None;
        }
        if self.last_elem.is_none() && self.window.visible_at(op, self.pos, &self.clock) {
            for (_, vop) in self.window.seen_op(op, self.pos) {
                self.last_elem = vop.elemid();
                self.ops.push(vop);
            }
        }
        self.pos += 1;
        QueryResult::Next
    }
}
|
|
@ -1,87 +0,0 @@
|
|||
use crate::op_tree::OpTreeNode;
use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::{AutomergeError, ElemId, Key, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Nth<const B: usize> {
    target: usize,
    seen: usize,
    last_seen: Option<ElemId>,
    last_elem: Option<ElemId>,
    window: VisWindow,
    pub ops: Vec<Op>,
    pub ops_pos: Vec<usize>,
    pub pos: usize,
}

impl<const B: usize> Nth<B> {
    pub fn new(target: usize) -> Self {
        Nth {
            target,
            seen: 0,
            last_seen: None,
            ops: vec![],
            ops_pos: vec![],
            pos: 0,
            last_elem: None,
            window: Default::default(),
        }
    }

    pub fn key(&self) -> Result<Key, AutomergeError> {
        if let Some(e) = self.last_elem {
            Ok(Key::Seq(e))
        } else {
            Err(AutomergeError::InvalidIndex(self.target))
        }
    }
}

impl<const B: usize> TreeQuery<B> for Nth<B> {
    fn query_node(&mut self, child: &OpTreeNode<B>) -> QueryResult {
        let mut num_vis = child.index.len;
        if num_vis > 0 {
            // num_vis is the number of visible elements in this subtree's
            // index, minus one if it still contains the element we already
            // counted as last_seen, so an element whose ops span a node
            // boundary is not counted twice.
            if child.index.has(&self.last_seen) {
                num_vis -= 1;
            }
            if self.seen + num_vis > self.target {
                QueryResult::Decend
            } else {
                self.pos += child.len();
                self.seen += num_vis;
                self.last_seen = child.last().elemid();
                QueryResult::Next
            }
        } else {
            self.pos += child.len();
            QueryResult::Next
        }
    }

    fn query_element(&mut self, element: &Op) -> QueryResult {
        if element.insert {
            if self.seen > self.target {
                return QueryResult::Finish;
            }
            self.last_elem = element.elemid();
            self.last_seen = None;
        }
        let visible = self.window.visible(element, self.pos);
        if visible && self.last_seen.is_none() {
            self.seen += 1;
            self.last_seen = element.elemid();
        }
        if self.seen == self.target + 1 && visible {
            for (vpos, vop) in self.window.seen_op(element, self.pos) {
                self.ops.push(vop);
                self.ops_pos.push(vpos);
            }
        }
        self.pos += 1;
        QueryResult::Next
    }
}
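`query_node` is the interesting half: cached per-subtree visible counts let the search skip whole nodes, with a minus-one adjustment when a node re-contains the element already counted as `last_seen`. A standalone sketch of that arithmetic over flat chunks, with stand-in types:

```rust
// Sketch of the "skip whole subtrees by cached visible count" search that
// Nth::query_node performs, flattened to chunks of a list.
struct Chunk {
    visible: Vec<u64>, // element ids of visible elements in this chunk
}

// Find the chunk and in-chunk offset of the target-th visible element.
fn find_nth(chunks: &[Chunk], target: usize) -> Option<(usize, usize)> {
    let mut seen = 0usize;
    let mut last_seen: Option<u64> = None;
    for (i, chunk) in chunks.iter().enumerate() {
        let mut num_vis = chunk.visible.len();
        // If the chunk begins with the element we already counted at the
        // end of the previous chunk, don't count it twice.
        let dup_first = last_seen.is_some() && last_seen == chunk.visible.first().copied();
        if dup_first {
            num_vis -= 1;
        }
        if seen + num_vis > target {
            // "descend": the target element lives inside this chunk
            let skip = if dup_first { 1 } else { 0 };
            return Some((i, target - seen + skip));
        }
        seen += num_vis;
        last_seen = chunk.visible.last().copied();
    }
    None
}

fn main() {
    // element 30 spans a chunk boundary (e.g. a conflict with two visible ops)
    let chunks = vec![
        Chunk { visible: vec![10, 20, 30] },
        Chunk { visible: vec![30, 40, 50] },
    ];
    assert_eq!(find_nth(&chunks, 0), Some((0, 0))); // 1st element is 10
    assert_eq!(find_nth(&chunks, 3), Some((1, 1))); // 4th element is 40, not 30 again
}
```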

@@ -1,57 +0,0 @@

use crate::query::{QueryResult, TreeQuery, VisWindow};
use crate::{Clock, ElemId, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct NthAt<const B: usize> {
    clock: Clock,
    target: usize,
    seen: usize,
    last_seen: Option<ElemId>,
    last_elem: Option<ElemId>,
    window: VisWindow,
    pub ops: Vec<Op>,
    pub ops_pos: Vec<usize>,
    pub pos: usize,
}

impl<const B: usize> NthAt<B> {
    pub fn new(target: usize, clock: Clock) -> Self {
        NthAt {
            clock,
            target,
            seen: 0,
            last_seen: None,
            ops: vec![],
            ops_pos: vec![],
            pos: 0,
            last_elem: None,
            window: Default::default(),
        }
    }
}

impl<const B: usize> TreeQuery<B> for NthAt<B> {
    fn query_element(&mut self, element: &Op) -> QueryResult {
        if element.insert {
            if self.seen > self.target {
                return QueryResult::Finish;
            }
            self.last_elem = element.elemid();
            self.last_seen = None;
        }
        let visible = self.window.visible_at(element, self.pos, &self.clock);
        if visible && self.last_seen.is_none() {
            self.seen += 1;
            self.last_seen = element.elemid();
        }
        if self.seen == self.target + 1 && visible {
            for (vpos, vop) in self.window.seen_op(element, self.pos) {
                self.ops.push(vop);
                self.ops_pos.push(vpos);
            }
        }
        self.pos += 1;
        QueryResult::Next
    }
}

@@ -1,54 +0,0 @@

use crate::op_tree::{OpSetMetadata, OpTreeNode};
use crate::query::{binary_search_by, is_visible, visible_op, QueryResult, TreeQuery};
use crate::{Key, types::ObjId, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Prop {
    obj: ObjId,
    key: Key,
    pub ops: Vec<Op>,
    pub ops_pos: Vec<usize>,
    pub pos: usize,
}

impl Prop {
    pub fn new(obj: ObjId, prop: usize) -> Self {
        Prop {
            obj,
            key: Key::Map(prop),
            ops: vec![],
            ops_pos: vec![],
            pos: 0,
        }
    }
}

impl<const B: usize> TreeQuery<B> for Prop {
    fn query_node_with_metadata(
        &mut self,
        child: &OpTreeNode<B>,
        m: &OpSetMetadata,
    ) -> QueryResult {
        let start = binary_search_by(child, |op| {
            m.lamport_cmp(op.obj, self.obj)
                .then_with(|| m.key_cmp(&op.key, &self.key))
        });
        let mut counters = Default::default();
        self.pos = start;
        for pos in start..child.len() {
            let op = child.get(pos).unwrap();
            if !(op.obj == self.obj && op.key == self.key) {
                break;
            }
            if is_visible(op, pos, &mut counters) {
                for (vpos, vop) in visible_op(op, pos, &counters) {
                    self.ops.push(vop);
                    self.ops_pos.push(vpos);
                }
            }
            self.pos += 1;
        }
        QueryResult::Finish
    }
}
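The pattern here, binary search to the first candidate followed by a linear scan until the key changes, is worth seeing in isolation. A hedged sketch over a plain sorted `Vec`, using `partition_point` in place of the tree-aware `binary_search_by`:

```rust
// Sketch of "binary search to the first match, then linear scan" as used
// by the Prop query. Ops are kept sorted by (obj, key); stand-in types.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
struct OpKey {
    obj: u32,
    key: u32,
}

struct Op {
    at: OpKey,
    visible: bool,
    value: &'static str,
}

fn prop_values(ops: &[Op], target: OpKey) -> Vec<&'static str> {
    // partition_point finds the first index where the predicate flips,
    // playing the role of the query's binary_search_by.
    let start = ops.partition_point(|op| op.at < target);
    ops[start..]
        .iter()
        .take_while(|op| op.at == target) // stop once the key changes
        .filter(|op| op.visible)
        .map(|op| op.value)
        .collect()
}

fn main() {
    let ops = vec![
        Op { at: OpKey { obj: 1, key: 1 }, visible: true, value: "a" },
        Op { at: OpKey { obj: 1, key: 2 }, visible: true, value: "b1" },
        Op { at: OpKey { obj: 1, key: 2 }, visible: true, value: "b2" }, // conflict
        Op { at: OpKey { obj: 2, key: 1 }, visible: false, value: "dead" },
    ];
    let got = prop_values(&ops, OpKey { obj: 1, key: 2 });
    assert_eq!(got, vec!["b1", "b2"]); // both sides of the conflict
}
```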

@@ -1,51 +0,0 @@

use crate::op_tree::{OpSetMetadata, OpTreeNode};
use crate::query::{binary_search_by, QueryResult, TreeQuery, VisWindow};
use crate::{Clock, Key, Op};
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct PropAt {
    clock: Clock,
    key: Key,
    pub ops: Vec<Op>,
    pub ops_pos: Vec<usize>,
    pub pos: usize,
}

impl PropAt {
    pub fn new(prop: usize, clock: Clock) -> Self {
        PropAt {
            clock,
            key: Key::Map(prop),
            ops: vec![],
            ops_pos: vec![],
            pos: 0,
        }
    }
}

impl<const B: usize> TreeQuery<B> for PropAt {
    fn query_node_with_metadata(
        &mut self,
        child: &OpTreeNode<B>,
        m: &OpSetMetadata,
    ) -> QueryResult {
        let start = binary_search_by(child, |op| m.key_cmp(&op.key, &self.key));
        let mut window: VisWindow = Default::default();
        self.pos = start;
        for pos in start..child.len() {
            let op = child.get(pos).unwrap();
            if op.key != self.key {
                break;
            }
            if window.visible_at(op, pos, &self.clock) {
                for (vpos, vop) in window.seen_op(op, pos) {
                    self.ops.push(vop);
                    self.ops_pos.push(vpos);
                }
            }
            self.pos += 1;
        }
        QueryResult::Finish
    }
}

@@ -1,129 +0,0 @@

use crate::op_tree::{OpSetMetadata, OpTreeNode};
use crate::query::{binary_search_by, QueryResult, TreeQuery};
use crate::{Key, Op, HEAD};
use std::cmp::Ordering;
use std::fmt::Debug;

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct SeekOp<const B: usize> {
    op: Op,
    pub pos: usize,
    pub succ: Vec<usize>,
    found: bool,
}

impl<const B: usize> SeekOp<B> {
    pub fn new(op: &Op) -> Self {
        SeekOp {
            op: op.clone(),
            succ: vec![],
            pos: 0,
            found: false,
        }
    }

    fn different_obj(&self, op: &Op) -> bool {
        op.obj != self.op.obj
    }

    fn lesser_insert(&self, op: &Op, m: &OpSetMetadata) -> bool {
        op.insert && m.lamport_cmp(op.id, self.op.id) == Ordering::Less
    }

    fn greater_opid(&self, op: &Op, m: &OpSetMetadata) -> bool {
        m.lamport_cmp(op.id, self.op.id) == Ordering::Greater
    }

    fn is_target_insert(&self, op: &Op) -> bool {
        if !op.insert {
            return false;
        }
        if self.op.insert {
            op.elemid() == self.op.key.elemid()
        } else {
            op.elemid() == self.op.elemid()
        }
    }
}

impl<const B: usize> TreeQuery<B> for SeekOp<B> {
    fn query_node_with_metadata(
        &mut self,
        child: &OpTreeNode<B>,
        m: &OpSetMetadata,
    ) -> QueryResult {
        if self.found {
            return QueryResult::Decend;
        }
        match self.op.key {
            Key::Seq(e) if e == HEAD => {
                while self.pos < child.len() {
                    let op = child.get(self.pos).unwrap();
                    if self.op.overwrites(op) {
                        self.succ.push(self.pos);
                    }
                    if op.insert && m.lamport_cmp(op.id, self.op.id) == Ordering::Less {
                        break;
                    }
                    self.pos += 1;
                }
                QueryResult::Finish
            }
            Key::Seq(e) => {
                if self.found || child.index.ops.contains(&e.0) {
                    QueryResult::Decend
                } else {
                    self.pos += child.len();
                    QueryResult::Next
                }
            }
            Key::Map(_) => {
                self.pos = binary_search_by(child, |op| m.key_cmp(&op.key, &self.op.key));
                while self.pos < child.len() {
                    let op = child.get(self.pos).unwrap();
                    if op.key != self.op.key {
                        break;
                    }
                    if self.op.overwrites(op) {
                        self.succ.push(self.pos);
                    }
                    if m.lamport_cmp(op.id, self.op.id) == Ordering::Greater {
                        break;
                    }
                    self.pos += 1;
                }
                QueryResult::Finish
            }
        }
    }

    fn query_element_with_metadata(&mut self, e: &Op, m: &OpSetMetadata) -> QueryResult {
        if !self.found {
            if self.is_target_insert(e) {
                self.found = true;
                if self.op.overwrites(e) {
                    self.succ.push(self.pos);
                }
            }
            self.pos += 1;
            QueryResult::Next
        } else {
            if self.op.overwrites(e) {
                self.succ.push(self.pos);
            }
            if self.op.insert {
                if self.different_obj(e) || self.lesser_insert(e, m) {
                    QueryResult::Finish
                } else {
                    self.pos += 1;
                    QueryResult::Next
                }
            } else if e.insert || self.different_obj(e) || self.greater_opid(e, m) {
                QueryResult::Finish
            } else {
                self.pos += 1;
                QueryResult::Next
            }
        }
    }
}
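For a map key, the scan keeps ops for the same key ordered by op id: it records the position of every op the incoming op overwrites (its `pred` set) and stops at the first op with a greater id, which is the insertion point. A standalone sketch of that stop condition, with plain tuple ordering standing in for the real Lamport comparison:

```rust
// Sketch of SeekOp's Key::Map scan: find where a new op slots into the
// run of ops for the same key, collecting overwritten positions on the way.
type OpId = (u64, usize); // (counter, actor), ordered like a Lamport id

struct Op {
    id: OpId,
    key: u32,
    pred: Vec<OpId>, // ids this op overwrites
}

fn seek(ops: &[Op], new_op: &Op, start: usize) -> (usize, Vec<usize>) {
    let mut pos = start;
    let mut succ = Vec::new();
    while pos < ops.len() {
        let op = &ops[pos];
        if op.key != new_op.key {
            break; // ran past this key's run of ops
        }
        if new_op.pred.contains(&op.id) {
            succ.push(pos); // new_op overwrites this op
        }
        if op.id > new_op.id {
            break; // keep ops for a key sorted by id
        }
        pos += 1;
    }
    (pos, succ)
}

fn main() {
    let ops = vec![
        Op { id: (1, 0), key: 7, pred: vec![] },
        Op { id: (5, 1), key: 7, pred: vec![] },
    ];
    let new_op = Op { id: (3, 0), key: 7, pred: vec![(1, 0)] };
    let (pos, succ) = seek(&ops, &new_op, 0);
    assert_eq!(pos, 1);        // insert between (1,0) and (5,1)
    assert_eq!(succ, vec![0]); // the op at index 0 is overwritten
}
```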

@@ -1,381 +0,0 @@

use std::{
    borrow::Cow,
    collections::{HashMap, HashSet},
    convert::TryFrom,
    io,
    io::Write,
};

use crate::{
    decoding, decoding::Decoder, encoding, encoding::Encodable, Automerge, AutomergeError, Change,
    ChangeHash, Patch,
};

mod bloom;
mod state;

pub use bloom::BloomFilter;
pub use state::{SyncHave, SyncState};

const HASH_SIZE: usize = 32; // 256 bits = 32 bytes
const MESSAGE_TYPE_SYNC: u8 = 0x42; // first byte of a sync message, for identification

impl Automerge {
    pub fn generate_sync_message(&mut self, sync_state: &mut SyncState) -> Option<SyncMessage> {
        self.ensure_transaction_closed();
        self._generate_sync_message(sync_state)
    }

    fn _generate_sync_message(&self, sync_state: &mut SyncState) -> Option<SyncMessage> {
        let our_heads = self._get_heads();

        let our_need = self._get_missing_deps(sync_state.their_heads.as_ref().unwrap_or(&vec![]));

        let their_heads_set = if let Some(ref heads) = sync_state.their_heads {
            heads.iter().collect::<HashSet<_>>()
        } else {
            HashSet::new()
        };
        let our_have = if our_need.iter().all(|hash| their_heads_set.contains(hash)) {
            vec![self.make_bloom_filter(sync_state.shared_heads.clone())]
        } else {
            Vec::new()
        };

        if let Some(ref their_have) = sync_state.their_have {
            if let Some(first_have) = their_have.first().as_ref() {
                if !first_have
                    .last_sync
                    .iter()
                    .all(|hash| self._get_change_by_hash(hash).is_some())
                {
                    let reset_msg = SyncMessage {
                        heads: our_heads,
                        need: Vec::new(),
                        have: vec![SyncHave::default()],
                        changes: Vec::new(),
                    };
                    return Some(reset_msg);
                }
            }
        }

        let mut changes_to_send = if let (Some(their_have), Some(their_need)) = (
            sync_state.their_have.as_ref(),
            sync_state.their_need.as_ref(),
        ) {
            self.get_changes_to_send(their_have.clone(), their_need)
        } else {
            Vec::new()
        };

        let heads_unchanged = if let Some(last_sent_heads) = sync_state.last_sent_heads.as_ref() {
            last_sent_heads == &our_heads
        } else {
            false
        };

        let heads_equal = if let Some(their_heads) = sync_state.their_heads.as_ref() {
            their_heads == &our_heads
        } else {
            false
        };

        if heads_unchanged && heads_equal && changes_to_send.is_empty() {
            return None;
        }

        // deduplicate the changes to send with those we have already sent
        changes_to_send.retain(|change| !sync_state.sent_hashes.contains(&change.hash));

        sync_state.last_sent_heads = Some(our_heads.clone());
        sync_state
            .sent_hashes
            .extend(changes_to_send.iter().map(|c| c.hash));

        let sync_message = SyncMessage {
            heads: our_heads,
            have: our_have,
            need: our_need,
            changes: changes_to_send.into_iter().cloned().collect(),
        };

        Some(sync_message)
    }

    pub fn receive_sync_message(
        &mut self,
        sync_state: &mut SyncState,
        message: SyncMessage,
    ) -> Result<Option<Patch>, AutomergeError> {
        self.ensure_transaction_closed();
        self._receive_sync_message(sync_state, message)
    }

    fn _receive_sync_message(
        &mut self,
        sync_state: &mut SyncState,
        message: SyncMessage,
    ) -> Result<Option<Patch>, AutomergeError> {
        let mut patch = None;

        let before_heads = self.get_heads();

        let SyncMessage {
            heads: message_heads,
            changes: message_changes,
            need: message_need,
            have: message_have,
        } = message;

        let changes_is_empty = message_changes.is_empty();
        if !changes_is_empty {
            patch = Some(self.apply_changes(&message_changes)?);
            sync_state.shared_heads = advance_heads(
                &before_heads.iter().collect(),
                &self.get_heads().into_iter().collect(),
                &sync_state.shared_heads,
            );
        }

        // trim down the sent hashes to those that we know they haven't seen
        self.filter_changes(&message_heads, &mut sync_state.sent_hashes);

        if changes_is_empty && message_heads == before_heads {
            sync_state.last_sent_heads = Some(message_heads.clone());
        }

        let known_heads = message_heads
            .iter()
            .filter(|head| self.get_change_by_hash(head).is_some())
            .collect::<Vec<_>>();
        if known_heads.len() == message_heads.len() {
            sync_state.shared_heads = message_heads.clone();
        } else {
            sync_state.shared_heads = sync_state
                .shared_heads
                .iter()
                .chain(known_heads)
                .collect::<HashSet<_>>()
                .into_iter()
                .copied()
                .collect::<Vec<_>>();
            sync_state.shared_heads.sort();
        }

        sync_state.their_have = Some(message_have);
        sync_state.their_heads = Some(message_heads);
        sync_state.their_need = Some(message_need);

        Ok(patch)
    }

    fn make_bloom_filter(&self, last_sync: Vec<ChangeHash>) -> SyncHave {
        let new_changes = self._get_changes(&last_sync);
        let hashes = new_changes
            .into_iter()
            .map(|change| change.hash)
            .collect::<Vec<_>>();
        SyncHave {
            last_sync,
            bloom: BloomFilter::from(&hashes[..]),
        }
    }

    fn get_changes_to_send(&self, have: Vec<SyncHave>, need: &[ChangeHash]) -> Vec<&Change> {
        if have.is_empty() {
            need.iter()
                .filter_map(|hash| self._get_change_by_hash(hash))
                .collect()
        } else {
            let mut last_sync_hashes = HashSet::new();
            let mut bloom_filters = Vec::with_capacity(have.len());

            for h in have {
                let SyncHave { last_sync, bloom } = h;
                for hash in last_sync {
                    last_sync_hashes.insert(hash);
                }
                bloom_filters.push(bloom);
            }
            let last_sync_hashes = last_sync_hashes.into_iter().collect::<Vec<_>>();

            let changes = self._get_changes(&last_sync_hashes);

            let mut change_hashes = HashSet::with_capacity(changes.len());
            let mut dependents: HashMap<ChangeHash, Vec<ChangeHash>> = HashMap::new();
            let mut hashes_to_send = HashSet::new();

            for change in &changes {
                change_hashes.insert(change.hash);

                for dep in &change.deps {
                    dependents.entry(*dep).or_default().push(change.hash);
                }

                if bloom_filters
                    .iter()
                    .all(|bloom| !bloom.contains_hash(&change.hash))
                {
                    hashes_to_send.insert(change.hash);
                }
            }

            let mut stack = hashes_to_send.iter().copied().collect::<Vec<_>>();
            while let Some(hash) = stack.pop() {
                if let Some(deps) = dependents.get(&hash) {
                    for dep in deps {
                        if hashes_to_send.insert(*dep) {
                            stack.push(*dep);
                        }
                    }
                }
            }

            let mut changes_to_send = Vec::new();
            for hash in need {
                hashes_to_send.insert(*hash);
                if !change_hashes.contains(hash) {
                    let change = self._get_change_by_hash(hash);
                    if let Some(change) = change {
                        changes_to_send.push(change);
                    }
                }
            }

            for change in changes {
                if hashes_to_send.contains(&change.hash) {
                    changes_to_send.push(change);
                }
            }
            changes_to_send
        }
    }
}

#[derive(Debug, Clone)]
pub struct SyncMessage {
    pub heads: Vec<ChangeHash>,
    pub need: Vec<ChangeHash>,
    pub have: Vec<SyncHave>,
    pub changes: Vec<Change>,
}

impl SyncMessage {
    pub fn encode(self) -> Result<Vec<u8>, encoding::Error> {
        let mut buf = vec![MESSAGE_TYPE_SYNC];

        encode_hashes(&mut buf, &self.heads)?;
        encode_hashes(&mut buf, &self.need)?;
        (self.have.len() as u32).encode(&mut buf)?;
        for have in self.have {
            encode_hashes(&mut buf, &have.last_sync)?;
            have.bloom.into_bytes()?.encode(&mut buf)?;
        }

        (self.changes.len() as u32).encode(&mut buf)?;
        for mut change in self.changes {
            change.compress();
            change.raw_bytes().encode(&mut buf)?;
        }

        Ok(buf)
    }

    pub fn decode(bytes: &[u8]) -> Result<SyncMessage, decoding::Error> {
        let mut decoder = Decoder::new(Cow::Borrowed(bytes));

        let message_type = decoder.read::<u8>()?;
        if message_type != MESSAGE_TYPE_SYNC {
            return Err(decoding::Error::WrongType {
                expected_one_of: vec![MESSAGE_TYPE_SYNC],
                found: message_type,
            });
        }

        let heads = decode_hashes(&mut decoder)?;
        let need = decode_hashes(&mut decoder)?;
        let have_count = decoder.read::<u32>()?;
        let mut have = Vec::with_capacity(have_count as usize);
        for _ in 0..have_count {
            let last_sync = decode_hashes(&mut decoder)?;
            let bloom_bytes: Vec<u8> = decoder.read()?;
            let bloom = BloomFilter::try_from(bloom_bytes.as_slice())?;
            have.push(SyncHave { last_sync, bloom });
        }

        let change_count = decoder.read::<u32>()?;
        let mut changes = Vec::with_capacity(change_count as usize);
        for _ in 0..change_count {
            let change = decoder.read()?;
            changes.push(Change::from_bytes(change)?);
        }

        Ok(SyncMessage {
            heads,
            need,
            have,
            changes,
        })
    }
}

fn encode_hashes(buf: &mut Vec<u8>, hashes: &[ChangeHash]) -> Result<(), encoding::Error> {
    debug_assert!(
        hashes.windows(2).all(|h| h[0] <= h[1]),
        "hashes were not sorted"
    );
    hashes.encode(buf)?;
    Ok(())
}

impl Encodable for &[ChangeHash] {
    fn encode<W: Write>(&self, buf: &mut W) -> io::Result<usize> {
        let head = self.len().encode(buf)?;
        let mut body = 0;
        for hash in self.iter() {
            buf.write_all(&hash.0)?;
            body += hash.0.len();
        }
        Ok(head + body)
    }
}

fn decode_hashes(decoder: &mut Decoder) -> Result<Vec<ChangeHash>, decoding::Error> {
    let length = decoder.read::<u32>()?;
    let mut hashes = Vec::with_capacity(length as usize);

    for _ in 0..length {
        let hash_bytes = decoder.read_bytes(HASH_SIZE)?;
        let hash = ChangeHash::try_from(hash_bytes).map_err(decoding::Error::BadChangeFormat)?;
        hashes.push(hash);
    }

    Ok(hashes)
}
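The wire format is deliberately simple: a type byte, then length-prefixed runs of 32-byte hashes. The sketch below round-trips that framing; it uses fixed little-endian `u32` lengths where the real `Encodable` writes variable-length integers, so it is illustrative rather than wire-compatible:

```rust
// Standalone sketch of the sync message hash framing: one type byte, then
// a length-prefixed run of 32-byte hashes. Lengths here are fixed u32s,
// not the varints the real encoder uses.
const MESSAGE_TYPE_SYNC: u8 = 0x42;
const HASH_SIZE: usize = 32;

fn encode_hashes(buf: &mut Vec<u8>, hashes: &[[u8; HASH_SIZE]]) {
    buf.extend_from_slice(&(hashes.len() as u32).to_le_bytes());
    for h in hashes {
        buf.extend_from_slice(h);
    }
}

// Returns the decoded hashes and whatever bytes follow them.
fn decode_hashes(mut bytes: &[u8]) -> Option<(Vec<[u8; HASH_SIZE]>, &[u8])> {
    let len_bytes: [u8; 4] = bytes.get(..4)?.try_into().ok()?;
    bytes = &bytes[4..];
    let count = u32::from_le_bytes(len_bytes) as usize;
    let mut hashes = Vec::with_capacity(count);
    for _ in 0..count {
        let hash: [u8; HASH_SIZE] = bytes.get(..HASH_SIZE)?.try_into().ok()?;
        bytes = &bytes[HASH_SIZE..];
        hashes.push(hash);
    }
    Some((hashes, bytes))
}

fn main() {
    let heads = vec![[0xaa; 32], [0xbb; 32]];
    let mut buf = vec![MESSAGE_TYPE_SYNC];
    encode_hashes(&mut buf, &heads);

    assert_eq!(buf[0], MESSAGE_TYPE_SYNC);
    let (decoded, rest) = decode_hashes(&buf[1..]).unwrap();
    assert_eq!(decoded, heads);
    assert!(rest.is_empty());
}
```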

fn advance_heads(
    my_old_heads: &HashSet<&ChangeHash>,
    my_new_heads: &HashSet<ChangeHash>,
    our_old_shared_heads: &[ChangeHash],
) -> Vec<ChangeHash> {
    let new_heads = my_new_heads
        .iter()
        .filter(|head| !my_old_heads.contains(head))
        .copied()
        .collect::<Vec<_>>();

    let common_heads = our_old_shared_heads
        .iter()
        .filter(|head| my_new_heads.contains(head))
        .copied()
        .collect::<Vec<_>>();

    let mut advanced_heads = HashSet::with_capacity(new_heads.len() + common_heads.len());
    for head in new_heads.into_iter().chain(common_heads) {
        advanced_heads.insert(head);
    }
    let mut advanced_heads = advanced_heads.into_iter().collect::<Vec<_>>();
    advanced_heads.sort();
    advanced_heads
}
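`advance_heads` keeps the shared-heads estimate monotone: after applying remote changes, the new estimate is the heads we just gained plus any previously shared heads that are still heads, deduplicated and sorted so peers can compare head lists byte for byte. A standalone sketch of the same rule:

```rust
use std::collections::HashSet;

// Standalone sketch of the shared-heads advancement rule used after
// applying a remote peer's changes. Hashes are bare 32-byte arrays here.
type Hash = [u8; 32];

fn advance_heads(
    old_heads: &HashSet<Hash>,
    new_heads: &HashSet<Hash>,
    old_shared: &[Hash],
) -> Vec<Hash> {
    // heads we gained from the changes we just applied
    let mut advanced: HashSet<Hash> = new_heads
        .iter()
        .filter(|h| !old_heads.contains(*h))
        .copied()
        .collect();
    // plus previously shared heads that are still heads
    advanced.extend(old_shared.iter().filter(|h| new_heads.contains(*h)));
    let mut advanced: Vec<Hash> = advanced.into_iter().collect();
    advanced.sort(); // canonical order so peers can compare head lists
    advanced
}

fn main() {
    let a = [1u8; 32];
    let b = [2u8; 32];
    let c = [3u8; 32];
    let old: HashSet<Hash> = [a, b].into_iter().collect();
    let new: HashSet<Hash> = [b, c].into_iter().collect(); // c replaced a
    let shared = vec![b];
    assert_eq!(advance_heads(&old, &new, &shared), vec![b, c]);
}
```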

@@ -1,65 +0,0 @@

use std::{borrow::Cow, collections::HashSet};

use super::{decode_hashes, encode_hashes};
use crate::{decoding, decoding::Decoder, encoding, BloomFilter, ChangeHash};

const SYNC_STATE_TYPE: u8 = 0x43; // first byte of an encoded sync state, for identification

#[derive(Debug, Clone)]
pub struct SyncState {
    pub shared_heads: Vec<ChangeHash>,
    pub last_sent_heads: Option<Vec<ChangeHash>>,
    pub their_heads: Option<Vec<ChangeHash>>,
    pub their_need: Option<Vec<ChangeHash>>,
    pub their_have: Option<Vec<SyncHave>>,
    pub sent_hashes: HashSet<ChangeHash>,
}

#[derive(Debug, Clone, Default)]
pub struct SyncHave {
    pub last_sync: Vec<ChangeHash>,
    pub bloom: BloomFilter,
}

impl SyncState {
    pub fn encode(&self) -> Result<Vec<u8>, encoding::Error> {
        let mut buf = vec![SYNC_STATE_TYPE];
        encode_hashes(&mut buf, &self.shared_heads)?;
        Ok(buf)
    }

    pub fn decode(bytes: &[u8]) -> Result<Self, decoding::Error> {
        let mut decoder = Decoder::new(Cow::Borrowed(bytes));

        let record_type = decoder.read::<u8>()?;
        if record_type != SYNC_STATE_TYPE {
            return Err(decoding::Error::WrongType {
                expected_one_of: vec![SYNC_STATE_TYPE],
                found: record_type,
            });
        }

        let shared_heads = decode_hashes(&mut decoder)?;
        Ok(Self {
            shared_heads,
            last_sent_heads: Some(Vec::new()),
            their_heads: None,
            their_need: None,
            their_have: Some(Vec::new()),
            sent_hashes: HashSet::new(),
        })
    }
}

impl Default for SyncState {
    fn default() -> Self {
        Self {
            shared_heads: Vec::new(),
            last_sent_heads: Some(Vec::new()),
            their_heads: None,
            their_need: None,
            their_have: None,
            sent_hashes: HashSet::new(),
        }
    }
}

@@ -1,459 +0,0 @@

use crate::error;
use crate::legacy as amp;
use crate::ScalarValue;
use serde::{Deserialize, Serialize};
use std::cmp::Eq;
use std::convert::TryFrom;
use std::convert::TryInto;
use std::fmt;
use std::str::FromStr;
use tinyvec::{ArrayVec, TinyVec};

pub(crate) const HEAD: ElemId = ElemId(OpId(0, 0));

const ROOT_STR: &str = "_root";
const HEAD_STR: &str = "_head";

/// An actor id is a sequence of bytes. By default we use a uuid which can be nicely stack
/// allocated.
///
/// In the event that users want to use their own type of identifier that is longer than a uuid
/// then they will likely end up pushing it onto the heap which is still fine.
///
// Note that change encoding relies on the Ord implementation for the ActorId being implemented in
// terms of the lexicographic ordering of the underlying bytes. Be aware of this if you are
// changing the ActorId implementation in ways which might affect the Ord implementation
#[derive(Eq, PartialEq, Hash, Clone, PartialOrd, Ord)]
#[cfg_attr(feature = "derive-arbitrary", derive(arbitrary::Arbitrary))]
pub struct ActorId(TinyVec<[u8; 16]>);

impl fmt::Debug for ActorId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ActorID")
            .field(&hex::encode(&self.0))
            .finish()
    }
}

impl ActorId {
    pub fn random() -> ActorId {
        ActorId(TinyVec::from(*uuid::Uuid::new_v4().as_bytes()))
    }

    pub fn to_bytes(&self) -> &[u8] {
        &self.0
    }

    pub fn to_hex_string(&self) -> String {
        hex::encode(&self.0)
    }

    pub fn op_id_at(&self, seq: u64) -> amp::OpId {
        amp::OpId(seq, self.clone())
    }
}

impl TryFrom<&str> for ActorId {
    type Error = error::InvalidActorId;

    fn try_from(s: &str) -> Result<Self, Self::Error> {
        hex::decode(s)
            .map(ActorId::from)
            .map_err(|_| error::InvalidActorId(s.into()))
    }
}

impl From<uuid::Uuid> for ActorId {
    fn from(u: uuid::Uuid) -> Self {
        ActorId(TinyVec::from(*u.as_bytes()))
    }
}

impl From<&[u8]> for ActorId {
    fn from(b: &[u8]) -> Self {
        ActorId(TinyVec::from(b))
    }
}

impl From<&Vec<u8>> for ActorId {
    fn from(b: &Vec<u8>) -> Self {
        ActorId::from(b.as_slice())
    }
}

impl From<Vec<u8>> for ActorId {
    fn from(b: Vec<u8>) -> Self {
        let inner = if let Ok(arr) = ArrayVec::try_from(b.as_slice()) {
            TinyVec::Inline(arr)
        } else {
            TinyVec::Heap(b)
        };
        ActorId(inner)
    }
}

impl FromStr for ActorId {
    type Err = error::InvalidActorId;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        ActorId::try_from(s)
    }
}

impl fmt::Display for ActorId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_hex_string())
    }
}

#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Copy, Hash)]
#[serde(rename_all = "camelCase", untagged)]
pub enum ObjType {
    Map,
    Table,
    List,
    Text,
}

impl ObjType {
    pub fn is_sequence(&self) -> bool {
        matches!(self, Self::List | Self::Text)
    }
}

impl From<amp::MapType> for ObjType {
    fn from(other: amp::MapType) -> Self {
        match other {
            amp::MapType::Map => Self::Map,
            amp::MapType::Table => Self::Table,
        }
    }
}

impl From<amp::SequenceType> for ObjType {
    fn from(other: amp::SequenceType) -> Self {
        match other {
            amp::SequenceType::List => Self::List,
            amp::SequenceType::Text => Self::Text,
        }
    }
}

impl fmt::Display for ObjType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ObjType::Map => write!(f, "map"),
            ObjType::Table => write!(f, "table"),
            ObjType::List => write!(f, "list"),
            ObjType::Text => write!(f, "text"),
        }
    }
}

#[derive(PartialEq, Debug, Clone)]
pub enum OpType {
    Make(ObjType),
    /// Perform a deletion, expanding the operation to cover `n` deletions (multiOp).
    Del,
    Inc(i64),
    Set(ScalarValue),
}

#[derive(Debug)]
pub(crate) enum Export {
    Id(OpId),
    Special(String),
    Prop(usize),
}

pub(crate) trait Exportable {
    fn export(&self) -> Export;
}

pub(crate) trait Importable {
    fn wrap(id: OpId) -> Self;
    fn from(s: &str) -> Option<Self>
    where
        Self: std::marker::Sized;
}

impl OpId {
    pub(crate) fn new(counter: u64, actor: usize) -> OpId {
        OpId(counter, actor)
    }

    #[inline]
    pub fn counter(&self) -> u64 {
        self.0
    }

    #[inline]
    pub(crate) fn actor(&self) -> usize {
        self.1
    }
}

impl Exportable for ObjId {
    fn export(&self) -> Export {
        match self {
            ObjId::Root => Export::Special(ROOT_STR.to_owned()),
            ObjId::Op(o) => Export::Id(*o),
        }
    }
}

impl Exportable for &ObjId {
    fn export(&self) -> Export {
        (*self).export()
    }
}

impl Exportable for ElemId {
    fn export(&self) -> Export {
        if self == &HEAD {
            Export::Special(HEAD_STR.to_owned())
        } else {
            Export::Id(self.0)
        }
    }
}

impl Exportable for OpId {
    fn export(&self) -> Export {
        Export::Id(*self)
    }
}

impl Exportable for Key {
    fn export(&self) -> Export {
        match self {
            Key::Map(p) => Export::Prop(*p),
            Key::Seq(e) => e.export(),
        }
    }
}

impl Importable for ObjId {
    fn wrap(id: OpId) -> Self {
        ObjId::Op(id)
    }

    fn from(s: &str) -> Option<Self> {
        if s == ROOT_STR {
            Some(ObjId::Root)
        } else {
            None
        }
    }
}

impl Importable for OpId {
    fn wrap(id: OpId) -> Self {
        id
    }

    fn from(_s: &str) -> Option<Self> {
        None
    }
}

impl Importable for ElemId {
    fn wrap(id: OpId) -> Self {
        ElemId(id)
    }

    fn from(s: &str) -> Option<Self> {
        if s == HEAD_STR {
            Some(HEAD)
        } else {
            None
        }
    }
}

impl From<OpId> for ObjId {
    fn from(o: OpId) -> Self {
        match (o.counter(), o.actor()) {
            (0, 0) => ObjId::Root,
            (_, _) => ObjId::Op(o),
        }
    }
}

impl From<OpId> for ElemId {
    fn from(o: OpId) -> Self {
        ElemId(o)
    }
}

impl From<String> for Prop {
    fn from(p: String) -> Self {
        Prop::Map(p)
    }
}

impl From<&String> for Prop {
    fn from(p: &String) -> Self {
        Prop::Map(p.clone())
    }
}

impl From<&str> for Prop {
    fn from(p: &str) -> Self {
        Prop::Map(p.to_owned())
    }
}

impl From<usize> for Prop {
    fn from(index: usize) -> Self {
        Prop::Seq(index)
    }
}

impl From<f64> for Prop {
    fn from(index: f64) -> Self {
        Prop::Seq(index as usize)
    }
}

impl From<OpId> for Key {
    fn from(id: OpId) -> Self {
        Key::Seq(ElemId(id))
    }
}

impl From<ElemId> for Key {
    fn from(e: ElemId) -> Self {
        Key::Seq(e)
    }
}

#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Hash)]
pub(crate) enum Key {
    Map(usize),
    Seq(ElemId),
}

#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone)]
pub enum Prop {
    Map(String),
    Seq(usize),
}

#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone)]
pub struct Patch {}

impl Key {
    pub fn elemid(&self) -> Option<ElemId> {
        match self {
            Key::Map(_) => None,
            Key::Seq(id) => Some(*id),
        }
    }
}

#[derive(Debug, Clone, PartialOrd, Ord, Eq, PartialEq, Copy, Hash, Default)]
pub(crate) struct OpId(u64, usize);

#[derive(Debug, Clone, Copy, PartialOrd, Eq, PartialEq, Ord, Hash)]
pub(crate) enum ObjId {
    Root,
    Op(OpId),
}

impl Default for ObjId {
    fn default() -> Self {
        Self::Root
    }
}

#[derive(Debug, Clone, Copy, PartialOrd, Eq, PartialEq, Ord, Hash, Default)]
pub(crate) struct ElemId(pub OpId);

#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Op {
    pub change: usize,
    pub id: OpId,
    pub action: OpType,
    pub obj: ObjId,
    pub key: Key,
    pub succ: Vec<OpId>,
    pub pred: Vec<OpId>,
    pub insert: bool,
}

impl Op {
    pub fn is_del(&self) -> bool {
        matches!(self.action, OpType::Del)
    }

    pub fn overwrites(&self, other: &Op) -> bool {
        self.pred.iter().any(|i| i == &other.id)
    }

    pub fn elemid(&self) -> Option<ElemId> {
        if self.insert {
            Some(ElemId(self.id))
        } else {
            self.key.elemid()
        }
    }

    #[allow(dead_code)]
    pub fn dump(&self) -> String {
        match &self.action {
            OpType::Set(value) if self.insert => format!("i:{}", value),
            OpType::Set(value) => format!("s:{}", value),
            OpType::Make(obj) => format!("make{}", obj),
            OpType::Inc(val) => format!("inc:{}", val),
            OpType::Del => "del".to_string(),
        }
    }
}

#[derive(Debug, Clone)]
pub struct Peer {}

#[derive(Eq, PartialEq, Hash, Clone, PartialOrd, Ord, Copy)]
pub struct ChangeHash(pub [u8; 32]);

impl fmt::Debug for ChangeHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ChangeHash")
            .field(&hex::encode(&self.0))
            .finish()
    }
}

#[derive(thiserror::Error, Debug)]
pub enum ParseChangeHashError {
    #[error(transparent)]
    HexDecode(#[from] hex::FromHexError),
    #[error("incorrect length, change hash should be 32 bytes, got {actual}")]
    IncorrectLength { actual: usize },
}

impl FromStr for ChangeHash {
    type Err = ParseChangeHashError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let bytes = hex::decode(s)?;
        if bytes.len() == 32 {
            Ok(ChangeHash(bytes.try_into().unwrap()))
        } else {
            Err(ParseChangeHashError::IncorrectLength {
                actual: bytes.len(),
            })
        }
    }
}

impl TryFrom<&[u8]> for ChangeHash {
    type Error = error::InvalidChangeHashSlice;

    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
        if bytes.len() != 32 {
            Err(error::InvalidChangeHashSlice(Vec::from(bytes)))
        } else {
            let mut array = [0; 32];
            array.copy_from_slice(bytes);
            Ok(ChangeHash(array))
        }
    }
}
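Both `FromStr` and `TryFrom<&[u8]>` reduce to the same rule: a change hash is exactly 32 bytes (a SHA-256 digest). A dependency-free sketch of the `FromStr` path, hand-rolling the hex decoding the real code delegates to the `hex` crate:

```rust
// Standalone sketch of ChangeHash parsing: decode hex, then require
// exactly 32 bytes.
fn hex_val(c: u8) -> Option<u8> {
    match c {
        b'0'..=b'9' => Some(c - b'0'),
        b'a'..=b'f' => Some(c - b'a' + 10),
        b'A'..=b'F' => Some(c - b'A' + 10),
        _ => None,
    }
}

fn parse_change_hash(s: &str) -> Result<[u8; 32], String> {
    let bytes = s.as_bytes();
    if bytes.len() % 2 != 0 {
        return Err("odd number of hex digits".into());
    }
    let mut out = Vec::with_capacity(bytes.len() / 2);
    for pair in bytes.chunks(2) {
        let hi = hex_val(pair[0]).ok_or("invalid hex digit")?;
        let lo = hex_val(pair[1]).ok_or("invalid hex digit")?;
        out.push(hi << 4 | lo);
    }
    // a change hash must be exactly 32 bytes (a SHA-256 digest)
    out.try_into()
        .map_err(|v: Vec<u8>| format!("expected 32 bytes, got {}", v.len()))
}

fn main() {
    let ok = "00".repeat(32);
    assert!(parse_change_hash(&ok).is_ok());
    assert!(parse_change_hash("abcd").is_err()); // too short
    assert!(parse_change_hash("zz").is_err());   // not hex
}
```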

@@ -1,295 +0,0 @@

use crate::{error, ObjType, Op, types::OpId, OpType};
use serde::{Deserialize, Serialize};
use smol_str::SmolStr;
use std::convert::TryFrom;

#[derive(Debug, Clone, PartialEq)]
pub enum Value {
    Object(ObjType),
    Scalar(ScalarValue),
}

impl Value {
    pub fn to_string(&self) -> Option<String> {
        match self {
            Value::Scalar(val) => Some(val.to_string()),
            _ => None,
        }
    }

    pub fn map() -> Value {
        Value::Object(ObjType::Map)
    }

    pub fn list() -> Value {
        Value::Object(ObjType::List)
    }

    pub fn text() -> Value {
        Value::Object(ObjType::Text)
    }

    pub fn table() -> Value {
        Value::Object(ObjType::Table)
    }

    pub fn str(s: &str) -> Value {
        Value::Scalar(ScalarValue::Str(s.into()))
    }

    pub fn int(n: i64) -> Value {
        Value::Scalar(ScalarValue::Int(n))
    }

    pub fn uint(n: u64) -> Value {
        Value::Scalar(ScalarValue::Uint(n))
    }

    pub fn counter(n: i64) -> Value {
        Value::Scalar(ScalarValue::Counter(n))
    }

    pub fn timestamp(n: i64) -> Value {
        Value::Scalar(ScalarValue::Timestamp(n))
    }

    pub fn f64(n: f64) -> Value {
        Value::Scalar(ScalarValue::F64(n))
    }

    pub fn bytes(b: Vec<u8>) -> Value {
        Value::Scalar(ScalarValue::Bytes(b))
    }
}

impl From<&str> for Value {
    fn from(s: &str) -> Self {
        Value::Scalar(s.into())
    }
}

impl From<String> for Value {
    fn from(s: String) -> Self {
        Value::Scalar(ScalarValue::Str(s.into()))
    }
}

impl From<i64> for Value {
    fn from(n: i64) -> Self {
        Value::Scalar(ScalarValue::Int(n))
    }
}

impl From<i32> for Value {
    fn from(n: i32) -> Self {
        Value::Scalar(ScalarValue::Int(n.into()))
    }
}

impl From<u64> for Value {
    fn from(n: u64) -> Self {
        Value::Scalar(ScalarValue::Uint(n))
    }
}

impl From<bool> for Value {
    fn from(v: bool) -> Self {
        Value::Scalar(ScalarValue::Boolean(v))
    }
}

impl From<ObjType> for Value {
    fn from(o: ObjType) -> Self {
        Value::Object(o)
    }
}

impl From<ScalarValue> for Value {
    fn from(v: ScalarValue) -> Self {
        Value::Scalar(v)
    }
}

impl From<&Op> for (Value, OpId) {
    fn from(op: &Op) -> Self {
        match &op.action {
            OpType::Make(obj_type) => (Value::Object(*obj_type), op.id),
            OpType::Set(scalar) => (Value::Scalar(scalar.clone()), op.id),
            _ => panic!("can't convert op into a value - {:?}", op),
        }
    }
}

impl From<Op> for (Value, OpId) {
    fn from(op: Op) -> Self {
        match &op.action {
            OpType::Make(obj_type) => (Value::Object(*obj_type), op.id),
            OpType::Set(scalar) => (Value::Scalar(scalar.clone()), op.id),
            _ => panic!("can't convert op into a value - {:?}", op),
        }
    }
}

impl From<Value> for OpType {
    fn from(v: Value) -> Self {
        match v {
            Value::Object(o) => OpType::Make(o),
            Value::Scalar(s) => OpType::Set(s),
        }
    }
}

#[derive(Deserialize, Serialize, PartialEq, Debug, Clone, Copy)]
pub(crate) enum DataType {
    #[serde(rename = "counter")]
    Counter,
    #[serde(rename = "timestamp")]
    Timestamp,
    #[serde(rename = "bytes")]
    Bytes,
    #[serde(rename = "uint")]
    Uint,
    #[serde(rename = "int")]
    Int,
    #[serde(rename = "float64")]
    F64,
    #[serde(rename = "undefined")]
    Undefined,
}

#[derive(Serialize, PartialEq, Debug, Clone)]
#[serde(untagged)]
pub enum ScalarValue {
    Bytes(Vec<u8>),
    Str(SmolStr),
    Int(i64),
    Uint(u64),
    F64(f64),
    Counter(i64),
    Timestamp(i64),
    Boolean(bool),
    Null,
}

impl ScalarValue {
    pub(crate) fn as_datatype(
        &self,
        datatype: DataType,
    ) -> Result<ScalarValue, error::InvalidScalarValue> {
        match (datatype, self) {
            (DataType::Counter, ScalarValue::Int(i)) => Ok(ScalarValue::Counter(*i)),
            (DataType::Counter, ScalarValue::Uint(u)) => match i64::try_from(*u) {
                Ok(i) => Ok(ScalarValue::Counter(i)),
                Err(_) => Err(error::InvalidScalarValue {
                    raw_value: self.clone(),
                    expected: "an integer".to_string(),
                    unexpected: "an integer larger than i64::max_value".to_string(),
                    datatype,
                }),
            },
            (DataType::Bytes, ScalarValue::Bytes(bytes)) => Ok(ScalarValue::Bytes(bytes.clone())),
            (DataType::Bytes, v) => Err(error::InvalidScalarValue {
                raw_value: self.clone(),
                expected: "a vector of bytes".to_string(),
                unexpected: v.to_string(),
                datatype,
            }),
            (DataType::Counter, v) => Err(error::InvalidScalarValue {
                raw_value: self.clone(),
                expected: "an integer".to_string(),
                unexpected: v.to_string(),
                datatype,
            }),
            (DataType::Timestamp, ScalarValue::Int(i)) => Ok(ScalarValue::Timestamp(*i)),
            (DataType::Timestamp, ScalarValue::Uint(u)) => match i64::try_from(*u) {
                Ok(i) => Ok(ScalarValue::Timestamp(i)),
                Err(_) => Err(error::InvalidScalarValue {
                    raw_value: self.clone(),
                    expected: "an integer".to_string(),
                    unexpected: "an integer larger than i64::max_value".to_string(),
                    datatype,
                }),
            },
            (DataType::Timestamp, v) => Err(error::InvalidScalarValue {
                raw_value: self.clone(),
                expected: "an integer".to_string(),
                unexpected: v.to_string(),
                datatype,
            }),
            (DataType::Int, v) => Ok(ScalarValue::Int(v.to_i64().ok_or(
                error::InvalidScalarValue {
                    raw_value: self.clone(),
                    expected: "an int".to_string(),
                    unexpected: v.to_string(),
                    datatype,
                },
            )?)),
            (DataType::Uint, v) => Ok(ScalarValue::Uint(v.to_u64().ok_or(
                error::InvalidScalarValue {
                    raw_value: self.clone(),
                    expected: "a uint".to_string(),
                    unexpected: v.to_string(),
                    datatype,
                },
            )?)),
            (DataType::F64, v) => Ok(ScalarValue::F64(v.to_f64().ok_or(
                error::InvalidScalarValue {
                    raw_value: self.clone(),
                    expected: "an f64".to_string(),
                    unexpected: v.to_string(),
                    datatype,
                },
            )?)),
            (DataType::Undefined, _) => Ok(self.clone()),
        }
    }

    /// Returns an Option containing a `DataType` if
    /// `self` represents a numerical scalar value.
    /// This is necessary because numerical values are not self-describing
    /// (unlike strings / bytes / etc.)
    pub(crate) fn as_numerical_datatype(&self) -> Option<DataType> {
        match self {
            ScalarValue::Counter(..) => Some(DataType::Counter),
            ScalarValue::Timestamp(..) => Some(DataType::Timestamp),
            ScalarValue::Int(..) => Some(DataType::Int),
            ScalarValue::Uint(..) => Some(DataType::Uint),
            ScalarValue::F64(..) => Some(DataType::F64),
            _ => None,
        }
    }

    /// If this value can be coerced to an i64, return the i64 value
    pub fn to_i64(&self) -> Option<i64> {
        match self {
            ScalarValue::Int(n) => Some(*n),
            ScalarValue::Uint(n) => Some(*n as i64),
            ScalarValue::F64(n) => Some(*n as i64),
            ScalarValue::Counter(n) => Some(*n),
            ScalarValue::Timestamp(n) => Some(*n),
            _ => None,
        }
    }

    pub fn to_u64(&self) -> Option<u64> {
        match self {
            ScalarValue::Int(n) => Some(*n as u64),
            ScalarValue::Uint(n) => Some(*n),
            ScalarValue::F64(n) => Some(*n as u64),
            ScalarValue::Counter(n) => Some(*n as u64),
            ScalarValue::Timestamp(n) => Some(*n as u64),
            _ => None,
        }
    }

    pub fn to_f64(&self) -> Option<f64> {
        match self {
            ScalarValue::Int(n) => Some(*n as f64),
            ScalarValue::Uint(n) => Some(*n as f64),
            ScalarValue::F64(n) => Some(*n),
            ScalarValue::Counter(n) => Some(*n as f64),
            ScalarValue::Timestamp(n) => Some(*n as f64),
            _ => None,
        }
    }
}
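Note the split between the checked and unchecked numeric paths: `to_i64`/`to_u64`/`to_f64` use `as` casts, which never fail but can wrap (integer to integer) or saturate (float to integer), while the `Counter`/`Timestamp` arms of `as_datatype` go through `try_from` and reject out-of-range values. A short demonstration of the difference:

```rust
// Sketch of the checked-vs-unchecked numeric coercion split in
// ScalarValue, shown with plain integer types.
fn main() {
    let big: u64 = u64::MAX;

    // Unchecked, like ScalarValue::to_i64 on a Uint: wraps to -1.
    let wrapped = big as i64;
    assert_eq!(wrapped, -1);

    // Checked, like as_datatype(DataType::Counter) on a Uint: errors instead.
    assert!(i64::try_from(big).is_err());

    // Float-to-int `as` casts saturate (Rust 1.45+ semantics).
    assert_eq!(f64::MAX as i64, i64::MAX);
}
```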

@@ -1,378 +0,0 @@

use automerge::ObjId;

use std::{collections::HashMap, convert::TryInto, hash::Hash};

use serde::ser::{SerializeMap, SerializeSeq};

pub fn new_doc() -> automerge::Automerge {
    automerge::Automerge::new_with_actor_id(automerge::ActorId::random())
}

pub fn new_doc_with_actor(actor: automerge::ActorId) -> automerge::Automerge {
    automerge::Automerge::new_with_actor_id(actor)
}

/// Returns two actor IDs, the first considered to be ordered before the second
pub fn sorted_actors() -> (automerge::ActorId, automerge::ActorId) {
    let a = automerge::ActorId::random();
    let b = automerge::ActorId::random();
    if a > b {
        (b, a)
    } else {
        (a, b)
    }
}

/// This macro makes it easy to make assertions about a document. It is called with two arguments,
/// the first is a reference to an `automerge::Automerge`, the second is an instance of
/// `RealizedObject`.
///
/// What - I hear you ask - is a `RealizedObject`? It's a fully hydrated version of the contents of
/// an automerge document. You don't need to think about this too much though because you can
/// easily construct one with the `map!` and `list!` macros. Here's an example:
///
/// ## Constructing documents
///
/// ```rust
/// let mut doc = automerge::Automerge::new();
/// let todos = doc.set(automerge::ROOT, "todos", automerge::Value::map()).unwrap().unwrap();
/// let todo = doc.insert(todos, 0, automerge::Value::map()).unwrap();
/// let title = doc.set(todo, "title", "water plants").unwrap().unwrap();
///
/// assert_doc!(
///     &doc,
///     map!{
///         "todos" => {
///             todos => list![
///                 { todo => map!{ "title" => { title => "water plants" } } }
///             ]
///         }
///     }
/// );
/// ```
///
/// This might look more complicated than you were expecting. Why are there OpIds (`todos`, `todo`,
/// `title`) in there? Well the `RealizedObject` contains all the changes in the document tagged by
/// OpId. This makes it easy to test for conflicts:
///
/// ```rust
/// let mut doc1 = automerge::Automerge::new();
/// let mut doc2 = automerge::Automerge::new();
/// let op1 = doc1.set(automerge::ROOT, "field", "one").unwrap().unwrap();
/// let op2 = doc2.set(automerge::ROOT, "field", "two").unwrap().unwrap();
/// doc1.merge(&mut doc2);
/// assert_doc!(
///     &doc1,
///     map!{
///         "field" => {
///             op1 => "one",
///             op2 => "two"
///         }
///     }
/// );
/// ```
#[macro_export]
macro_rules! assert_doc {
    ($doc: expr, $expected: expr) => {{
        use $crate::helpers::realize;
        let realized = realize($doc);
        let exported: RealizedObject = $expected.into();
        if realized != exported {
            let serde_right = serde_json::to_string_pretty(&realized).unwrap();
            let serde_left = serde_json::to_string_pretty(&exported).unwrap();
            panic!(
                "documents didn't match\n expected\n{}\n got\n{}",
                &serde_left, &serde_right
            );
        }
        pretty_assertions::assert_eq!(realized, exported);
    }};
}

/// Like `assert_doc` except that you can specify an object ID and property to select subsections
/// of the document.
#[macro_export]
macro_rules! assert_obj {
    ($doc: expr, $obj_id: expr, $prop: expr, $expected: expr) => {{
        use $crate::helpers::realize_prop;
        let realized = realize_prop($doc, $obj_id, $prop);
        let exported: RealizedObject = $expected.into();
        if realized != exported {
            let serde_right = serde_json::to_string_pretty(&realized).unwrap();
            let serde_left = serde_json::to_string_pretty(&exported).unwrap();
            panic!(
                "documents didn't match\n expected\n{}\n got\n{}",
                &serde_left, &serde_right
            );
        }
        pretty_assertions::assert_eq!(realized, exported);
    }};
}

/// Construct `RealizedObject::Map`. This macro takes a nested set of curly braces. The outer set
/// is the keys of the map, the inner set is the opid tagged values:
///
/// ```
/// map!{
///     "key" => {
///         opid1 => "value1",
///         opid2 => "value2",
///     }
/// }
/// ```
///
/// The map above would represent a map with a conflict on the "key" property. The values can be
/// anything which implements `Into<RealizedObject<ExportableOpId<'_>>>`, including nested calls to
/// `map!` or `list!`.
#[macro_export]
macro_rules! map {
    (@single $($x:tt)*) => (());
    (@count $($rest:expr),*) => (<[()]>::len(&[$(map!(@single $rest)),*]));

    (@inner { $($opid:expr => $value:expr,)+ }) => { map!(@inner { $($opid => $value),+ }) };
    (@inner { $($opid:expr => $value:expr),* }) => {
        {
            use std::collections::HashMap;
            let mut inner: HashMap<ObjId, RealizedObject> = HashMap::new();
            $(
                let _ = inner.insert(ObjId::from(&$opid).into_owned(), $value.into());
            )*
            inner
        }
    };
    ($($key:expr => $inner:tt,)+) => { map!($($key => $inner),+) };
    ($($key:expr => $inner:tt),*) => {
        {
            use std::collections::HashMap;
            let _cap = map!(@count $($key),*);
            let mut _map: HashMap<String, HashMap<ObjId, RealizedObject>> = ::std::collections::HashMap::with_capacity(_cap);
            $(
                let inner = map!(@inner $inner);
                let _ = _map.insert($key.to_string(), inner);
            )*
            RealizedObject::Map(_map)
        }
    }
}

/// Construct `RealizedObject::Sequence`. This macro represents a sequence of opid tagged values
///
/// ```
/// list![
///     {
///         opid1 => "value1",
///         opid2 => "value2",
///     }
/// ]
/// ```
///
/// The list above would represent a list with a conflict on the 0 index. The values can be
/// anything which implements `Into<RealizedObject<ExportableOpId<'_>>>`, including nested calls to
/// `map!` or `list!`.
#[macro_export]
macro_rules! list {
    (@single $($x:tt)*) => (());
    (@count $($rest:tt),*) => (<[()]>::len(&[$(list!(@single $rest)),*]));

    (@inner { $($opid:expr => $value:expr,)+ }) => { list!(@inner { $($opid => $value),+ }) };
    (@inner { $($opid:expr => $value:expr),* }) => {
        {
            use std::collections::HashMap;
            let mut inner: HashMap<ObjId, RealizedObject> = HashMap::new();
            $(
                let _ = inner.insert(ObjId::from(&$opid).into_owned(), $value.into());
            )*
            inner
        }
    };
    ($($inner:tt,)+) => { list!($($inner),+) };
    ($($inner:tt),*) => {
        {
            let _cap = list!(@count $($inner),*);
            let mut _list: Vec<HashMap<ObjId, RealizedObject>> = Vec::new();
            $(
                let inner = list!(@inner $inner);
                let _ = _list.push(inner);
            )*
            RealizedObject::Sequence(_list)
        }
    }
}

pub fn mk_counter(value: i64) -> automerge::ScalarValue {
    automerge::ScalarValue::Counter(value)
}

#[derive(Eq, Hash, PartialEq, Debug)]
pub struct ExportedOpId(String);

impl std::fmt::Display for ExportedOpId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

/// A `RealizedObject` is a representation of all the current values in a document - including
/// conflicts.
#[derive(PartialEq, Debug)]
pub enum RealizedObject<'a> {
    Map(HashMap<String, HashMap<ObjId<'a>, RealizedObject<'a>>>),
    Sequence(Vec<HashMap<ObjId<'a>, RealizedObject<'a>>>),
    Value(automerge::ScalarValue),
}

impl serde::Serialize for RealizedObject<'static> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Self::Map(kvs) => {
                let mut map_ser = serializer.serialize_map(Some(kvs.len()))?;
                for (k, kvs) in kvs {
                    let kvs_serded = kvs
                        .iter()
                        .map(|(opid, value)| (opid.to_string(), value))
                        .collect::<HashMap<String, &RealizedObject>>();
                    map_ser.serialize_entry(k, &kvs_serded)?;
                }
                map_ser.end()
            }
            Self::Sequence(elems) => {
                let mut list_ser = serializer.serialize_seq(Some(elems.len()))?;
                for elem in elems {
                    let kvs_serded = elem
                        .iter()
                        .map(|(opid, value)| (opid.to_string(), value))
                        .collect::<HashMap<String, &RealizedObject>>();
                    list_ser.serialize_element(&kvs_serded)?;
                }
                list_ser.end()
            }
            Self::Value(v) => v.serialize(serializer),
        }
    }
}

pub fn realize<'a>(doc: &automerge::Automerge) -> RealizedObject<'a> {
    realize_obj(doc, ObjId::Root, automerge::ObjType::Map)
}

pub fn realize_prop<P: Into<automerge::Prop>>(
    doc: &automerge::Automerge,
    obj_id: automerge::ObjId,
    prop: P,
) -> RealizedObject<'static> {
    let (val, obj_id) = doc.value(obj_id, prop).unwrap().unwrap();
    match val {
        automerge::Value::Object(obj_type) => realize_obj(doc, obj_id.into(), obj_type),
        automerge::Value::Scalar(v) => RealizedObject::Value(v),
    }
}

pub fn realize_obj(
    doc: &automerge::Automerge,
    obj_id: automerge::ObjId,
    objtype: automerge::ObjType,
) -> RealizedObject<'static> {
    match objtype {
        automerge::ObjType::Map | automerge::ObjType::Table => {
            let mut result = HashMap::new();
            for key in doc.keys(obj_id.clone()) {
                result.insert(key.clone(), realize_values(doc, obj_id.clone(), key));
            }
            RealizedObject::Map(result)
        }
        automerge::ObjType::List | automerge::ObjType::Text => {
            let length = doc.length(obj_id.clone());
            let mut result = Vec::with_capacity(length);
            for i in 0..length {
                result.push(realize_values(doc, obj_id.clone(), i));
            }
            RealizedObject::Sequence(result)
        }
    }
}

fn realize_values<K: Into<automerge::Prop>>(
    doc: &automerge::Automerge,
    obj_id: automerge::ObjId,
    key: K,
) -> HashMap<ObjId<'static>, RealizedObject<'static>> {
    let mut values_by_objid: HashMap<ObjId, RealizedObject> = HashMap::new();
    for (value, opid) in doc.values(obj_id, key).unwrap() {
        let realized = match value {
            automerge::Value::Object(objtype) => realize_obj(doc, opid.clone().into(), objtype),
            automerge::Value::Scalar(v) => RealizedObject::Value(v),
        };
        values_by_objid.insert(opid.into(), realized);
    }
    values_by_objid
}

impl<'a, I: Into<RealizedObject<'a>>> From<HashMap<&str, HashMap<ObjId<'a>, I>>>
    for RealizedObject<'a>
{
    fn from(values: HashMap<&str, HashMap<ObjId<'a>, I>>) -> Self {
        let intoed = values
            .into_iter()
            .map(|(k, v)| {
                (
                    k.to_string(),
                    v.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
                )
            })
            .collect();
        RealizedObject::Map(intoed)
    }
}

impl<'a, I: Into<RealizedObject<'a>>> From<Vec<HashMap<ObjId<'a>, I>>> for RealizedObject<'a> {
    fn from(values: Vec<HashMap<ObjId<'a>, I>>) -> Self {
        RealizedObject::Sequence(
            values
                .into_iter()
                .map(|v| v.into_iter().map(|(k, v)| (k, v.into())).collect())
                .collect(),
        )
    }
}

impl From<bool> for RealizedObject<'static> {
    fn from(b: bool) -> Self {
        RealizedObject::Value(b.into())
    }
}

impl From<usize> for RealizedObject<'static> {
    fn from(u: usize) -> Self {
        let v = u.try_into().unwrap();
        RealizedObject::Value(automerge::ScalarValue::Int(v))
    }
}

impl From<automerge::ScalarValue> for RealizedObject<'static> {
    fn from(s: automerge::ScalarValue) -> Self {
        RealizedObject::Value(s)
    }
}

impl From<&str> for RealizedObject<'static> {
    fn from(s: &str) -> Self {
        RealizedObject::Value(automerge::ScalarValue::Str(s.into()))
    }
}

/// Pretty print the contents of a document
#[allow(dead_code)]
pub fn pretty_print(doc: &automerge::Automerge) {
    println!("{}", serde_json::to_string_pretty(&realize(doc)).unwrap())
}
|
|
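Taken together, these helpers let a test turn whole documents into plain Rust values and compare them structurally. As a quick illustration, here is a minimal sketch (not part of the diff itself) that uses only `new_doc`, `realize`, and `pretty_print` exactly as the tests below use them:

```rust
#[test]
fn merged_documents_realize_to_equal_values() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    doc1.set(ObjId::Root, "key", "value").unwrap();
    doc2.merge(&mut doc1);
    // realize() turns each document into a RealizedObject tree, so two
    // converged documents can be compared directly with assert_eq!.
    assert_eq!(realize(&doc1), realize(&doc2));
    // pretty_print dumps the realized document as JSON, handy when an
    // assertion fails.
    pretty_print(&doc1);
}
```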
@@ -1,966 +0,0 @@

use automerge::{Automerge, ObjId};

mod helpers;
#[allow(unused_imports)]
use helpers::{
    mk_counter, new_doc, new_doc_with_actor, pretty_print, realize, realize_obj, sorted_actors,
    RealizedObject,
};

#[test]
fn no_conflict_on_repeated_assignment() {
    let mut doc = Automerge::new();
    doc.set(ObjId::Root, "foo", 1).unwrap();
    let op = doc.set(ObjId::Root, "foo", 2).unwrap().unwrap();
    assert_doc!(
        &doc,
        map! {
            "foo" => { op => 2 },
        }
    );
}

#[test]
fn no_change_on_repeated_map_set() {
    let mut doc = new_doc();
    doc.set(ObjId::Root, "foo", 1).unwrap();
    assert!(doc.set(ObjId::Root, "foo", 1).unwrap().is_none());
}

#[test]
fn no_change_on_repeated_list_set() {
    let mut doc = new_doc();
    let list_id = doc
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap()
        .into();
    doc.insert(&list_id, 0, 1).unwrap();
    doc.set(&list_id, 0, 1).unwrap();
    assert!(doc.set(list_id, 0, 1).unwrap().is_none());
}

#[test]
fn no_change_on_list_insert_followed_by_set_of_same_value() {
    let mut doc = new_doc();
    let list_id = doc
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    doc.insert(&list_id, 0, 1).unwrap();
    assert!(doc.set(&list_id, 0, 1).unwrap().is_none());
}

#[test]
fn repeated_map_assignment_which_resolves_conflict_not_ignored() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    doc1.set(ObjId::Root, "field", 123).unwrap();
    doc2.merge(&mut doc1);
    doc2.set(ObjId::Root, "field", 456).unwrap();
    doc1.set(ObjId::Root, "field", 789).unwrap();
    doc1.merge(&mut doc2);
    assert_eq!(doc1.values(ObjId::Root, "field").unwrap().len(), 2);

    let op = doc1.set(ObjId::Root, "field", 123).unwrap().unwrap();
    assert_doc!(
        &doc1,
        map! {
            "field" => {
                op => 123
            }
        }
    );
}

#[test]
fn repeated_list_assignment_which_resolves_conflict_not_ignored() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let list_id = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    doc1.insert(&list_id, 0, 123).unwrap();
    doc2.merge(&mut doc1);
    doc2.set(&list_id, 0, 456).unwrap().unwrap();
    doc1.merge(&mut doc2);
    let doc1_op = doc1.set(&list_id, 0, 789).unwrap().unwrap();

    assert_doc!(
        &doc1,
        map! {
            "list" => {
                list_id => list![
                    { doc1_op => 789 },
                ]
            }
        }
    );
}

#[test]
fn list_deletion() {
    let mut doc = new_doc();
    let list_id = doc
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    let op1 = doc.insert(&list_id, 0, 123).unwrap();
    doc.insert(&list_id, 1, 456).unwrap();
    let op3 = doc.insert(&list_id, 2, 789).unwrap();
    doc.del(&list_id, 1).unwrap();
    assert_doc!(
        &doc,
        map! {
            "list" => { list_id => list![
                { op1 => 123 },
                { op3 => 789 },
            ]}
        }
    )
}

#[test]
fn merge_concurrent_map_prop_updates() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let op1 = doc1.set(ObjId::Root, "foo", "bar").unwrap().unwrap();
    let hello = doc2
        .set(ObjId::Root, "hello", "world")
        .unwrap()
        .unwrap();
    doc1.merge(&mut doc2);
    assert_eq!(
        doc1.value(ObjId::Root, "foo").unwrap().unwrap().0,
        "bar".into()
    );
    assert_doc!(
        &doc1,
        map! {
            "foo" => { op1 => "bar" },
            "hello" => { hello => "world" },
        }
    );
    doc2.merge(&mut doc1);
    assert_doc!(
        &doc2,
        map! {
            "foo" => { op1 => "bar" },
            "hello" => { hello => "world" },
        }
    );
    assert_eq!(realize(&doc1), realize(&doc2));
}

#[test]
fn add_concurrent_increments_of_same_property() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let counter_id = doc1
        .set(ObjId::Root, "counter", mk_counter(0))
        .unwrap()
        .unwrap();
    doc2.merge(&mut doc1);
    doc1.inc(ObjId::Root, "counter", 1).unwrap();
    doc2.inc(ObjId::Root, "counter", 2).unwrap();
    doc1.merge(&mut doc2);
    assert_doc!(
        &doc1,
        map! {
            "counter" => {
                counter_id => mk_counter(3)
            }
        }
    );
}

#[test]
fn add_increments_only_to_preceded_values() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();

    // create a counter in doc1
    let doc1_counter_id = doc1
        .set(ObjId::Root, "counter", mk_counter(0))
        .unwrap()
        .unwrap();
    doc1.inc(ObjId::Root, "counter", 1).unwrap();

    // create a counter in doc2
    let doc2_counter_id = doc2
        .set(ObjId::Root, "counter", mk_counter(0))
        .unwrap()
        .unwrap();
    doc2.inc(ObjId::Root, "counter", 3).unwrap();

    // The two values should be conflicting rather than added
    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "counter" => {
                doc1_counter_id => mk_counter(1),
                doc2_counter_id => mk_counter(3),
            }
        }
    );
}

#[test]
fn concurrent_updates_of_same_field() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let set_one_opid = doc1.set(ObjId::Root, "field", "one").unwrap().unwrap();
    let set_two_opid = doc2.set(ObjId::Root, "field", "two").unwrap().unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "field" => {
                set_one_opid => "one",
                set_two_opid => "two",
            }
        }
    );
}

#[test]
fn concurrent_updates_of_same_list_element() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let list_id = doc1
        .set(ObjId::Root, "birds", automerge::Value::list())
        .unwrap()
        .unwrap();
    doc1.insert(list_id.clone(), 0, "finch").unwrap();
    doc2.merge(&mut doc1);
    let set_one_op = doc1.set(&list_id, 0, "greenfinch").unwrap().unwrap();
    let set_op_two = doc2.set(&list_id, 0, "goldfinch").unwrap().unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {
                list_id => list![{
                    set_one_op => "greenfinch",
                    set_op_two => "goldfinch",
                }]
            }
        }
    );
}

#[test]
fn assignment_conflicts_of_different_types() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let mut doc3 = new_doc();
    let op_one = doc1
        .set(ObjId::Root, "field", "string")
        .unwrap()
        .unwrap();
    let op_two = doc2
        .set(ObjId::Root, "field", automerge::Value::list())
        .unwrap()
        .unwrap();
    let op_three = doc3
        .set(ObjId::Root, "field", automerge::Value::map())
        .unwrap()
        .unwrap();

    doc1.merge(&mut doc2);
    doc1.merge(&mut doc3);

    assert_doc!(
        &doc1,
        map! {
            "field" => {
                op_one => "string",
                op_two => list!{},
                op_three => map!{},
            }
        }
    );
}

#[test]
fn changes_within_conflicting_map_field() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let op_one = doc1
        .set(ObjId::Root, "field", "string")
        .unwrap()
        .unwrap();
    let map_id = doc2
        .set(ObjId::Root, "field", automerge::Value::map())
        .unwrap()
        .unwrap();
    let set_in_doc2 = doc2.set(&map_id, "innerKey", 42).unwrap().unwrap();
    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "field" => {
                op_one => "string",
                map_id => map!{
                    "innerKey" => {
                        set_in_doc2 => 42,
                    }
                }
            }
        }
    );
}

#[test]
fn changes_within_conflicting_list_element() {
    let (actor1, actor2) = sorted_actors();
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);
    let list_id = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    doc1.insert(&list_id, 0, "hello").unwrap();
    doc2.merge(&mut doc1);

    let map_in_doc1 = doc1
        .set(&list_id, 0, automerge::Value::map())
        .unwrap()
        .unwrap();
    let set_map1 = doc1.set(&map_in_doc1, "map1", true).unwrap().unwrap();
    let set_key1 = doc1.set(&map_in_doc1, "key", 1).unwrap().unwrap();

    let map_in_doc2 = doc2
        .set(&list_id, 0, automerge::Value::map())
        .unwrap()
        .unwrap();
    doc1.merge(&mut doc2);
    let set_map2 = doc2.set(&map_in_doc2, "map2", true).unwrap().unwrap();
    let set_key2 = doc2.set(&map_in_doc2, "key", 2).unwrap().unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "list" => {
                list_id => list![
                    {
                        map_in_doc2 => map!{
                            "map2" => { set_map2 => true },
                            "key" => { set_key2 => 2 },
                        },
                        map_in_doc1 => map!{
                            "key" => { set_key1 => 1 },
                            "map1" => { set_map1 => true },
                        }
                    }
                ]
            }
        }
    );
}

#[test]
fn concurrently_assigned_nested_maps_should_not_merge() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();

    let doc1_map_id = doc1
        .set(ObjId::Root, "config", automerge::Value::map())
        .unwrap()
        .unwrap();
    let doc1_field = doc1
        .set(doc1_map_id.clone(), "background", "blue")
        .unwrap()
        .unwrap();

    let doc2_map_id = doc2
        .set(ObjId::Root, "config", automerge::Value::map())
        .unwrap()
        .unwrap();
    let doc2_field = doc2
        .set(doc2_map_id.clone(), "logo_url", "logo.png")
        .unwrap()
        .unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "config" => {
                doc1_map_id => map!{
                    "background" => {doc1_field => "blue"}
                },
                doc2_map_id => map!{
                    "logo_url" => {doc2_field => "logo.png"}
                }
            }
        }
    );
}

#[test]
fn concurrent_insertions_at_different_list_positions() {
    let (actor1, actor2) = sorted_actors();
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);
    assert!(doc1.maybe_get_actor().unwrap() < doc2.maybe_get_actor().unwrap());

    let list_id = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();

    let one = doc1.insert(&list_id, 0, "one").unwrap();
    let three = doc1.insert(&list_id, 1, "three").unwrap();
    doc2.merge(&mut doc1);
    let two = doc1.splice(&list_id, 1, 0, vec!["two".into()]).unwrap()[0].clone();
    let four = doc2.insert(&list_id, 2, "four").unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "list" => {
                list_id => list![
                    {one => "one"},
                    {two => "two"},
                    {three => "three"},
                    {four => "four"},
                ]
            }
        }
    );
}

#[test]
fn concurrent_insertions_at_same_list_position() {
    let (actor1, actor2) = sorted_actors();
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);
    assert!(doc1.maybe_get_actor().unwrap() < doc2.maybe_get_actor().unwrap());

    let list_id = doc1
        .set(ObjId::Root, "birds", automerge::Value::list())
        .unwrap()
        .unwrap();
    let parakeet = doc1.insert(&list_id, 0, "parakeet").unwrap();

    doc2.merge(&mut doc1);
    let starling = doc1.insert(&list_id, 1, "starling").unwrap();
    let chaffinch = doc2.insert(&list_id, 1, "chaffinch").unwrap();
    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {
                list_id => list![
                    {
                        parakeet => "parakeet",
                    },
                    {
                        starling => "starling",
                    },
                    {
                        chaffinch => "chaffinch",
                    },
                ]
            },
        }
    );
}

#[test]
fn concurrent_assignment_and_deletion_of_a_map_entry() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    doc1.set(ObjId::Root, "bestBird", "robin").unwrap();
    doc2.merge(&mut doc1);
    doc1.del(ObjId::Root, "bestBird").unwrap();
    let set_two = doc2
        .set(ObjId::Root, "bestBird", "magpie")
        .unwrap()
        .unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "bestBird" => {
                set_two => "magpie",
            }
        }
    );
}

#[test]
fn concurrent_assignment_and_deletion_of_list_entry() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let list_id = doc1
        .set(ObjId::Root, "birds", automerge::Value::list())
        .unwrap()
        .unwrap();
    let blackbird = doc1.insert(&list_id, 0, "blackbird").unwrap();
    doc1.insert(&list_id, 1, "thrush").unwrap();
    let goldfinch = doc1.insert(&list_id, 2, "goldfinch").unwrap();
    doc2.merge(&mut doc1);

    let starling = doc1.set(&list_id, 1, "starling").unwrap().unwrap();

    doc2.del(&list_id, 1).unwrap();

    assert_doc!(
        &doc2,
        map! {
            "birds" => {list_id => list![
                { blackbird => "blackbird"},
                { goldfinch => "goldfinch"},
            ]}
        }
    );

    assert_doc!(
        &doc1,
        map! {
            "birds" => {list_id.clone() => list![
                { blackbird => "blackbird" },
                { starling.clone() => "starling" },
                { goldfinch => "goldfinch" },
            ]}
        }
    );

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {list_id => list![
                { blackbird => "blackbird" },
                { starling => "starling" },
                { goldfinch => "goldfinch" },
            ]}
        }
    );
}

#[test]
fn insertion_after_a_deleted_list_element() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let list_id = doc1
        .set(ObjId::Root, "birds", automerge::Value::list())
        .unwrap()
        .unwrap();

    let blackbird = doc1.insert(list_id.clone(), 0, "blackbird").unwrap();
    doc1.insert(&list_id, 1, "thrush").unwrap();
    doc1.insert(&list_id, 2, "goldfinch").unwrap();

    doc2.merge(&mut doc1);

    doc1.splice(&list_id, 1, 2, Vec::new()).unwrap();

    let starling = doc2
        .splice(&list_id, 2, 0, vec!["starling".into()])
        .unwrap()[0]
        .clone();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {list_id => list![
                { blackbird => "blackbird" },
                { starling => "starling" }
            ]}
        }
    );

    doc2.merge(&mut doc1);
    assert_doc!(
        &doc2,
        map! {
            "birds" => {list_id => list![
                { blackbird => "blackbird" },
                { starling => "starling" }
            ]}
        }
    );
}

#[test]
fn concurrent_deletion_of_same_list_element() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();
    let list_id = doc1
        .set(ObjId::Root, "birds", automerge::Value::list())
        .unwrap()
        .unwrap();

    let albatross = doc1.insert(list_id.clone(), 0, "albatross").unwrap();
    doc1.insert(&list_id, 1, "buzzard").unwrap();
    let cormorant = doc1.insert(&list_id, 2, "cormorant").unwrap();

    doc2.merge(&mut doc1);

    doc1.del(&list_id, 1).unwrap();

    doc2.del(&list_id, 1).unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {list_id.clone() => list![
                { albatross.clone() => "albatross" },
                { cormorant.clone() => "cormorant" }
            ]}
        }
    );

    doc2.merge(&mut doc1);
    assert_doc!(
        &doc2,
        map! {
            "birds" => {list_id => list![
                { albatross => "albatross" },
                { cormorant => "cormorant" }
            ]}
        }
    );
}

#[test]
fn concurrent_updates_at_different_levels() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();

    let animals = doc1
        .set(ObjId::Root, "animals", automerge::Value::map())
        .unwrap()
        .unwrap();
    let birds = doc1
        .set(&animals, "birds", automerge::Value::map())
        .unwrap()
        .unwrap();
    doc1.set(&birds, "pink", "flamingo").unwrap().unwrap();
    doc1.set(&birds, "black", "starling").unwrap().unwrap();

    let mammals = doc1
        .set(&animals, "mammals", automerge::Value::list())
        .unwrap()
        .unwrap();
    let badger = doc1.insert(&mammals, 0, "badger").unwrap();

    doc2.merge(&mut doc1);

    doc1.set(&birds, "brown", "sparrow").unwrap().unwrap();

    doc2.del(&animals, "birds").unwrap();
    doc1.merge(&mut doc2);

    assert_obj!(
        &doc1,
        ObjId::Root,
        "animals",
        map! {
            "mammals" => {
                mammals => list![{ badger => "badger" }],
            }
        }
    );

    assert_obj!(
        &doc2,
        ObjId::Root,
        "animals",
        map! {
            "mammals" => {
                mammals => list![{ badger => "badger" }],
            }
        }
    );
}

#[test]
fn concurrent_updates_of_concurrently_deleted_objects() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();

    let birds = doc1
        .set(ObjId::Root, "birds", automerge::Value::map())
        .unwrap()
        .unwrap();
    let blackbird = doc1
        .set(&birds, "blackbird", automerge::Value::map())
        .unwrap()
        .unwrap();
    doc1.set(&blackbird, "feathers", "black").unwrap().unwrap();

    doc2.merge(&mut doc1);

    doc1.del(&birds, "blackbird").unwrap();

    doc2.set(&blackbird, "beak", "orange").unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "birds" => {
                birds => map!{},
            }
        }
    );
}

#[test]
fn does_not_interleave_sequence_insertions_at_same_position() {
    let (actor1, actor2) = sorted_actors();
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);

    let wisdom = doc1
        .set(ObjId::Root, "wisdom", automerge::Value::list())
        .unwrap()
        .unwrap();
    doc2.merge(&mut doc1);

    let doc1elems = doc1
        .splice(
            &wisdom,
            0,
            0,
            vec![
                "to".into(),
                "be".into(),
                "is".into(),
                "to".into(),
                "do".into(),
            ],
        )
        .unwrap();

    let doc2elems = doc2
        .splice(
            &wisdom,
            0,
            0,
            vec![
                "to".into(),
                "do".into(),
                "is".into(),
                "to".into(),
                "be".into(),
            ],
        )
        .unwrap();

    doc1.merge(&mut doc2);

    assert_doc!(
        &doc1,
        map! {
            "wisdom" => {wisdom => list![
                {doc1elems[0] => "to"},
                {doc1elems[1] => "be"},
                {doc1elems[2] => "is"},
                {doc1elems[3] => "to"},
                {doc1elems[4] => "do"},
                {doc2elems[0] => "to"},
                {doc2elems[1] => "do"},
                {doc2elems[2] => "is"},
                {doc2elems[3] => "to"},
                {doc2elems[4] => "be"},
            ]}
        }
    );
}

#[test]
fn multiple_insertions_at_same_list_position_with_insertion_by_greater_actor_id() {
    let (actor1, actor2) = sorted_actors();
    assert!(actor2 > actor1);
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);

    let list = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    let two = doc1.insert(&list, 0, "two").unwrap();
    doc2.merge(&mut doc1);

    let one = doc2.insert(&list, 0, "one").unwrap();
    assert_doc!(
        &doc2,
        map! {
            "list" => { list => list![
                { one => "one" },
                { two => "two" },
            ]}
        }
    );
}

#[test]
fn multiple_insertions_at_same_list_position_with_insertion_by_lesser_actor_id() {
    let (actor2, actor1) = sorted_actors();
    assert!(actor2 < actor1);
    let mut doc1 = new_doc_with_actor(actor1);
    let mut doc2 = new_doc_with_actor(actor2);

    let list = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    let two = doc1.insert(&list, 0, "two").unwrap();
    doc2.merge(&mut doc1);

    let one = doc2.insert(&list, 0, "one").unwrap();
    assert_doc!(
        &doc2,
        map! {
            "list" => { list => list![
                { one => "one" },
                { two => "two" },
            ]}
        }
    );
}

#[test]
fn insertion_consistent_with_causality() {
    let mut doc1 = new_doc();
    let mut doc2 = new_doc();

    let list = doc1
        .set(ObjId::Root, "list", automerge::Value::list())
        .unwrap()
        .unwrap();
    let four = doc1.insert(&list, 0, "four").unwrap();
    doc2.merge(&mut doc1);
    let three = doc2.insert(&list, 0, "three").unwrap();
    doc1.merge(&mut doc2);
    let two = doc1.insert(&list, 0, "two").unwrap();
    doc2.merge(&mut doc1);
    let one = doc2.insert(&list, 0, "one").unwrap();

    assert_doc!(
        &doc2,
        map! {
            "list" => {list => list![
                {one => "one"},
                {two => "two"},
                {three => "three"},
                {four => "four"},
            ]}
        }
    );
}

#[test]
fn should_handle_arbitrary_depth_nesting() {
    let mut doc1 = new_doc();
    let a = doc1.set(ObjId::Root, "a", automerge::Value::map()).unwrap().unwrap();
    let b = doc1.set(&a, "b", automerge::Value::map()).unwrap().unwrap();
    let c = doc1.set(&b, "c", automerge::Value::map()).unwrap().unwrap();
    let d = doc1.set(&c, "d", automerge::Value::map()).unwrap().unwrap();
    let e = doc1.set(&d, "e", automerge::Value::map()).unwrap().unwrap();
    let f = doc1.set(&e, "f", automerge::Value::map()).unwrap().unwrap();
    let g = doc1.set(&f, "g", automerge::Value::map()).unwrap().unwrap();
    let h = doc1.set(&g, "h", "h").unwrap().unwrap();
    let j = doc1.set(&f, "i", "j").unwrap().unwrap();

    assert_doc!(
        &doc1,
        map!{
            "a" => {a => map!{
                "b" => {b => map!{
                    "c" => {c => map!{
                        "d" => {d => map!{
                            "e" => {e => map!{
                                "f" => {f => map!{
                                    "g" => {g => map!{
                                        "h" => {h => "h"}
                                    }},
                                    "i" => {j => "j"},
                                }}
                            }}
                        }}
                    }}
                }}
            }}
        }
    );

    Automerge::load(&doc1.save().unwrap()).unwrap();
}

#[test]
fn save_and_restore_empty() {
    let mut doc = new_doc();
    let loaded = Automerge::load(&doc.save().unwrap()).unwrap();

    assert_doc!(&loaded, map! {});
}

#[test]
fn save_restore_complex() {
    let mut doc1 = new_doc();
    let todos = doc1
        .set(ObjId::Root, "todos", automerge::Value::list())
        .unwrap()
        .unwrap();

    let first_todo = doc1
        .insert(todos.clone(), 0, automerge::Value::map())
        .unwrap();
    doc1.set(&first_todo, "title", "water plants")
        .unwrap()
        .unwrap();
    let first_done = doc1.set(first_todo.clone(), "done", false).unwrap().unwrap();

    let mut doc2 = new_doc();
    doc2.merge(&mut doc1);
    let weed_title = doc2
        .set(first_todo.clone(), "title", "weed plants")
        .unwrap()
        .unwrap();

    let kill_title = doc1
        .set(&first_todo, "title", "kill plants")
        .unwrap()
        .unwrap();
    doc1.merge(&mut doc2);

    let reloaded = Automerge::load(&doc1.save().unwrap()).unwrap();

    assert_doc!(
        &reloaded,
        map! {
            "todos" => {todos => list![
                {first_todo => map!{
                    "title" => {
                        weed_title => "weed plants",
                        kill_title => "kill plants",
                    },
                    "done" => {first_done => false},
                }}
            ]}
        }
    );
}
@@ -1,52 +0,0 @@

Try the different editing traces on different automerge implementations.

### Automerge Experiment - pure rust

```code
# cargo run --release
```

#### Benchmarks

There are some criterion benchmarks in the `benches` folder which can be run with `cargo bench` or `cargo criterion`.
For flamegraphing, `cargo flamegraph --bench main -- --bench "save" # or "load" or "replay" or nothing` can be useful.

### Automerge Experiment - wasm api

```code
# node automerge-wasm.js
```

### Automerge Experiment - JS wrapper

```code
# node automerge-js.js
```

### Automerge 1.0 pure javascript - new fast backend

This assumes automerge has been checked out in a directory alongside this repo

```code
# node automerge-1.0.js
```

### Automerge 1.0 with rust backend

This assumes automerge has been checked out in a directory alongside this repo

```code
# node automerge-rs.js
```

### Baseline Test. Javascript Array with no CRDT info

```code
# node baseline.js
```
@@ -1,23 +0,0 @@

// Apply the paper editing trace to an Automerge.Text object, one char at a time
const { edits, finalText } = require('./editing-trace')
const Automerge = require('../automerge-js')

const start = new Date()
let state = Automerge.from({text: new Automerge.Text()})

state = Automerge.change(state, doc => {
  for (let i = 0; i < edits.length; i++) {
    if (i % 1000 === 0) {
      console.log(`Processed ${i} edits in ${new Date() - start} ms`)
    }
    if (edits[i][1] > 0) doc.text.deleteAt(edits[i][0], edits[i][1])
    if (edits[i].length > 2) doc.text.insertAt(edits[i][0], ...edits[i].slice(2))
  }
})

let _ = Automerge.save(state)
console.log(`Done in ${new Date() - start} ms`)

if (state.text.join('') !== finalText) {
  throw new RangeError('ERROR: final text did not match expectation')
}
@@ -1,31 +0,0 @@

// this assumes that the automerge-rs folder is checked out alongside this repo
// and someone has run

// # cd automerge-rs/automerge-backend-wasm
// # yarn release

const { edits, finalText } = require('./editing-trace')
const Automerge = require('../../automerge')
const path = require('path')
const wasmBackend = require(path.resolve("../../automerge-rs/automerge-backend-wasm"))
Automerge.setDefaultBackend(wasmBackend)

const start = new Date()
let state = Automerge.from({text: new Automerge.Text()})

state = Automerge.change(state, doc => {
  for (let i = 0; i < edits.length; i++) {
    if (i % 1000 === 0) {
      console.log(`Processed ${i} edits in ${new Date() - start} ms`)
    }
    if (edits[i][1] > 0) doc.text.deleteAt(edits[i][0], edits[i][1])
    if (edits[i].length > 2) doc.text.insertAt(edits[i][0], ...edits[i].slice(2))
  }
})

console.log(`Done in ${new Date() - start} ms`)

if (state.text.join('') !== finalText) {
  throw new RangeError('ERROR: final text did not match expectation')
}
@@ -1,30 +0,0 @@

// make sure to

// # cd ../automerge-wasm
// # yarn release
// # yarn opt

const { edits, finalText } = require('./editing-trace')
const Automerge = require('../automerge-wasm')

const start = new Date()

let doc = Automerge.init();
let text = doc.set("_root", "text", Automerge.TEXT)

for (let i = 0; i < edits.length; i++) {
  let edit = edits[i]
  if (i % 1000 === 0) {
    console.log(`Processed ${i} edits in ${new Date() - start} ms`)
  }
  doc.splice(text, ...edit)
}

let _ = doc.save()

console.log(`Done in ${new Date() - start} ms`)

if (doc.text(text) !== finalText) {
  throw new RangeError('ERROR: final text did not match expectation')
}
@@ -1,71 +0,0 @@

use automerge::{Automerge, ObjId, Value};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::fs;

fn replay_trace(commands: Vec<(usize, usize, Vec<Value>)>) -> Automerge {
    let mut doc = Automerge::new();

    let text = doc.set(ObjId::Root, "text", Value::text()).unwrap().unwrap();
    for (pos, del, vals) in commands {
        doc.splice(&text, pos, del, vals).unwrap();
    }
    doc.commit(None, None);
    doc
}

fn save_trace(mut doc: Automerge) {
    doc.save().unwrap();
}

fn load_trace(bytes: &[u8]) {
    Automerge::load(bytes).unwrap();
}

fn bench(c: &mut Criterion) {
    let contents = fs::read_to_string("edits.json").expect("cannot read edits file");
    let edits = json::parse(&contents).expect("can't parse edits");
    let mut commands = vec![];
    for i in 0..edits.len() {
        let pos: usize = edits[i][0].as_usize().unwrap();
        let del: usize = edits[i][1].as_usize().unwrap();
        let mut vals = vec![];
        for j in 2..edits[i].len() {
            let v = edits[i][j].as_str().unwrap();
            vals.push(Value::str(v));
        }
        commands.push((pos, del, vals));
    }

    let mut group = c.benchmark_group("edit trace");
    group.throughput(Throughput::Elements(commands.len() as u64));

    group.bench_with_input(
        BenchmarkId::new("replay", commands.len()),
        &commands,
        |b, commands| {
            b.iter_batched(
                || commands.clone(),
                replay_trace,
                criterion::BatchSize::LargeInput,
            )
        },
    );

    let commands_len = commands.len();
    let mut doc = replay_trace(commands);
    group.bench_with_input(BenchmarkId::new("save", commands_len), &doc, |b, doc| {
        b.iter_batched(|| doc.clone(), save_trace, criterion::BatchSize::LargeInput)
    });

    let bytes = doc.save().unwrap();
    group.bench_with_input(
        BenchmarkId::new("load", commands_len),
        &bytes,
        |b, bytes| b.iter(|| load_trace(bytes)),
    );

    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
@@ -1,32 +0,0 @@

use automerge::{Automerge, AutomergeError, ObjId, Value};
use std::fs;
use std::time::Instant;

fn main() -> Result<(), AutomergeError> {
    let contents = fs::read_to_string("edits.json").expect("cannot read edits file");
    let edits = json::parse(&contents).expect("can't parse edits");
    let mut commands = vec![];
    for i in 0..edits.len() {
        let pos: usize = edits[i][0].as_usize().unwrap();
        let del: usize = edits[i][1].as_usize().unwrap();
        let mut vals = vec![];
        for j in 2..edits[i].len() {
            let v = edits[i][j].as_str().unwrap();
            vals.push(Value::str(v));
        }
        commands.push((pos, del, vals));
    }
    let mut doc = Automerge::new();

    let now = Instant::now();
    let text = doc.set(ObjId::Root, "text", Value::text()).unwrap().unwrap();
    for (i, (pos, del, vals)) in commands.into_iter().enumerate() {
        if i % 1000 == 0 {
            println!("Processed {} edits in {} ms", i, now.elapsed().as_millis());
        }
        doc.splice(&text, pos, del, vals)?;
    }
    let _ = doc.save();
    println!("Done in {} ms", now.elapsed().as_millis());
    Ok(())
}
flake.lock (generated, 33 lines changed)

@@ -2,11 +2,11 @@
   "nodes": {
     "flake-utils": {
       "locked": {
-        "lastModified": 1619345332,
-        "narHash": "sha256-qHnQkEp1uklKTpx3MvKtY6xzgcqXDsz5nLilbbuL+3A=",
+        "lastModified": 1667395993,
+        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "2ebf2558e5bf978c7fb8ea927dfaed8fefab2e28",
+        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
         "type": "github"
       },
       "original": {
@@ -17,11 +17,11 @@
     },
     "flake-utils_2": {
       "locked": {
-        "lastModified": 1614513358,
-        "narHash": "sha256-LakhOx3S1dRjnh0b5Dg3mbZyH0ToC9I8Y2wKSkBaTzU=",
+        "lastModified": 1659877975,
+        "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "5466c5bbece17adaab2d82fae80b46e807611bf3",
+        "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
         "type": "github"
       },
       "original": {
@@ -32,11 +32,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1620340338,
-        "narHash": "sha256-Op/4K0+Z9Sp5jtFH0s/zMM4H7VFZxrekcAmjQ6JpQ4w=",
+        "lastModified": 1669542132,
+        "narHash": "sha256-DRlg++NJAwPh8io3ExBJdNW7Djs3plVI5jgYQ+iXAZQ=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "63586475587d7e0e078291ad4b49b6f6a6885100",
+        "rev": "a115bb9bd56831941be3776c8a94005867f316a7",
         "type": "github"
       },
       "original": {
@@ -48,15 +48,16 @@
     },
     "nixpkgs_2": {
       "locked": {
-        "lastModified": 1617325113,
-        "narHash": "sha256-GksR0nvGxfZ79T91UUtWjjccxazv6Yh/MvEJ82v1Xmw=",
-        "owner": "nixos",
+        "lastModified": 1665296151,
+        "narHash": "sha256-uOB0oxqxN9K7XGF1hcnY+PQnlQJ+3bP2vCn/+Ru/bbc=",
+        "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "54c1e44240d8a527a8f4892608c4bce5440c3ecb",
+        "rev": "14ccaaedd95a488dd7ae142757884d8e125b3363",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
+        "ref": "nixpkgs-unstable",
         "repo": "nixpkgs",
         "type": "github"
       }
@@ -74,11 +75,11 @@
       "nixpkgs": "nixpkgs_2"
     },
     "locked": {
-      "lastModified": 1620355527,
-      "narHash": "sha256-mUTnUODiAtxH83gbv7uuvCbqZ/BNkYYk/wa3MkwrskE=",
+      "lastModified": 1669775522,
+      "narHash": "sha256-6xxGArBqssX38DdHpDoPcPvB/e79uXyQBwpBcaO/BwY=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "d8efe70dc561c4bea0b7bf440d36ce98c497e054",
+      "rev": "3158e47f6b85a288d12948aeb9a048e0ed4434d6",
       "type": "github"
     },
     "original": {
flake.nix (56 lines changed)

@@ -3,45 +3,49 @@

   inputs = {
     nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
-    flake-utils = {
-      url = "github:numtide/flake-utils";
-      inputs.nixpkgs.follows = "nixpkgs";
-    };
+    flake-utils.url = "github:numtide/flake-utils";
     rust-overlay.url = "github:oxalica/rust-overlay";
   };

-  outputs = { self, nixpkgs, flake-utils, rust-overlay }:
+  outputs = {
+    self,
+    nixpkgs,
+    flake-utils,
+    rust-overlay,
+  }:
     flake-utils.lib.eachDefaultSystem
-    (system:
-      let
+    (system: let
       pkgs = import nixpkgs {
-        overlays = [ rust-overlay.overlay ];
+        overlays = [rust-overlay.overlays.default];
         inherit system;
       };
       lib = pkgs.lib;
-      rust = pkgs.rust-bin.stable.latest.rust;
-      cargoNix = pkgs.callPackage ./Cargo.nix {
-        inherit pkgs;
-        release = true;
+      rust = pkgs.rust-bin.stable.latest.default;
+    in {
+      formatter = pkgs.alejandra;
+
+      packages = {
+        deadnix = pkgs.runCommand "deadnix" {} ''
+          ${pkgs.deadnix}/bin/deadnix --fail ${./.}
+          mkdir $out
+        '';
       };
-      debugCargoNix = pkgs.callPackage ./Cargo.nix {
-        inherit pkgs;
-        release = false;
+
+      checks = {
+        inherit (self.packages.${system}) deadnix;
       };
-    in
-    {
-      devShell = pkgs.mkShell {
-        buildInputs = with pkgs;
-          [
+
+      devShells.default = pkgs.mkShell {
+        buildInputs = with pkgs; [
           (rust.override {
-            extensions = [ "rust-src" ];
-            targets = [ "wasm32-unknown-unknown" ];
+            extensions = ["rust-src"];
+            targets = ["wasm32-unknown-unknown"];
           })
           cargo-edit
           cargo-watch
           cargo-criterion
           cargo-fuzz
           cargo-flamegraph
           cargo-deny
           crate2nix
           wasm-pack
           pkgconfig
@@ -50,6 +54,12 @@

           nodejs
           yarn
+          deno
+
+          # c deps
+          cmake
+          cmocka
+          doxygen

           rnix-lsp
           nixpkgs-fmt
BIN img/brandmark.png (new file, 1.4 KiB)

img/brandmark.svg (new file, 885 B)

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 80.46 80.46"><defs><style>.cls-1{fill:#fc3;}.cls-1,.cls-2{fill-rule:evenodd;}.cls-2{fill:#2a1e20;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><path class="cls-1" d="M79.59,38.12a3,3,0,0,1,0,4.21L42.34,79.58a3,3,0,0,1-4.22,0L.88,42.33a3,3,0,0,1,0-4.2L38.12.87a3,3,0,0,1,4.22,0"/><path class="cls-2" d="M76.87,38.76,41.71,3.59a2.09,2.09,0,0,0-2.93,0L3.62,38.76a2.07,2.07,0,0,0,0,2.93L38.78,76.85a2.07,2.07,0,0,0,2.93,0L76.87,41.69a2.07,2.07,0,0,0,0-2.93m-2,.79a.93.93,0,0,1,0,1.34l-33.94,34a1,1,0,0,1-1.33,0l-34-33.95a.94.94,0,0,1,0-1.32l34-34a1,1,0,0,1,1.33,0Z"/><path class="cls-2" d="M36.25,32.85v1.71c0,6.35-5.05,11.38-9.51,16.45l4.08,4.07c2.48-2.6,4.72-5.24,5.43-6.19V60.14h7.94V32.88l4.25,1.3a1.68,1.68,0,0,0,2.25-2.24L40.27,16.7,29.75,31.94A1.68,1.68,0,0,0,32,34.18"/></g></g></svg>

BIN img/favicon.ico (new file, 254 KiB)

BIN img/lockup.png (new file, 5.7 KiB)

img/lockup.svg (new file, 3 KiB)

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 400.72 80.46"><defs><style>.cls-1{fill:#fc3;}.cls-1,.cls-2{fill-rule:evenodd;}.cls-2{fill:#2a1e20;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><path class="cls-1" d="M79.59,38.12a3,3,0,0,1,0,4.21L42.34,79.58a3,3,0,0,1-4.22,0L.88,42.33a3,3,0,0,1,0-4.2L38.12.87a3,3,0,0,1,4.22,0"/><path class="cls-2" d="M76.87,38.76,41.71,3.59a2.09,2.09,0,0,0-2.93,0L3.62,38.76a2.07,2.07,0,0,0,0,2.93L38.78,76.85a2.07,2.07,0,0,0,2.93,0L76.87,41.69a2.07,2.07,0,0,0,0-2.93m-2,.79a.93.93,0,0,1,0,1.34l-33.94,34a1,1,0,0,1-1.33,0l-34-33.95a.94.94,0,0,1,0-1.32l34-34a1,1,0,0,1,1.33,0Z"/><path class="cls-2" d="M36.25,32.85v1.71c0,6.35-5.05,11.38-9.51,16.45l4.08,4.07c2.48-2.6,4.72-5.24,5.43-6.19V60.14h7.94V32.88l4.25,1.3a1.68,1.68,0,0,0,2.25-2.24L40.27,16.7,29.75,31.94A1.68,1.68,0,0,0,32,34.18"/><path d="M124.14,60.08,120.55,50h-17L100,60.08H93.34l15.34-42.61h6.75L131,60.08Zm-9-25.63c-1-3-2.74-8-3.22-9.8-.49,1.83-2,6.7-3.11,9.86l-3.41,9.74H118.6Z"/><path d="M156.7,60.08V57c-1.58,2.32-4.74,3.72-8,3.72-7.43,0-11.38-4.87-11.38-14.31V28.12h6.27V46.2c0,6.45,2.43,8.76,6.57,8.76s6.57-3,6.57-8.15V28.12H163v32Z"/><path d="M187.5,59.29a12.74,12.74,0,0,1-6.15,1.46c-4.44,0-7.18-2.74-7.18-8.46V33.84h-4.56V28.12h4.56V19l6.15-3.29V28.12h7.91v5.72h-7.91V51.19c0,3,1,3.83,3.29,3.83a10,10,0,0,0,4.62-1.27Z"/><path d="M208.08,60.75c-8,0-14.06-6.64-14.06-16.62,0-10.47,6.2-16.68,14.24-16.68S222.5,34,222.5,44C222.5,54.54,216.29,60.75,208.08,60.75ZM208,33.42c-4.75,0-7.67,4.2-7.67,10.53,0,7,3.22,10.83,8,10.83s7.85-4.81,7.85-10.65C216.17,37.62,213.07,33.42,208,33.42Z"/><path d="M267.36,60.08V42c0-6.45-2-8.77-6.15-8.77s-6.14,3-6.14,8.16V60.08H248.8V42c0-6.45-2-8.77-6.15-8.77s-6.15,3-6.15,8.16V60.08h-6.27v-32h6.27v3a9,9,0,0,1,7.61-3.71c4.32,0,7.06,1.65,8.76,4.69,2.32-2.86,4.81-4.69,9.8-4.69,7.43,0,11,4.87,11,14.31V60.08Z"/><path d="M308.39,46.32H287.27c.66,6.15,4.13,8.77,8,8.77a11.22,11.22,0,0,0,6.94-2.56l3.71,4a14.9,14.9,0,0,1-11,4.2c-7.48,0-13.81-6-13.81-16.62,0-10.84,5.72-16.68,14-16.68,9.07,0,13.45,7.37,13.45,16C308.57,44.62,308.45,45.65,308.39,46.32Zm-13.7-13.21c-4.2,0-6.76,2.92-7.3,8h14.85C301.93,36.76,299.86,33.11,294.69,33.11Z"/><path d="M333.71,34.76a9.37,9.37,0,0,0-4.81-1.16c-4,0-6.27,2.8-6.27,8.22V60.08h-6.27v-32h6.27v3a8.86,8.86,0,0,1,7.3-3.71,9.22,9.22,0,0,1,5.42,1.34Z"/><path d="M350.45,71.82l-2.14-4.74c9-.43,11-2.86,11-9.5V57c-2.31,2.13-4.93,3.72-8.28,3.72-6.81,0-12.29-5-12.29-17.17,0-10.95,6-16.13,12.6-16.13a11.11,11.11,0,0,1,8,3.65v-3h6.27V57C365.54,66.77,362,71.46,350.45,71.82Zm8.94-34.39c-1.4-1.88-4.32-4.2-7.48-4.2-4.51,0-6.94,3.41-6.94,10.17,0,8,2.55,11.56,7.18,11.56,3,0,5.6-2,7.24-4.07Z"/><path d="M400.54,46.32H379.42c.67,6.15,4.14,8.77,8,8.77a11.22,11.22,0,0,0,6.94-2.56l3.71,4a14.87,14.87,0,0,1-11,4.2c-7.49,0-13.82-6-13.82-16.62,0-10.84,5.72-16.68,14-16.68,9.07,0,13.45,7.37,13.45,16C400.72,44.62,400.6,45.65,400.54,46.32Zm-13.7-13.21c-4.2,0-6.75,2.92-7.3,8h14.85C394.09,36.76,392,33.11,386.84,33.11Z"/></g></g></svg>

BIN img/sign.png (new file, 7.7 KiB)

img/sign.svg (new file, 3.5 KiB)

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 485 108"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#fc3;}.cls-3{fill:#2a1e20;fill-rule:evenodd;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><path class="cls-1" d="M465,5a15,15,0,0,1,15,15V88a15,15,0,0,1-15,15H20A15,15,0,0,1,5,88V20A15,15,0,0,1,20,5H465m0-5H20A20,20,0,0,0,0,20V88a20,20,0,0,0,20,20H465a20,20,0,0,0,20-20V20A20,20,0,0,0,465,0Z"/><rect class="cls-2" x="3.7" y="3.7" width="477.6" height="100.6" rx="16.3"/><path class="cls-2" d="M465,5a15,15,0,0,1,15,15V88a15,15,0,0,1-15,15H20A15,15,0,0,1,5,88V20A15,15,0,0,1,20,5H465m0-2.6H20A17.63,17.63,0,0,0,2.4,20V88A17.63,17.63,0,0,0,20,105.6H465A17.63,17.63,0,0,0,482.6,88V20A17.63,17.63,0,0,0,465,2.4Z"/><path d="M465,7.6A12.41,12.41,0,0,1,477.4,20V88A12.41,12.41,0,0,1,465,100.4H20A12.41,12.41,0,0,1,7.6,88V20A12.41,12.41,0,0,1,20,7.6H465M465,5H20A15,15,0,0,0,5,20V88a15,15,0,0,0,15,15H465a15,15,0,0,0,15-15V20A15,15,0,0,0,465,5Z"/><path class="cls-3" d="M106.1,51.48l-34-34a2,2,0,0,0-2.83,0l-34,34a2,2,0,0,0,0,2.82l34,34a2,2,0,0,0,2.83,0l34-34a2,2,0,0,0,0-2.82m-.76.74a.93.93,0,0,1,0,1.34L71.4,87.5a1,1,0,0,1-1.33,0l-34-33.94a.94.94,0,0,1,0-1.32l34-34a1,1,0,0,1,1.33,0Z"/><path class="cls-3" d="M67,45.62V47c0,6.2-5.1,11.11-9.59,16.06l4.11,4C64,64.52,66.28,61.94,67,61V72h8V45.37l4.29,1.27a1.67,1.67,0,0,0,2.27-2.19L71,29.56,60.45,44.45a1.67,1.67,0,0,0,2.27,2.19"/><path d="M162.62,72.74,159,62.64H142l-3.53,10.1h-6.63l15.34-42.61h6.75l15.53,42.61Zm-9-25.62c-1-3-2.74-8-3.22-9.8-.49,1.82-2,6.69-3.11,9.86l-3.41,9.73h13.15Z"/><path d="M195.18,72.74v-3c-1.58,2.31-4.74,3.71-8,3.71-7.43,0-11.38-4.87-11.38-14.3V40.78H182V58.86c0,6.45,2.43,8.77,6.57,8.77s6.57-3,6.57-8.16V40.78h6.27v32Z"/><path d="M226,72a12.74,12.74,0,0,1-6.15,1.46c-4.44,0-7.18-2.74-7.18-8.46V46.51h-4.56V40.78h4.56V31.65l6.15-3.28V40.78h7.91v5.73H218.8V63.85c0,3,1,3.84,3.29,3.84a10,10,0,0,0,4.62-1.28Z"/><path d="M246.56,73.41c-8,0-14.06-6.63-14.06-16.62,0-10.47,6.2-16.67,14.24-16.67S261,46.63,261,56.61C261,67.2,254.77,73.41,246.56,73.41Zm-.07-27.33c-4.74,0-7.66,4.2-7.66,10.53,0,7,3.22,10.83,8,10.83s7.85-4.8,7.85-10.65C254.65,50.28,251.55,46.08,246.49,46.08Z"/><path d="M305.84,72.74V54.66c0-6.45-2-8.76-6.15-8.76s-6.14,3-6.14,8.15V72.74h-6.27V54.66c0-6.45-2-8.76-6.15-8.76s-6.15,3-6.15,8.15V72.74h-6.27v-32H275v3a9,9,0,0,1,7.61-3.71c4.32,0,7.06,1.64,8.76,4.68,2.32-2.86,4.81-4.68,9.8-4.68,7.43,0,11,4.86,11,14.3V72.74Z"/><path d="M346.87,59H325.74c.67,6.15,4.14,8.77,8,8.77a11.16,11.16,0,0,0,6.94-2.56l3.71,4a14.86,14.86,0,0,1-11,4.2c-7.48,0-13.81-6-13.81-16.62,0-10.83,5.72-16.67,14-16.67,9.07,0,13.45,7.36,13.45,16C347.05,57.28,346.93,58.31,346.87,59Zm-13.7-13.2c-4.2,0-6.76,2.92-7.3,8h14.85C340.41,49.43,338.34,45.78,333.17,45.78Z"/><path d="M372.19,47.42a9.37,9.37,0,0,0-4.81-1.16c-4,0-6.27,2.8-6.27,8.22V72.74h-6.27v-32h6.27v3a8.86,8.86,0,0,1,7.3-3.71,9.22,9.22,0,0,1,5.42,1.33Z"/><path d="M388.92,84.49l-2.13-4.75c9-.43,11-2.86,11-9.5V69.7c-2.31,2.13-4.93,3.71-8.28,3.71-6.81,0-12.29-5-12.29-17.16,0-11,6-16.13,12.6-16.13a11.07,11.07,0,0,1,8,3.65v-3H404V69.7C404,79.44,400.49,84.12,388.92,84.49Zm8.95-34.39c-1.4-1.89-4.32-4.2-7.48-4.2-4.51,0-6.94,3.41-6.94,10.16,0,8,2.55,11.57,7.18,11.57,3,0,5.6-2,7.24-4.08Z"/><path d="M439,59H417.9c.67,6.15,4.14,8.77,8,8.77a11.16,11.16,0,0,0,6.94-2.56l3.71,4a14.84,14.84,0,0,1-11,4.2c-7.49,0-13.82-6-13.82-16.62,0-10.83,5.72-16.67,14-16.67,9.07,0,13.45,7.36,13.45,16C439.2,57.28,439.08,58.31,439,59Zm-13.7-13.2c-4.2,0-6.75,2.92-7.3,8h14.85C432.57,49.43,430.5,45.78,425.32,45.78Z"/></g></g></svg>
javascript/.denoifyrc.json (new file, 3 lines)

@@ -0,0 +1,3 @@
{
  "replacer": "scripts/denoify-replacer.mjs"
}

javascript/.eslintignore (new file, 2 lines)

@@ -0,0 +1,2 @@
dist
examples

javascript/.eslintrc.cjs (new file, 15 lines)

@@ -0,0 +1,15 @@
module.exports = {
  root: true,
  parser: "@typescript-eslint/parser",
  plugins: ["@typescript-eslint"],
  extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
  rules: {
    "@typescript-eslint/no-unused-vars": [
      "error",
      {
        argsIgnorePattern: "^_",
        varsIgnorePattern: "^_",
      },
    ],
  },
}

javascript/.gitignore (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
/node_modules
/yarn.lock
dist
docs/
.vim
deno_dist/

javascript/.prettierignore (new file, 4 lines)

@@ -0,0 +1,4 @@
e2e/verdacciodb
dist
docs
deno_dist

javascript/.prettierrc (new file, 4 lines)

@@ -0,0 +1,4 @@
{
  "semi": false,
  "arrowParens": "avoid"
}

javascript/HACKING.md (new file, 39 lines)

@@ -0,0 +1,39 @@
## Architecture

The `@automerge/automerge` package is a set of
[`Proxy`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Proxy)
objects which provide an idiomatic javascript interface built on top of the
lower level `@automerge/automerge-wasm` package (which is in turn built from the
Rust codebase and can be found in `~/automerge-wasm`). That is, the responsibility
of this codebase is (see the sketch after this list):

- To map from the javascript data model to the underlying `set`, `make`,
  `insert`, and `delete` operations of Automerge.
- To expose a more convenient interface to functions in `automerge-wasm` which
  generate messages to send over the network or compressed file formats to store
  on disk.
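For a concrete sense of those low-level operations, here is a minimal sketch using the Rust API that appears elsewhere in this diff (`set`, `insert`, and `del` on an `Automerge` document); this is an editor's illustration, not part of the committed file:

```rust
use automerge::{Automerge, ObjId, Value};

fn main() {
    let mut doc = Automerge::new();
    // "make": setting a key of the root map to a list value creates a new object
    let list = doc
        .set(ObjId::Root, "tasks", Value::list())
        .unwrap()
        .unwrap();
    // "insert" a new element, then "set" it to a different value
    doc.insert(&list, 0, "feed fish").unwrap();
    doc.set(&list, 0, "water plants").unwrap();
    // "delete" removes the element again
    doc.del(&list, 0).unwrap();
}
```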
|
||||
|
||||
## Building and testing
|
||||
|
||||
Much of the functionality of this package depends on the
|
||||
`@automerge/automerge-wasm` package and frequently you will be working on both
|
||||
of them at the same time. It would be frustrating to have to push
|
||||
`automerge-wasm` to NPM every time you want to test a change but I (Alex) also
|
||||
don't trust `yarn link` to do the right thing here. Therefore, the `./e2e`
|
||||
folder contains a little yarn package which spins up a local NPM registry. See
|
||||
`./e2e/README` for details. In brief though:
|
||||
|
||||
To build `automerge-wasm` and install it in the local `node_modules`
|
||||
|
||||
```bash
|
||||
cd e2e && yarn install && yarn run e2e buildjs
|
||||
```
|
||||
|
||||
NOw that you've done this you can run the tests
|
||||
|
||||
```bash
|
||||
yarn test
|
||||
```
|
||||
|
||||
If you make changes to the `automerge-wasm` package you will need to re-run
|
||||
`yarn e2e buildjs`
|
10
javascript/LICENSE
Normal file
|
@ -0,0 +1,10 @@
|
|||
MIT License
|
||||
|
||||
Copyright 2022, Ink & Switch LLC
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
109
javascript/README.md
Normal file
|
@ -0,0 +1,109 @@
|
|||
## Automerge
|
||||
|
||||
Automerge is a library of data structures for building collaborative
|
||||
applications, this package is the javascript implementation.
|
||||
|
||||
Detailed documentation is available at [automerge.org](http://automerge.org/)
|
||||
but see the following for a short getting started guid.
|
||||
|
||||
## Quickstart
|
||||
|
||||
First, install the library.
|
||||
|
||||
```
|
||||
yarn add @automerge/automerge
|
||||
```
|
||||
|
||||
If you're writing a `node` application, you can skip straight to [Make some
|
||||
data](#make-some-data). If you're in a browser you need a bundler
|
||||
|
||||
### Bundler setup
|
||||
|
||||
`@automerge/automerge` is a wrapper around a core library which is written in
|
||||
rust, compiled to WebAssembly and distributed as a separate package called
|
||||
`@automerge/automerge-wasm`. Browsers don't currently support WebAssembly
|
||||
modules taking part in ESM module imports, so you must use a bundler to import
|
||||
`@automerge/automerge` in the browser. There are a lot of bundlers out there, we
|
||||
have examples for common bundlers in the `examples` folder. Here is a short
|
||||
example using Webpack 5.
|
||||
|
||||
Assuming a standard setup of a new webpack project, you'll need to enable the
|
||||
`asyncWebAssembly` experiment. In a typical webpack project that means adding
|
||||
something like this to `webpack.config.js`
|
||||
|
||||
```javascript
|
||||
module.exports = {
|
||||
...
|
||||
experiments: { asyncWebAssembly: true },
|
||||
performance: { // we dont want the wasm blob to generate warnings
|
||||
hints: false,
|
||||
maxEntrypointSize: 512000,
|
||||
maxAssetSize: 512000
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Make some data
|
||||
|
||||
Automerge allows to separate threads of execution to make changes to some data
|
||||
and always be able to merge their changes later.
|
||||
|
||||
```javascript
|
||||
import * as automerge from "@automerge/automerge"
|
||||
import * as assert from "assert"
|
||||
|
||||
let doc1 = automerge.from({
|
||||
tasks: [
|
||||
{ description: "feed fish", done: false },
|
||||
{ description: "water plants", done: false },
|
||||
],
|
||||
})
|
||||
|
||||
// Create a new thread of execution
|
||||
let doc2 = automerge.clone(doc1)
|
||||
|
||||
// Now we concurrently make changes to doc1 and doc2
|
||||
|
||||
// Complete a task in doc2
|
||||
doc2 = automerge.change(doc2, d => {
|
||||
d.tasks[0].done = true
|
||||
})
|
||||
|
||||
// Add a task in doc1
|
||||
doc1 = automerge.change(doc1, d => {
|
||||
d.tasks.push({
|
||||
description: "water fish",
|
||||
done: false,
|
||||
})
|
||||
})
|
||||
|
||||
// Merge changes from both docs
|
||||
doc1 = automerge.merge(doc1, doc2)
|
||||
doc2 = automerge.merge(doc2, doc1)
|
||||
|
||||
// Both docs are merged and identical
|
||||
assert.deepEqual(doc1, {
|
||||
tasks: [
|
||||
{ description: "feed fish", done: true },
|
||||
{ description: "water plants", done: false },
|
||||
{ description: "water fish", done: false },
|
||||
],
|
||||
})
|
||||
|
||||
assert.deepEqual(doc2, {
|
||||
tasks: [
|
||||
{ description: "feed fish", done: true },
|
||||
{ description: "water plants", done: false },
|
||||
{ description: "water fish", done: false },
|
||||
],
|
||||
})
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
See [HACKING.md](./HACKING.md)
|
||||
|
||||
## Meta
|
||||
|
||||
Copyright 2017–present, the Automerge contributors. Released under the terms of the
|
||||
MIT license (see `LICENSE`).
|
12
javascript/config/cjs.json
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"exclude": [
|
||||
"../dist/**/*",
|
||||
"../node_modules",
|
||||
"../test/**/*",
|
||||
"../src/**/*.deno.ts"
|
||||
],
|
||||
"compilerOptions": {
|
||||
"outDir": "../dist/cjs"
|
||||
}
|
||||
}
|
13
javascript/config/declonly.json
Normal file
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"exclude": [
|
||||
"../dist/**/*",
|
||||
"../node_modules",
|
||||
"../test/**/*",
|
||||
"../src/**/*.deno.ts"
|
||||
],
|
||||
"emitDeclarationOnly": true,
|
||||
"compilerOptions": {
|
||||
"outDir": "../dist"
|
||||
}
|
||||
}
|
14
javascript/config/mjs.json
Normal file
|
@ -0,0 +1,14 @@
|
|||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"exclude": [
|
||||
"../dist/**/*",
|
||||
"../node_modules",
|
||||
"../test/**/*",
|
||||
"../src/**/*.deno.ts"
|
||||
],
|
||||
"compilerOptions": {
|
||||
"target": "es6",
|
||||
"module": "es6",
|
||||
"outDir": "../dist/mjs"
|
||||
}
|
||||
}
|
10
javascript/deno-tests/deno.ts
Normal file
|
@ -0,0 +1,10 @@
|
|||
import * as Automerge from "../deno_dist/index.ts"
|
||||
|
||||
Deno.test("It should create, clone and free", () => {
|
||||
let doc1 = Automerge.init()
|
||||
let doc2 = Automerge.clone(doc1)
|
||||
|
||||
// this is only needed if weakrefs are not supported
|
||||
Automerge.free(doc1)
|
||||
Automerge.free(doc2)
|
||||
})
|
3
javascript/e2e/.gitignore
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
node_modules/
|
||||
verdacciodb/
|
||||
htpasswd
|
70
javascript/e2e/README.md
Normal file
@@ -0,0 +1,70 @@
# End to end testing for javascript packaging

The network of packages and bundlers we rely on to get the `automerge` package
working is a little complex. We have the `automerge-wasm` package, which the
`automerge` package depends upon, which means that anyone who depends on
`automerge` needs to either a) be using node or b) use a bundler in order to
load the underlying WASM module which is packaged in `automerge-wasm`.

The various bundlers involved are complicated and capricious, so we need an
easy way of testing that everything is in fact working as expected. To do this
we run a custom NPM registry (namely [Verdaccio](https://verdaccio.org/)),
build the `automerge-wasm` and `automerge` packages, and publish them to this
registry. Once we have this registry running we are able to build the example
projects which depend on these packages and check that everything works as
expected.
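
In outline, the tool automates roughly this flow (a sketch only: the paths and
the registry port are taken from the configuration in this directory, and the
authoritative sequence of steps lives in `index.ts`):

```
# start the local registry
yarn verdaccio --config verdaccio.yaml

# publish the wasm package and the js package to it
yarn --cwd ../../rust/automerge-wasm publish --registry http://localhost:4873
yarn --cwd .. publish --registry http://localhost:4873
```
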
## Usage

First, install everything:

```
yarn install
```

### Build `automerge-js`

This builds the `automerge-wasm` package and then runs `yarn build` in the
`automerge-js` project with the `--registry` set to the verdaccio registry. The
end result is that you can run `yarn test` in the resulting `automerge-js`
directory in order to run tests against the current `automerge-wasm`.

```
yarn e2e buildjs
```

### Build examples

This builds either all of the examples in `automerge-js/examples` or just a
subset of them. Once this is complete you can run the relevant scripts (e.g.
`vite dev` for the Vite example) to check everything works.

```
yarn e2e buildexamples
```

Or, to just build the webpack example:

```
yarn e2e buildexamples -e webpack
```

### Run Registry

If you're experimenting with a project which is not in the `examples` folder
you'll need a running registry. `run-registry` builds and publishes
`automerge-js` and `automerge-wasm` and then runs the registry at
`localhost:4873`.

```
yarn e2e run-registry
```

You can now run `yarn install --registry http://localhost:4873` to experiment
with the built packages.

## Using the `dev` build of `automerge-wasm`

All the commands above take a `-p` flag which can be either `release` or
`dev`. The `dev` profile builds with additional debug symbols, which makes
errors less cryptic.
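
For example, to run the `buildjs` flow against a dev build of `automerge-wasm`
(using the flags described above):

```
yarn e2e buildjs -p dev
```
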
534
javascript/e2e/index.ts
Normal file
@@ -0,0 +1,534 @@
import { once } from "events"
import { setTimeout } from "timers/promises"
import { spawn, ChildProcess } from "child_process"
import * as child_process from "child_process"
import {
  command,
  subcommands,
  run,
  array,
  multioption,
  option,
  Type,
} from "cmd-ts"
import * as path from "path"
import * as fsPromises from "fs/promises"
import fetch from "node-fetch"

const VERDACCIO_DB_PATH = path.normalize(`${__dirname}/verdacciodb`)
const VERDACCIO_CONFIG_PATH = path.normalize(`${__dirname}/verdaccio.yaml`)
const AUTOMERGE_WASM_PATH = path.normalize(
  `${__dirname}/../../rust/automerge-wasm`
)
const AUTOMERGE_JS_PATH = path.normalize(`${__dirname}/..`)
const EXAMPLES_DIR = path.normalize(path.join(__dirname, "../", "examples"))

// The different example projects in "../examples"
type Example = "webpack" | "vite" | "create-react-app"

// Type to parse strings to `Example` so the types line up for the `buildExamples` command
const ReadExample: Type<string, Example> = {
  async from(str) {
    if (str === "webpack") {
      return "webpack"
    } else if (str === "vite") {
      return "vite"
    } else if (str === "create-react-app") {
      return "create-react-app"
    } else {
      throw new Error(`Unknown example type ${str}`)
    }
  },
}

type Profile = "dev" | "release"

const ReadProfile: Type<string, Profile> = {
  async from(str) {
    if (str === "dev") {
      return "dev"
    } else if (str === "release") {
      return "release"
    } else {
      throw new Error(`Unknown profile ${str}`)
    }
  },
}

const buildjs = command({
  name: "buildjs",
  args: {
    profile: option({
      type: ReadProfile,
      long: "profile",
      short: "p",
      defaultValue: () => "dev" as Profile,
    }),
  },
  handler: ({ profile }) => {
    console.log("building js")
    withPublishedWasm(profile, async (registryUrl: string) => {
      await buildAndPublishAutomergeJs(registryUrl)
    })
  },
})

const buildWasm = command({
  name: "buildwasm",
  args: {
    profile: option({
      type: ReadProfile,
      long: "profile",
      short: "p",
      defaultValue: () => "dev" as Profile,
    }),
  },
  handler: ({ profile }) => {
    console.log("building automerge-wasm")
    withRegistry(buildAutomergeWasm(profile))
  },
})

const buildexamples = command({
  name: "buildexamples",
  args: {
    examples: multioption({
      long: "example",
      short: "e",
      type: array(ReadExample),
    }),
    profile: option({
      type: ReadProfile,
      long: "profile",
      short: "p",
      defaultValue: () => "dev" as Profile,
    }),
  },
  handler: ({ examples, profile }) => {
    if (examples.length === 0) {
      examples = ["webpack", "vite", "create-react-app"]
    }
    buildExamples(examples, profile)
  },
})

const runRegistry = command({
  name: "run-registry",
  args: {
    profile: option({
      type: ReadProfile,
      long: "profile",
      short: "p",
      defaultValue: () => "dev" as Profile,
    }),
  },
  handler: ({ profile }) => {
    withPublishedWasm(profile, async (registryUrl: string) => {
      await buildAndPublishAutomergeJs(registryUrl)
      console.log("\n************************")
      console.log(`  Verdaccio NPM registry is running at ${registryUrl}`)
      console.log("  press CTRL-C to exit ")
      console.log("************************")
      await once(process, "SIGINT")
    }).catch(e => {
      console.error(`Failed: ${e}`)
    })
  },
})

const app = subcommands({
  name: "e2e",
  cmds: {
    buildjs,
    buildexamples,
    buildwasm: buildWasm,
    "run-registry": runRegistry,
  },
})

run(app, process.argv.slice(2))

async function buildExamples(examples: Array<Example>, profile: Profile) {
  await withPublishedWasm(profile, async registryUrl => {
    printHeader("building and publishing automerge")
    await buildAndPublishAutomergeJs(registryUrl)
    for (const example of examples) {
      printHeader(`building ${example} example`)
      if (example === "webpack") {
        const projectPath = path.join(EXAMPLES_DIR, example)
        await removeExistingAutomerge(projectPath)
        await fsPromises.rm(path.join(projectPath, "yarn.lock"), {
          force: true,
        })
        await spawnAndWait(
          "yarn",
          [
            "--cwd",
            projectPath,
            "install",
            "--registry",
            registryUrl,
            "--check-files",
          ],
          { stdio: "inherit" }
        )
        await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {
          stdio: "inherit",
        })
      } else if (example === "vite") {
        const projectPath = path.join(EXAMPLES_DIR, example)
        await removeExistingAutomerge(projectPath)
        await fsPromises.rm(path.join(projectPath, "yarn.lock"), {
          force: true,
        })
        await spawnAndWait(
          "yarn",
          [
            "--cwd",
            projectPath,
            "install",
            "--registry",
            registryUrl,
            "--check-files",
          ],
          { stdio: "inherit" }
        )
        await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {
          stdio: "inherit",
        })
      } else if (example === "create-react-app") {
        const projectPath = path.join(EXAMPLES_DIR, example)
        await removeExistingAutomerge(projectPath)
        await fsPromises.rm(path.join(projectPath, "yarn.lock"), {
          force: true,
        })
        await spawnAndWait(
          "yarn",
          [
            "--cwd",
            projectPath,
            "install",
            "--registry",
            registryUrl,
            "--check-files",
          ],
          { stdio: "inherit" }
        )
        await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {
          stdio: "inherit",
        })
      }
    }
  })
}

type WithRegistryAction = (registryUrl: string) => Promise<void>

async function withRegistry(
  action: WithRegistryAction,
  ...actions: Array<WithRegistryAction>
) {
  // First, start verdaccio
  printHeader("Starting verdaccio NPM server")
  const verd = await VerdaccioProcess.start()
  actions.unshift(action)

  for (const action of actions) {
    try {
      type Step = "verd-died" | "action-completed"
      const verdDied: () => Promise<Step> = async () => {
        await verd.died()
        return "verd-died"
      }
      const actionComplete: () => Promise<Step> = async () => {
        await action("http://localhost:4873")
        return "action-completed"
      }
      const result = await Promise.race([verdDied(), actionComplete()])
      if (result === "verd-died") {
        throw new Error("verdaccio unexpectedly exited")
      }
    } catch (e) {
      await verd.kill()
      throw e
    }
  }
  await verd.kill()
}

// Run `action` against a registry which has `automerge-wasm` built and
// published to it
async function withPublishedWasm(profile: Profile, action: WithRegistryAction) {
  await withRegistry(buildAutomergeWasm(profile), publishAutomergeWasm, action)
}

function buildAutomergeWasm(profile: Profile): WithRegistryAction {
  return async (registryUrl: string) => {
    printHeader("building automerge-wasm")
    await spawnAndWait(
      "yarn",
      ["--cwd", AUTOMERGE_WASM_PATH, "--registry", registryUrl, "install"],
      { stdio: "inherit" }
    )
    const cmd = profile === "release" ? "release" : "debug"
    await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, cmd], {
      stdio: "inherit",
    })
  }
}

async function publishAutomergeWasm(registryUrl: string) {
  printHeader("Publishing automerge-wasm to verdaccio")
  await fsPromises.rm(
    path.join(VERDACCIO_DB_PATH, "@automerge/automerge-wasm"),
    { recursive: true, force: true }
  )
  await yarnPublish(registryUrl, AUTOMERGE_WASM_PATH)
}

async function buildAndPublishAutomergeJs(registryUrl: string) {
  // Build the js package
  printHeader("Building automerge")
  await removeExistingAutomerge(AUTOMERGE_JS_PATH)
  await removeFromVerdaccio("@automerge/automerge")
  await fsPromises.rm(path.join(AUTOMERGE_JS_PATH, "yarn.lock"), {
    force: true,
  })
  await spawnAndWait(
    "yarn",
    [
      "--cwd",
      AUTOMERGE_JS_PATH,
      "install",
      "--registry",
      registryUrl,
      "--check-files",
    ],
    { stdio: "inherit" }
  )
  await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "build"], {
    stdio: "inherit",
  })
  await yarnPublish(registryUrl, AUTOMERGE_JS_PATH)
}

/**
 * A running verdaccio process
 */
class VerdaccioProcess {
  child: ChildProcess
  stdout: Array<Buffer>
  stderr: Array<Buffer>

  constructor(child: ChildProcess) {
    this.child = child

    // Collect stdout/stderr otherwise the subprocess gets blocked writing
    this.stdout = []
    this.stderr = []
    this.child.stdout &&
      this.child.stdout.on("data", data => this.stdout.push(data))
    this.child.stderr &&
      this.child.stderr.on("data", data => this.stderr.push(data))

    const errCallback = (e: any) => {
      console.error("!!!!!!!!!ERROR IN VERDACCIO PROCESS!!!!!!!!!")
      console.error("    ", e)
      if (this.stdout.length > 0) {
        console.log("\n**Verdaccio stdout**")
        const stdout = Buffer.concat(this.stdout)
        process.stdout.write(stdout)
      }

      if (this.stderr.length > 0) {
        console.log("\n**Verdaccio stderr**")
        const stderr = Buffer.concat(this.stderr)
        process.stdout.write(stderr)
      }
      process.exit(-1)
    }
    this.child.on("error", errCallback)
  }

  /**
   * Spawn a verdaccio process and wait for it to respond successfully to HTTP requests
   *
   * The returned `VerdaccioProcess` can be used to control the subprocess
   */
  static async start() {
    const child = spawn(
      "yarn",
      ["verdaccio", "--config", VERDACCIO_CONFIG_PATH],
      { env: { ...process.env, FORCE_COLOR: "true" } }
    )

    // Forward stdout and stderr whilst waiting for startup to complete
    const stdoutCallback = (data: Buffer) => process.stdout.write(data)
    const stderrCallback = (data: Buffer) => process.stderr.write(data)
    child.stdout && child.stdout.on("data", stdoutCallback)
    child.stderr && child.stderr.on("data", stderrCallback)

    const healthCheck = async () => {
      while (true) {
        try {
          const resp = await fetch("http://localhost:4873")
          if (resp.status === 200) {
            return
          } else {
            console.log(`Healthcheck failed: bad status ${resp.status}`)
          }
        } catch (e) {
          console.error(`Healthcheck failed: ${e}`)
        }
        await setTimeout(500)
      }
    }
    await withTimeout(healthCheck(), 10000)

    // Stop forwarding stdout/stderr
    child.stdout && child.stdout.off("data", stdoutCallback)
    child.stderr && child.stderr.off("data", stderrCallback)
    return new VerdaccioProcess(child)
  }

  /**
   * Send a SIGKILL to the process and wait for it to stop
   */
  async kill() {
    this.child.stdout && this.child.stdout.destroy()
    this.child.stderr && this.child.stderr.destroy()
    this.child.kill()
    try {
      await withTimeout(once(this.child, "close"), 500)
    } catch (e) {
      console.error("unable to kill verdaccio subprocess, trying -9")
      this.child.kill(9)
      await withTimeout(once(this.child, "close"), 500)
    }
  }

  /**
   * A promise which resolves when the subprocess exits for any reason
   */
  async died(): Promise<number | null> {
    const [exit, _signal] = await once(this.child, "exit")
    return exit
  }
}

function printHeader(header: string) {
  console.log("\n===============================")
  console.log(`    ${header}`)
  console.log("===============================")
}

/**
 * Removes the automerge, @automerge/automerge-wasm, and @automerge/automerge packages from
 * `$packageDir/node_modules`
 *
 * This is useful for forcing a package to be refreshed, in combination with
 * `yarn install --check-files`, which checks whether a package is present in
 * `node_modules` and forces a reinstall if it is not.
 *
 * @param packageDir - The directory containing the package.json of the target project
 */
async function removeExistingAutomerge(packageDir: string) {
  await fsPromises.rm(path.join(packageDir, "node_modules", "@automerge"), {
    recursive: true,
    force: true,
  })
  await fsPromises.rm(path.join(packageDir, "node_modules", "automerge"), {
    recursive: true,
    force: true,
  })
}

type SpawnResult = {
  stdout?: Buffer
  stderr?: Buffer
}

async function spawnAndWait(
  cmd: string,
  args: Array<string>,
  options: child_process.SpawnOptions
): Promise<SpawnResult> {
  const child = spawn(cmd, args, options)
  let stdout: Array<Buffer> | null = null
  let stderr: Array<Buffer> | null = null
  if (child.stdout) {
    stdout = []
    child.stdout.on("data", data => stdout.push(data))
  }
  if (child.stderr) {
    stderr = []
    child.stderr.on("data", data => stderr.push(data))
  }

  const [exit, _signal] = await once(child, "exit")
  if (exit && exit !== 0) {
    throw new Error("nonzero exit code")
  }
  return {
    stderr: stderr ? Buffer.concat(stderr) : undefined,
    stdout: stdout ? Buffer.concat(stdout) : undefined,
  }
}

/**
 * Remove a package from the verdaccio registry. This is necessary because we
 * often want to _replace_ a version rather than update the version number.
 * Obviously this is very bad and verboten in normal circumstances, but the
 * whole point here is to be able to test the entire packaging story, so it's
 * okay, I Promise.
 */
async function removeFromVerdaccio(packageName: string) {
  await fsPromises.rm(path.join(VERDACCIO_DB_PATH, packageName), {
    force: true,
    recursive: true,
  })
}

// Publish the package at `cwd` to the registry at `registryUrl`
async function yarnPublish(registryUrl: string, cwd: string) {
  await spawnAndWait(
    "yarn",
    ["--registry", registryUrl, "--cwd", cwd, "publish", "--non-interactive"],
    {
      stdio: "inherit",
      env: {
        ...process.env,
        FORCE_COLOR: "true",
        // This is a fake token, it just has to be the right format
        npm_config__auth:
          "//localhost:4873/:_authToken=Gp2Mgxm4faa/7wp0dMSuRA==",
      },
    }
  )
}

/**
 * Wait for a given delay to resolve a promise, throwing an error if the
 * promise doesn't resolve within the timeout
 *
 * @param promise - the promise to wait for
 * @param timeout - the delay in milliseconds to wait before throwing
 */
async function withTimeout<T>(
  promise: Promise<T>,
  timeout: number
): Promise<T> {
  type Step = "timed-out" | { result: T }
  const timedOut: () => Promise<Step> = async () => {
    await setTimeout(timeout)
    return "timed-out"
  }
  const succeeded: () => Promise<Step> = async () => {
    const result = await promise
    return { result }
  }
  const result = await Promise.race([timedOut(), succeeded()])
  if (result === "timed-out") {
    throw new Error("timed out")
  } else {
    return result.result
  }
}
23
javascript/e2e/package.json
Normal file
@@ -0,0 +1,23 @@
{
  "name": "e2e",
  "version": "0.0.1",
  "description": "",
  "main": "index.js",
  "scripts": {
    "e2e": "ts-node index.ts"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@types/node": "^18.7.18",
    "cmd-ts": "^0.11.0",
    "node-fetch": "^2",
    "ts-node": "^10.9.1",
    "typed-emitter": "^2.1.0",
    "typescript": "^4.8.3",
    "verdaccio": "5"
  },
  "devDependencies": {
    "@types/node-fetch": "2.x"
  }
}
6
javascript/e2e/tsconfig.json
Normal file
@@ -0,0 +1,6 @@
{
  "compilerOptions": {
    "types": ["node"],
    "module": "nodenext"
  }
}
25
javascript/e2e/verdaccio.yaml
Normal file
@@ -0,0 +1,25 @@
storage: "./verdacciodb"
auth:
  htpasswd:
    file: ./htpasswd
publish:
  allow_offline: true
logs: { type: stdout, format: pretty, level: info }
packages:
  "@automerge/automerge-wasm":
    access: "$all"
    publish: "$all"
  "@automerge/automerge":
    access: "$all"
    publish: "$all"
  "*":
    access: "$all"
    publish: "$all"
    proxy: npmjs
  "@*/*":
    access: "$all"
    publish: "$all"
    proxy: npmjs
uplinks:
  npmjs:
    url: https://registry.npmjs.org/
2130
javascript/e2e/yarn.lock
Normal file
1
javascript/examples/create-react-app/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
node_modules/
59
javascript/examples/create-react-app/README.md
Normal file
@@ -0,0 +1,59 @@
# Automerge + `create-react-app`

This is a little fiddly to get working. The problem is that `create-react-app`
hard codes a webpack configuration which does not support WASM modules, which we
require in order to bundle the WASM implementation of automerge. To get around
this we use [`craco`](https://github.com/dilanx/craco) which does some monkey
patching to allow us to modify the webpack config that `create-react-app`
bundles. Then we use a craco plugin called
[`craco-wasm`](https://www.npmjs.com/package/craco-wasm) to perform the
necessary modifications to the webpack config. It should be noted that this is
all quite fragile and ideally you probably don't want to use `create-react-app`
to do this in production.

## Setup

Assuming you have already run `create-react-app` and your working directory is
the project root.

### Install craco and craco-wasm

```bash
yarn add craco craco-wasm
```

### Modify `package.json` to use `craco` for scripts

In `package.json` the `scripts` section will look like this:

```json
"scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
},
```

Replace that section with:

```json
"scripts": {
    "start": "craco start",
    "build": "craco build",
    "test": "craco test",
    "eject": "craco eject"
},
```

### Create `craco.config.js`

In the root of the project add the following contents to `craco.config.js`

```javascript
const cracoWasm = require("craco-wasm")

module.exports = {
  plugins: [cracoWasm()],
}
```
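
With those changes in place the usual `create-react-app` workflow should work
as before (the scripts now delegate to craco), for example:

```bash
yarn start
```
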
5
javascript/examples/create-react-app/craco.config.js
Normal file
@@ -0,0 +1,5 @@
const cracoWasm = require("craco-wasm")

module.exports = {
  plugins: [cracoWasm()],
}
41
javascript/examples/create-react-app/package.json
Normal file
@@ -0,0 +1,41 @@
{
  "name": "automerge-create-react-app",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@craco/craco": "^7.0.0-alpha.8",
    "craco-wasm": "0.0.1",
    "@testing-library/jest-dom": "^5.16.5",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "@automerge/automerge": "2.0.0-alpha.7",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-scripts": "5.0.1",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "craco start",
    "build": "craco build",
    "test": "craco test",
    "eject": "craco eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}
BIN
javascript/examples/create-react-app/public/favicon.ico
Normal file
Size: 3.8 KiB
43
javascript/examples/create-react-app/public/index.html
Normal file
@@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#000000" />
    <meta
      name="description"
      content="Web site created using create-react-app"
    />
    <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
    <!--
      manifest.json provides metadata used when your web app is installed on a
      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
    -->
    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
    <!--
      Notice the use of %PUBLIC_URL% in the tags above.
      It will be replaced with the URL of the `public` folder during the build.
      Only files inside the `public` folder can be referenced from the HTML.

      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
      work correctly both with client-side routing and a non-root public URL.
      Learn how to configure a non-root public URL by running `npm run build`.
    -->
    <title>React App</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!--
      This HTML file is a template.
      If you open it directly in the browser, you will see an empty page.

      You can add webfonts, meta tags, or analytics to this file.
      The build step will place the bundled scripts into the <body> tag.

      To begin the development, run `npm start` or `yarn start`.
      To create a production bundle, use `npm run build` or `yarn build`.
    -->
  </body>
</html>
BIN
javascript/examples/create-react-app/public/logo192.png
Normal file
Size: 5.2 KiB
BIN
javascript/examples/create-react-app/public/logo512.png
Normal file
Size: 9.4 KiB
25
javascript/examples/create-react-app/public/manifest.json
Normal file
@@ -0,0 +1,25 @@
{
  "short_name": "React App",
  "name": "Create React App Sample",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}
3
javascript/examples/create-react-app/public/robots.txt
Normal file
@@ -0,0 +1,3 @@
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow: