Compare commits

2 commits

| Author | SHA1 | Date |
|---|---|---|
|  | 0975c5614b |  |
|  | b7415d18eb |  |

315 changed files with 16547 additions and 22614 deletions

.github/workflows/ci.yaml (28 changes)

							|  | @ -14,7 +14,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: 1.67.0 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|           components: rustfmt |           components: rustfmt | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|  | @ -28,7 +28,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: 1.67.0 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|           components: clippy |           components: clippy | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|  | @ -42,7 +42,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: 1.67.0 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|       - name: Build rust docs |       - name: Build rust docs | ||||||
|  | @ -90,16 +90,6 @@ jobs: | ||||||
|         run: rustup target add wasm32-unknown-unknown |         run: rustup target add wasm32-unknown-unknown | ||||||
|       - name: run tests |       - name: run tests | ||||||
|         run: ./scripts/ci/deno_tests |         run: ./scripts/ci/deno_tests | ||||||
| 
 |  | ||||||
|   js_fmt: |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
|     steps: |  | ||||||
|       - uses: actions/checkout@v2 |  | ||||||
|       - name: install |  | ||||||
|         run: yarn global add prettier |  | ||||||
|       - name: format |  | ||||||
|         run: prettier -c javascript/.prettierrc javascript |  | ||||||
| 
 |  | ||||||
|   js_tests: |   js_tests: | ||||||
|     runs-on: ubuntu-latest |     runs-on: ubuntu-latest | ||||||
|     steps: |     steps: | ||||||
|  | @ -118,7 +108,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: nightly-2023-01-26 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|       - name: Install CMocka |       - name: Install CMocka | ||||||
|  | @ -127,8 +117,6 @@ jobs: | ||||||
|         uses: jwlawson/actions-setup-cmake@v1.12 |         uses: jwlawson/actions-setup-cmake@v1.12 | ||||||
|         with: |         with: | ||||||
|           cmake-version: latest |           cmake-version: latest | ||||||
|       - name: Install rust-src |  | ||||||
|         run: rustup component add rust-src |  | ||||||
|       - name: Build and test C bindings |       - name: Build and test C bindings | ||||||
|         run: ./scripts/ci/cmake-build Release Static |         run: ./scripts/ci/cmake-build Release Static | ||||||
|         shell: bash |         shell: bash | ||||||
|  | @ -138,7 +126,9 @@ jobs: | ||||||
|     strategy: |     strategy: | ||||||
|       matrix: |       matrix: | ||||||
|         toolchain: |         toolchain: | ||||||
|           - 1.67.0 |           - 1.60.0 | ||||||
|  |           - nightly | ||||||
|  |     continue-on-error: ${{ matrix.toolchain == 'nightly' }} | ||||||
|     steps: |     steps: | ||||||
|       - uses: actions/checkout@v2 |       - uses: actions/checkout@v2 | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|  | @ -157,7 +147,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: 1.67.0 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|       - run: ./scripts/ci/build-test |       - run: ./scripts/ci/build-test | ||||||
|  | @ -170,7 +160,7 @@ jobs: | ||||||
|       - uses: actions-rs/toolchain@v1 |       - uses: actions-rs/toolchain@v1 | ||||||
|         with: |         with: | ||||||
|           profile: minimal |           profile: minimal | ||||||
|           toolchain: 1.67.0 |           toolchain: 1.64.0 | ||||||
|           default: true |           default: true | ||||||
|       - uses: Swatinem/rust-cache@v1 |       - uses: Swatinem/rust-cache@v1 | ||||||
|       - run: ./scripts/ci/build-test |       - run: ./scripts/ci/build-test | ||||||
|  |  | ||||||
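The substantive change in this file (beyond the toolchain pin moving between 1.67.0 and 1.64.0 and the removal of the `js_fmt` and `rust-src` steps on one side) is the job whose toolchain becomes a matrix: the right-hand column builds against both `1.60.0` and `nightly`, tolerating nightly breakage via `continue-on-error`. A minimal sketch of how such a job could read once assembled — the job name is a placeholder and feeding the matrix value into the toolchain action is an assumption; only the matrix, `continue-on-error`, and step lines come from the hunk above:

```yaml
# Sketch only: "linux" is a hypothetical job name, and passing the matrix value
# to actions-rs/toolchain is an assumption not shown in the hunk above.
linux:
  runs-on: ubuntu-latest
  strategy:
    matrix:
      toolchain:
        - 1.60.0
        - nightly
  # Nightly failures are reported but do not mark the workflow as failed.
  continue-on-error: ${{ matrix.toolchain == 'nightly' }}
  steps:
    - uses: actions/checkout@v2
    - uses: actions-rs/toolchain@v1
      with:
        profile: minimal
        toolchain: ${{ matrix.toolchain }} # assumption
        default: true
    - uses: Swatinem/rust-cache@v1
    - run: ./scripts/ci/build-test
```

Gating `continue-on-error` on the matrix value keeps nightly regressions visible in the run summary without letting them block the rest of the workflow.
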
							
								
								
									
.github/workflows/release.yaml (214 changes)

|  | @ -1,214 +0,0 @@ | ||||||
| name: Release |  | ||||||
| on: |  | ||||||
|   push: |  | ||||||
|     branches: |  | ||||||
|       - main |  | ||||||
| 
 |  | ||||||
| jobs: |  | ||||||
|   check_if_wasm_version_upgraded: |  | ||||||
|     name: Check if WASM version has been upgraded |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
|     outputs: |  | ||||||
|       wasm_version: ${{ steps.version-updated.outputs.current-package-version }} |  | ||||||
|       wasm_has_updated: ${{ steps.version-updated.outputs.has-updated }} |  | ||||||
|     steps: |  | ||||||
|       - uses: JiPaix/package-json-updated-action@v1.0.5 |  | ||||||
|         id: version-updated |  | ||||||
|         with: |  | ||||||
|           path: rust/automerge-wasm/package.json |  | ||||||
|         env: |  | ||||||
|           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |  | ||||||
|   publish-wasm: |  | ||||||
|     name: Publish WASM package |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
|     needs: |  | ||||||
|       - check_if_wasm_version_upgraded |  | ||||||
|     # We create release only if the version in the package.json has been upgraded |  | ||||||
|     if: needs.check_if_wasm_version_upgraded.outputs.wasm_has_updated == 'true' |  | ||||||
|     steps: |  | ||||||
|       - uses: actions/setup-node@v3 |  | ||||||
|         with: |  | ||||||
|           node-version: '16.x' |  | ||||||
|           registry-url: 'https://registry.npmjs.org' |  | ||||||
|       - uses: denoland/setup-deno@v1 |  | ||||||
|       - uses: actions/checkout@v3 |  | ||||||
|         with: |  | ||||||
|           fetch-depth: 0 |  | ||||||
|           ref: ${{ github.ref }} |  | ||||||
|       - name: Get rid of local github workflows |  | ||||||
|         run: rm -r .github/workflows |  | ||||||
|       - name: Remove tmp_branch if it exists |  | ||||||
|         run: git push origin :tmp_branch || true |  | ||||||
|       - run: git checkout -b tmp_branch |  | ||||||
|       - name: Install wasm-bindgen-cli |  | ||||||
|         run: cargo install wasm-bindgen-cli wasm-opt |  | ||||||
|       - name: Install wasm32 target |  | ||||||
|         run: rustup target add wasm32-unknown-unknown |  | ||||||
|       - name: run wasm js tests |  | ||||||
|         id: wasm_js_tests |  | ||||||
|         run: ./scripts/ci/wasm_tests |  | ||||||
|       - name: run wasm deno tests |  | ||||||
|         id: wasm_deno_tests |  | ||||||
|         run: ./scripts/ci/deno_tests |  | ||||||
|       - name: build release |  | ||||||
|         id: build_release |  | ||||||
|         run: | |  | ||||||
|           npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm run release |  | ||||||
|       - name: Collate deno release files |  | ||||||
|         if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' |  | ||||||
|         run: | |  | ||||||
|           mkdir $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           cp $GITHUB_WORKSPACE/rust/automerge-wasm/deno/* $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           cp $GITHUB_WORKSPACE/rust/automerge-wasm/index.d.ts $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           cp $GITHUB_WORKSPACE/rust/automerge-wasm/README.md $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           cp $GITHUB_WORKSPACE/rust/automerge-wasm/LICENSE $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           sed -i '1i /// <reference types="./index.d.ts" />' $GITHUB_WORKSPACE/deno_wasm_dist/automerge_wasm.js |  | ||||||
|       - name: Create npm release |  | ||||||
|         if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' |  | ||||||
|         run: | |  | ||||||
|           if [ "$(npm --prefix $GITHUB_WORKSPACE/rust/automerge-wasm show . version)" = "$VERSION" ]; then |  | ||||||
|             echo "This version is already published" |  | ||||||
|             exit 0 |  | ||||||
|           fi |  | ||||||
|           EXTRA_ARGS="--access public" |  | ||||||
|           if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then |  | ||||||
|             echo "Is pre-release version" |  | ||||||
|             EXTRA_ARGS="$EXTRA_ARGS --tag next" |  | ||||||
|           fi |  | ||||||
|           if [ "$NODE_AUTH_TOKEN" = "" ]; then |  | ||||||
|             echo "Can't publish on NPM, You need a NPM_TOKEN secret." |  | ||||||
|             false |  | ||||||
|           fi |  | ||||||
|           npm publish $GITHUB_WORKSPACE/rust/automerge-wasm  $EXTRA_ARGS |  | ||||||
|         env: |  | ||||||
|           NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} |  | ||||||
|           VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} |  | ||||||
|       - name: Commit wasm deno release files |  | ||||||
|         run: | |  | ||||||
|           git config --global user.name "actions" |  | ||||||
|           git config --global user.email actions@github.com |  | ||||||
|           git add $GITHUB_WORKSPACE/deno_wasm_dist |  | ||||||
|           git commit -am "Add deno release files" |  | ||||||
|           git push origin tmp_branch |  | ||||||
|       - name: Tag wasm release |  | ||||||
|         if: steps.wasm_js_tests.outcome == 'success' && steps.wasm_deno_tests.outcome == 'success' |  | ||||||
|         uses: softprops/action-gh-release@v1 |  | ||||||
|         with: |  | ||||||
|           name: Automerge Wasm v${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} |  | ||||||
|           tag_name: js/automerge-wasm-${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} |  | ||||||
|           target_commitish: tmp_branch |  | ||||||
|           generate_release_notes: false |  | ||||||
|           draft: false |  | ||||||
|         env: |  | ||||||
|           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |  | ||||||
|       - name: Remove tmp_branch |  | ||||||
|         run: git push origin :tmp_branch |  | ||||||
|   check_if_js_version_upgraded: |  | ||||||
|     name: Check if JS version has been upgraded |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
|     outputs: |  | ||||||
|       js_version: ${{ steps.version-updated.outputs.current-package-version }} |  | ||||||
|       js_has_updated: ${{ steps.version-updated.outputs.has-updated }} |  | ||||||
|     steps: |  | ||||||
|       - uses: JiPaix/package-json-updated-action@v1.0.5 |  | ||||||
|         id: version-updated |  | ||||||
|         with: |  | ||||||
|           path: javascript/package.json |  | ||||||
|         env: |  | ||||||
|           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |  | ||||||
|   publish-js: |  | ||||||
|     name: Publish JS package |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
|     needs: |  | ||||||
|       - check_if_js_version_upgraded |  | ||||||
|       - check_if_wasm_version_upgraded |  | ||||||
|       - publish-wasm |  | ||||||
|     # We create release only if the version in the package.json has been upgraded and after the WASM release |  | ||||||
|     if: | |  | ||||||
|       (always() && ! cancelled()) && |  | ||||||
|       (needs.publish-wasm.result == 'success' || needs.publish-wasm.result == 'skipped') &&  |  | ||||||
|       needs.check_if_js_version_upgraded.outputs.js_has_updated == 'true' |  | ||||||
|     steps: |  | ||||||
|       - uses: actions/setup-node@v3 |  | ||||||
|         with: |  | ||||||
|           node-version: '16.x' |  | ||||||
|           registry-url: 'https://registry.npmjs.org' |  | ||||||
|       - uses: denoland/setup-deno@v1 |  | ||||||
|       - uses: actions/checkout@v3 |  | ||||||
|         with: |  | ||||||
|           fetch-depth: 0 |  | ||||||
|           ref: ${{ github.ref }} |  | ||||||
|       - name: Get rid of local github workflows |  | ||||||
|         run: rm -r .github/workflows |  | ||||||
|       - name: Remove js_tmp_branch if it exists |  | ||||||
|         run: git push origin :js_tmp_branch || true |  | ||||||
|       - run: git checkout -b js_tmp_branch |  | ||||||
|       - name: check js formatting |  | ||||||
|         run: | |  | ||||||
|           yarn global add prettier |  | ||||||
|           prettier -c javascript/.prettierrc javascript |  | ||||||
|       - name: run js tests |  | ||||||
|         id: js_tests |  | ||||||
|         run: | |  | ||||||
|           cargo install wasm-bindgen-cli wasm-opt |  | ||||||
|           rustup target add wasm32-unknown-unknown |  | ||||||
|           ./scripts/ci/js_tests |  | ||||||
|       - name: build js release |  | ||||||
|         id: build_release |  | ||||||
|         run: | |  | ||||||
|           npm --prefix $GITHUB_WORKSPACE/javascript run build |  | ||||||
|       - name: build js deno release |  | ||||||
|         id: build_deno_release |  | ||||||
|         run: | |  | ||||||
|           VERSION=$WASM_VERSION npm --prefix $GITHUB_WORKSPACE/javascript run deno:build |  | ||||||
|         env: |  | ||||||
|           WASM_VERSION: ${{ needs.check_if_wasm_version_upgraded.outputs.wasm_version }} |  | ||||||
|       - name: run deno tests |  | ||||||
|         id: deno_tests |  | ||||||
|         run: | |  | ||||||
|           npm --prefix $GITHUB_WORKSPACE/javascript run deno:test |  | ||||||
|       - name: Collate deno release files |  | ||||||
|         if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' |  | ||||||
|         run: | |  | ||||||
|           mkdir $GITHUB_WORKSPACE/deno_js_dist |  | ||||||
|           cp $GITHUB_WORKSPACE/javascript/deno_dist/* $GITHUB_WORKSPACE/deno_js_dist |  | ||||||
|       - name: Create npm release |  | ||||||
|         if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' |  | ||||||
|         run: | |  | ||||||
|           if [ "$(npm --prefix $GITHUB_WORKSPACE/javascript show . version)" = "$VERSION" ]; then |  | ||||||
|             echo "This version is already published" |  | ||||||
|             exit 0 |  | ||||||
|           fi |  | ||||||
|           EXTRA_ARGS="--access public" |  | ||||||
|           if [[ $VERSION == *"alpha."* ]] || [[ $VERSION == *"beta."* ]] || [[ $VERSION == *"rc."* ]]; then |  | ||||||
|             echo "Is pre-release version" |  | ||||||
|             EXTRA_ARGS="$EXTRA_ARGS --tag next" |  | ||||||
|           fi |  | ||||||
|           if [ "$NODE_AUTH_TOKEN" = "" ]; then |  | ||||||
|             echo "Can't publish on NPM, You need a NPM_TOKEN secret." |  | ||||||
|             false |  | ||||||
|           fi |  | ||||||
|           npm publish $GITHUB_WORKSPACE/javascript $EXTRA_ARGS |  | ||||||
|         env: |  | ||||||
|           NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} |  | ||||||
|           VERSION: ${{ needs.check_if_js_version_upgraded.outputs.js_version }} |  | ||||||
|       - name: Commit js deno release files |  | ||||||
|         run: | |  | ||||||
|           git config --global user.name "actions" |  | ||||||
|           git config --global user.email actions@github.com |  | ||||||
|           git add $GITHUB_WORKSPACE/deno_js_dist |  | ||||||
|           git commit -am "Add deno js release files" |  | ||||||
|           git push origin js_tmp_branch |  | ||||||
|       - name: Tag JS release |  | ||||||
|         if: steps.js_tests.outcome == 'success' && steps.deno_tests.outcome == 'success' |  | ||||||
|         uses: softprops/action-gh-release@v1 |  | ||||||
|         with: |  | ||||||
|           name: Automerge v${{ needs.check_if_js_version_upgraded.outputs.js_version }} |  | ||||||
|           tag_name: js/automerge-${{ needs.check_if_js_version_upgraded.outputs.js_version }} |  | ||||||
|           target_commitish: js_tmp_branch |  | ||||||
|           generate_release_notes: false |  | ||||||
|           draft: false |  | ||||||
|         env: |  | ||||||
|           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |  | ||||||
|       - name: Remove js_tmp_branch |  | ||||||
|         run: git push origin :js_tmp_branch |  | ||||||
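One detail of this workflow (present only on one side of the comparison) worth noting is how `publish-js` is gated. A job whose `needs` list includes a skipped job is normally skipped itself, so the `if:` expression opens with `always() && ! cancelled()` to force evaluation and then re-imposes only the conditions that matter: the WASM publish either succeeded or was skipped because its version did not change, and the JS `package.json` version was actually bumped. A condensed sketch of just that gating — the `needs` and `if` lines are copied from the workflow above, everything else is placeholder:

```yaml
# Gating sketch only; the real job's steps are shown in the workflow above.
publish-js:
  name: Publish JS package
  runs-on: ubuntu-latest
  needs:
    - check_if_js_version_upgraded
    - check_if_wasm_version_upgraded
    - publish-wasm
  # Run even when publish-wasm was skipped, never on failure or cancellation,
  # and only when the JS package version has been upgraded.
  if: |
    (always() && ! cancelled()) &&
    (needs.publish-wasm.result == 'success' || needs.publish-wasm.result == 'skipped') &&
    needs.check_if_js_version_upgraded.outputs.js_has_updated == 'true'
  steps:
    - run: echo "publish steps elided"
```

The same version-gate pattern, driven by `JiPaix/package-json-updated-action` outputs, controls both the WASM and JS publish jobs.
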
							
								
								
									
README.md (20 changes)

|  | @ -25,7 +25,7 @@ If you're familiar with CRDTs and interested in the design of Automerge in | ||||||
| particular take a look at https://automerge.org/docs/how-it-works/backend/ | particular take a look at https://automerge.org/docs/how-it-works/backend/ | ||||||
| 
 | 
 | ||||||
| Finally, if you want to talk to us about this project please [join the | Finally, if you want to talk to us about this project please [join the | ||||||
| Slack](https://join.slack.com/t/automerge/shared_invite/zt-e4p3760n-kKh7r3KRH1YwwNfiZM8ktw) | Slack](https://join.slack.com/t/automerge/shared_invite/zt-1ho1ieas2-DnWZcRR82BRu65vCD4t3Xw) | ||||||
| 
 | 
 | ||||||
| ## Status | ## Status | ||||||
| 
 | 
 | ||||||
|  | @ -42,10 +42,9 @@ In general we try and respect semver. | ||||||
| 
 | 
 | ||||||
| ### JavaScript | ### JavaScript | ||||||
| 
 | 
 | ||||||
| A stable release of the javascript package is currently available as | An alpha release of the javascript package is currently available as | ||||||
| `@automerge/automerge@2.0.0`, where pre-release versions of the `2.0.1` are | `@automerge/automerge@2.0.0-alpha.n` where `n` is an integer. We are gathering | ||||||
| available as `2.0.1-alpha.n`. `2.0.1*` packages are also available for Deno at | feedback on the API and looking to release a `2.0.0` in the next few weeks. | ||||||
| https://deno.land/x/automerge |  | ||||||
| 
 | 
 | ||||||
| ### Rust | ### Rust | ||||||
| 
 | 
 | ||||||
|  | @ -53,9 +52,7 @@ The rust codebase is currently oriented around producing a performant backend | ||||||
| for the Javascript wrapper and as such the API for Rust code is low level and | for the Javascript wrapper and as such the API for Rust code is low level and | ||||||
| not well documented. We will be returning to this over the next few months but | not well documented. We will be returning to this over the next few months but | ||||||
| for now you will need to be comfortable reading the tests and asking questions | for now you will need to be comfortable reading the tests and asking questions | ||||||
| to figure out how to use it. If you are looking to build rust applications which | to figure out how to use it. | ||||||
| use automerge you may want to look into |  | ||||||
| [autosurgeon](https://github.com/alexjg/autosurgeon) |  | ||||||
| 
 | 
 | ||||||
| ## Repository Organisation | ## Repository Organisation | ||||||
| 
 | 
 | ||||||
|  | @ -112,16 +109,9 @@ brew install cmake node cmocka | ||||||
| # install yarn | # install yarn | ||||||
| npm install --global yarn | npm install --global yarn | ||||||
| 
 | 
 | ||||||
| # install javascript dependencies |  | ||||||
| yarn --cwd ./javascript |  | ||||||
| 
 |  | ||||||
| # install rust dependencies | # install rust dependencies | ||||||
| cargo install wasm-bindgen-cli wasm-opt cargo-deny | cargo install wasm-bindgen-cli wasm-opt cargo-deny | ||||||
| 
 | 
 | ||||||
| # get nightly rust to produce optimized automerge-c builds |  | ||||||
| rustup toolchain install nightly |  | ||||||
| rustup component add rust-src --toolchain nightly |  | ||||||
| 
 |  | ||||||
| # add wasm target in addition to current architecture | # add wasm target in addition to current architecture | ||||||
| rustup target add wasm32-unknown-unknown | rustup target add wasm32-unknown-unknown | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -54,7 +54,6 @@ | ||||||
| 
 | 
 | ||||||
|           nodejs |           nodejs | ||||||
|           yarn |           yarn | ||||||
|           deno |  | ||||||
| 
 | 
 | ||||||
|           # c deps |           # c deps | ||||||
|           cmake |           cmake | ||||||
|  |  | ||||||
|  | @ -1,3 +0,0 @@ | ||||||
| { |  | ||||||
|   "replacer": "scripts/denoify-replacer.mjs" |  | ||||||
| } |  | ||||||
|  | @ -1,15 +1,11 @@ | ||||||
| module.exports = { | module.exports = { | ||||||
|   root: true, |   root: true, | ||||||
|   parser: "@typescript-eslint/parser", |   parser: '@typescript-eslint/parser', | ||||||
|   plugins: ["@typescript-eslint"], |   plugins: [ | ||||||
|   extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended"], |     '@typescript-eslint', | ||||||
|   rules: { |  | ||||||
|     "@typescript-eslint/no-unused-vars": [ |  | ||||||
|       "error", |  | ||||||
|       { |  | ||||||
|         argsIgnorePattern: "^_", |  | ||||||
|         varsIgnorePattern: "^_", |  | ||||||
|       }, |  | ||||||
|   ], |   ], | ||||||
|   }, |   extends: [ | ||||||
| } |     'eslint:recommended', | ||||||
|  |     'plugin:@typescript-eslint/recommended', | ||||||
|  |   ], | ||||||
|  | }; | ||||||
|  |  | ||||||
							
								
								
									
javascript/.gitignore (2 changes)

|  | @ -2,5 +2,3 @@ | ||||||
| /yarn.lock | /yarn.lock | ||||||
| dist | dist | ||||||
| docs/ | docs/ | ||||||
| .vim |  | ||||||
| deno_dist/ |  | ||||||
|  |  | ||||||
|  | @ -1,4 +0,0 @@ | ||||||
| e2e/verdacciodb |  | ||||||
| dist |  | ||||||
| docs |  | ||||||
| deno_dist |  | ||||||
|  | @ -1,4 +0,0 @@ | ||||||
| { |  | ||||||
|   "semi": false, |  | ||||||
|   "arrowParens": "avoid" |  | ||||||
| } |  | ||||||
|  | @ -37,3 +37,4 @@ yarn test | ||||||
| 
 | 
 | ||||||
| If you make changes to the `automerge-wasm` package you will need to re-run | If you make changes to the `automerge-wasm` package you will need to re-run | ||||||
| `yarn e2e buildjs` | `yarn e2e buildjs` | ||||||
|  | 
 | ||||||
|  |  | ||||||
|  | @ -19,6 +19,7 @@ data](#make-some-data). If you're in a browser you need a bundler | ||||||
| 
 | 
 | ||||||
| ### Bundler setup | ### Bundler setup | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| `@automerge/automerge` is a wrapper around a core library which is written in | `@automerge/automerge` is a wrapper around a core library which is written in | ||||||
| rust, compiled to WebAssembly and distributed as a separate package called | rust, compiled to WebAssembly and distributed as a separate package called | ||||||
| `@automerge/automerge-wasm`. Browsers don't currently support WebAssembly | `@automerge/automerge-wasm`. Browsers don't currently support WebAssembly | ||||||
|  | @ -56,7 +57,7 @@ let doc1 = automerge.from({ | ||||||
|     tasks: [ |     tasks: [ | ||||||
|         {description: "feed fish", done: false}, |         {description: "feed fish", done: false}, | ||||||
|         {description: "water plants", done: false}, |         {description: "water plants", done: false}, | ||||||
|   ], |     ] | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
| // Create a new thread of execution  | // Create a new thread of execution  | ||||||
|  | @ -73,7 +74,7 @@ doc2 = automerge.change(doc2, d => { | ||||||
| doc1 = automerge.change(doc1, d => { | doc1 = automerge.change(doc1, d => { | ||||||
|     d.tasks.push({ |     d.tasks.push({ | ||||||
|         description: "water fish", |         description: "water fish", | ||||||
|     done: false, |         done: false | ||||||
|     }) |     }) | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
|  | @ -87,7 +88,7 @@ assert.deepEqual(doc1, { | ||||||
|         {description: "feed fish", done: true}, |         {description: "feed fish", done: true}, | ||||||
|         {description: "water plants", done: false}, |         {description: "water plants", done: false}, | ||||||
|         {description: "water fish", done: false}, |         {description: "water fish", done: false}, | ||||||
|   ], |     ] | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
| assert.deepEqual(doc2, { | assert.deepEqual(doc2, { | ||||||
|  | @ -95,7 +96,7 @@ assert.deepEqual(doc2, { | ||||||
|         {description: "feed fish", done: true}, |         {description: "feed fish", done: true}, | ||||||
|         {description: "water plants", done: false}, |         {description: "water plants", done: false}, | ||||||
|         {description: "water fish", done: false}, |         {description: "water fish", done: false}, | ||||||
|   ], |     ] | ||||||
| }) | }) | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,11 +1,5 @@ | ||||||
| { | { | ||||||
|     "extends": "../tsconfig.json", |     "extends": "../tsconfig.json", | ||||||
|   "exclude": [ |  | ||||||
|     "../dist/**/*", |  | ||||||
|     "../node_modules", |  | ||||||
|     "../test/**/*", |  | ||||||
|     "../src/**/*.deno.ts" |  | ||||||
|   ], |  | ||||||
|     "compilerOptions": { |     "compilerOptions": { | ||||||
|         "outDir": "../dist/cjs" |         "outDir": "../dist/cjs" | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  | @ -1,13 +0,0 @@ | ||||||
| { |  | ||||||
|   "extends": "../tsconfig.json", |  | ||||||
|   "exclude": [ |  | ||||||
|     "../dist/**/*", |  | ||||||
|     "../node_modules", |  | ||||||
|     "../test/**/*", |  | ||||||
|     "../src/**/*.deno.ts" |  | ||||||
|   ], |  | ||||||
|   "emitDeclarationOnly": true, |  | ||||||
|   "compilerOptions": { |  | ||||||
|     "outDir": "../dist" |  | ||||||
|   } |  | ||||||
| } |  | ||||||
|  | @ -1,11 +1,5 @@ | ||||||
| { | { | ||||||
|     "extends": "../tsconfig.json", |     "extends": "../tsconfig.json", | ||||||
|   "exclude": [ |  | ||||||
|     "../dist/**/*", |  | ||||||
|     "../node_modules", |  | ||||||
|     "../test/**/*", |  | ||||||
|     "../src/**/*.deno.ts" |  | ||||||
|   ], |  | ||||||
|     "compilerOptions": { |     "compilerOptions": { | ||||||
|         "target": "es6", |         "target": "es6", | ||||||
|         "module": "es6", |         "module": "es6", | ||||||
|  |  | ||||||
|  | @ -1,10 +0,0 @@ | ||||||
| import * as Automerge from "../deno_dist/index.ts" |  | ||||||
| 
 |  | ||||||
| Deno.test("It should create, clone and free", () => { |  | ||||||
|   let doc1 = Automerge.init() |  | ||||||
|   let doc2 = Automerge.clone(doc1) |  | ||||||
| 
 |  | ||||||
|   // this is only needed if weakrefs are not supported
 |  | ||||||
|   Automerge.free(doc1) |  | ||||||
|   Automerge.free(doc2) |  | ||||||
| }) |  | ||||||
|  | @ -63,6 +63,7 @@ yarn e2e run-registry | ||||||
| You can now run `yarn install --registry http://localhost:4873` to experiment | You can now run `yarn install --registry http://localhost:4873` to experiment | ||||||
| with the built packages. | with the built packages. | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| ## Using the `dev` build of `automerge-wasm` | ## Using the `dev` build of `automerge-wasm` | ||||||
| 
 | 
 | ||||||
| All the commands above take a `-p` flag which can be either `release` or | All the commands above take a `-p` flag which can be either `release` or | ||||||
|  |  | ||||||
|  | @ -2,24 +2,14 @@ import { once } from "events" | ||||||
| import {setTimeout} from "timers/promises" | import {setTimeout} from "timers/promises" | ||||||
| import {spawn, ChildProcess} from "child_process" | import {spawn, ChildProcess} from "child_process" | ||||||
| import * as child_process from "child_process" | import * as child_process from "child_process" | ||||||
| import { | import {command, subcommands, run, array, multioption, option, Type} from "cmd-ts" | ||||||
|   command, |  | ||||||
|   subcommands, |  | ||||||
|   run, |  | ||||||
|   array, |  | ||||||
|   multioption, |  | ||||||
|   option, |  | ||||||
|   Type, |  | ||||||
| } from "cmd-ts" |  | ||||||
| import * as path from "path" | import * as path from "path" | ||||||
| import * as fsPromises from "fs/promises" | import * as fsPromises from "fs/promises" | ||||||
| import fetch from "node-fetch" | import fetch from "node-fetch" | ||||||
| 
 | 
 | ||||||
| const VERDACCIO_DB_PATH = path.normalize(`${__dirname}/verdacciodb`) | const VERDACCIO_DB_PATH = path.normalize(`${__dirname}/verdacciodb`) | ||||||
| const VERDACCIO_CONFIG_PATH = path.normalize(`${__dirname}/verdaccio.yaml`) | const VERDACCIO_CONFIG_PATH = path.normalize(`${__dirname}/verdaccio.yaml`) | ||||||
| const AUTOMERGE_WASM_PATH = path.normalize( | const AUTOMERGE_WASM_PATH = path.normalize(`${__dirname}/../../rust/automerge-wasm`) | ||||||
|   `${__dirname}/../../rust/automerge-wasm` |  | ||||||
| ) |  | ||||||
| const AUTOMERGE_JS_PATH = path.normalize(`${__dirname}/..`) | const AUTOMERGE_JS_PATH = path.normalize(`${__dirname}/..`) | ||||||
| const EXAMPLES_DIR = path.normalize(path.join(__dirname, "../", "examples")) | const EXAMPLES_DIR = path.normalize(path.join(__dirname, "../", "examples")) | ||||||
| 
 | 
 | ||||||
|  | @ -38,7 +28,7 @@ const ReadExample: Type<string, Example> = { | ||||||
|         } else { |         } else { | ||||||
|             throw new Error(`Unknown example type ${str}`) |             throw new Error(`Unknown example type ${str}`) | ||||||
|         } |         } | ||||||
|   }, |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type Profile = "dev" | "release" | type Profile = "dev" | "release" | ||||||
|  | @ -52,7 +42,7 @@ const ReadProfile: Type<string, Profile> = { | ||||||
|         } else { |         } else { | ||||||
|             throw new Error(`Unknown profile ${str}`) |             throw new Error(`Unknown profile ${str}`) | ||||||
|         } |         } | ||||||
|   }, |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| const buildjs = command({ | const buildjs = command({ | ||||||
|  | @ -62,15 +52,15 @@ const buildjs = command({ | ||||||
|             type: ReadProfile, |             type: ReadProfile, | ||||||
|             long: "profile", |             long: "profile", | ||||||
|             short: "p", |             short: "p", | ||||||
|       defaultValue: () => "dev" as Profile, |             defaultValue: () => "dev" as Profile | ||||||
|     }), |         }) | ||||||
|     }, |     }, | ||||||
|     handler: ({profile}) => { |     handler: ({profile}) => { | ||||||
|         console.log("building js") |         console.log("building js") | ||||||
|         withPublishedWasm(profile, async (registryUrl: string) => { |         withPublishedWasm(profile, async (registryUrl: string) => { | ||||||
|             await buildAndPublishAutomergeJs(registryUrl) |             await buildAndPublishAutomergeJs(registryUrl) | ||||||
|         }) |         }) | ||||||
|   }, |     } | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
| const buildWasm = command({ | const buildWasm = command({ | ||||||
|  | @ -80,13 +70,15 @@ const buildWasm = command({ | ||||||
|             type: ReadProfile, |             type: ReadProfile, | ||||||
|             long: "profile", |             long: "profile", | ||||||
|             short: "p", |             short: "p", | ||||||
|       defaultValue: () => "dev" as Profile, |             defaultValue: () => "dev" as Profile | ||||||
|     }), |         }) | ||||||
|     }, |     }, | ||||||
|     handler: ({profile}) => { |     handler: ({profile}) => { | ||||||
|         console.log("building automerge-wasm") |         console.log("building automerge-wasm") | ||||||
|     withRegistry(buildAutomergeWasm(profile)) |         withRegistry( | ||||||
|   }, |             buildAutomergeWasm(profile), | ||||||
|  |         ) | ||||||
|  |     } | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
| const buildexamples = command({ | const buildexamples = command({ | ||||||
|  | @ -101,17 +93,18 @@ const buildexamples = command({ | ||||||
|             type: ReadProfile, |             type: ReadProfile, | ||||||
|             long: "profile", |             long: "profile", | ||||||
|             short: "p", |             short: "p", | ||||||
|       defaultValue: () => "dev" as Profile, |             defaultValue: () => "dev" as Profile | ||||||
|     }), |         }) | ||||||
|     }, |     }, | ||||||
|     handler: ({examples, profile}) => { |     handler: ({examples, profile}) => { | ||||||
|         if (examples.length === 0) { |         if (examples.length === 0) { | ||||||
|             examples = ["webpack", "vite", "create-react-app"] |             examples = ["webpack", "vite", "create-react-app"] | ||||||
|         } |         } | ||||||
|         buildExamples(examples, profile) |         buildExamples(examples, profile) | ||||||
|   }, |     } | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| const runRegistry = command({ | const runRegistry = command({ | ||||||
|     name: "run-registry", |     name: "run-registry", | ||||||
|     args: { |     args: { | ||||||
|  | @ -119,8 +112,8 @@ const runRegistry = command({ | ||||||
|             type: ReadProfile, |             type: ReadProfile, | ||||||
|             long: "profile", |             long: "profile", | ||||||
|             short: "p", |             short: "p", | ||||||
|       defaultValue: () => "dev" as Profile, |             defaultValue: () => "dev" as Profile | ||||||
|     }), |         }) | ||||||
|     }, |     }, | ||||||
|     handler: ({profile}) => { |     handler: ({profile}) => { | ||||||
|         withPublishedWasm(profile, async (registryUrl: string) => { |         withPublishedWasm(profile, async (registryUrl: string) => { | ||||||
|  | @ -133,23 +126,19 @@ const runRegistry = command({ | ||||||
|         }).catch(e => { |         }).catch(e => { | ||||||
|             console.error(`Failed: ${e}`) |             console.error(`Failed: ${e}`) | ||||||
|         }) |         }) | ||||||
|   }, |     } | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| const app = subcommands({ | const app = subcommands({ | ||||||
|     name: "e2e", |     name: "e2e", | ||||||
|   cmds: { |     cmds: {buildjs, buildexamples, buildwasm: buildWasm, "run-registry": runRegistry} | ||||||
|     buildjs, |  | ||||||
|     buildexamples, |  | ||||||
|     buildwasm: buildWasm, |  | ||||||
|     "run-registry": runRegistry, |  | ||||||
|   }, |  | ||||||
| }) | }) | ||||||
| 
 | 
 | ||||||
| run(app, process.argv.slice(2)) | run(app, process.argv.slice(2)) | ||||||
| 
 | 
 | ||||||
| async function buildExamples(examples: Array<Example>, profile: Profile) { | async function buildExamples(examples: Array<Example>, profile: Profile) { | ||||||
|   await withPublishedWasm(profile, async registryUrl => { |     await withPublishedWasm(profile, async (registryUrl) => { | ||||||
|         printHeader("building and publishing automerge") |         printHeader("building and publishing automerge") | ||||||
|         await buildAndPublishAutomergeJs(registryUrl)  |         await buildAndPublishAutomergeJs(registryUrl)  | ||||||
|         for (const example of examples) { |         for (const example of examples) { | ||||||
|  | @ -157,66 +146,21 @@ async function buildExamples(examples: Array<Example>, profile: Profile) { | ||||||
|             if (example === "webpack") { |             if (example === "webpack") { | ||||||
|                 const projectPath = path.join(EXAMPLES_DIR, example) |                 const projectPath = path.join(EXAMPLES_DIR, example) | ||||||
|                 await removeExistingAutomerge(projectPath) |                 await removeExistingAutomerge(projectPath) | ||||||
|         await fsPromises.rm(path.join(projectPath, "yarn.lock"), { |                 await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) | ||||||
|           force: true, |                 await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) | ||||||
|         }) |                 await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) | ||||||
|         await spawnAndWait( |  | ||||||
|           "yarn", |  | ||||||
|           [ |  | ||||||
|             "--cwd", |  | ||||||
|             projectPath, |  | ||||||
|             "install", |  | ||||||
|             "--registry", |  | ||||||
|             registryUrl, |  | ||||||
|             "--check-files", |  | ||||||
|           ], |  | ||||||
|           { stdio: "inherit" } |  | ||||||
|         ) |  | ||||||
|         await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { |  | ||||||
|           stdio: "inherit", |  | ||||||
|         }) |  | ||||||
|             } else if (example === "vite") { |             } else if (example === "vite") { | ||||||
|                 const projectPath = path.join(EXAMPLES_DIR, example) |                 const projectPath = path.join(EXAMPLES_DIR, example) | ||||||
|                 await removeExistingAutomerge(projectPath) |                 await removeExistingAutomerge(projectPath) | ||||||
|         await fsPromises.rm(path.join(projectPath, "yarn.lock"), { |                 await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) | ||||||
|           force: true, |                 await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) | ||||||
|         }) |                 await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) | ||||||
|         await spawnAndWait( |  | ||||||
|           "yarn", |  | ||||||
|           [ |  | ||||||
|             "--cwd", |  | ||||||
|             projectPath, |  | ||||||
|             "install", |  | ||||||
|             "--registry", |  | ||||||
|             registryUrl, |  | ||||||
|             "--check-files", |  | ||||||
|           ], |  | ||||||
|           { stdio: "inherit" } |  | ||||||
|         ) |  | ||||||
|         await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { |  | ||||||
|           stdio: "inherit", |  | ||||||
|         }) |  | ||||||
|             } else if (example === "create-react-app") { |             } else if (example === "create-react-app") { | ||||||
|                 const projectPath = path.join(EXAMPLES_DIR, example) |                 const projectPath = path.join(EXAMPLES_DIR, example) | ||||||
|                 await removeExistingAutomerge(projectPath) |                 await removeExistingAutomerge(projectPath) | ||||||
|         await fsPromises.rm(path.join(projectPath, "yarn.lock"), { |                 await fsPromises.rm(path.join(projectPath, "yarn.lock"), {force: true}) | ||||||
|           force: true, |                 await spawnAndWait("yarn", ["--cwd", projectPath, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) | ||||||
|         }) |                 await spawnAndWait("yarn", ["--cwd", projectPath, "build"], {stdio: "inherit"}) | ||||||
|         await spawnAndWait( |  | ||||||
|           "yarn", |  | ||||||
|           [ |  | ||||||
|             "--cwd", |  | ||||||
|             projectPath, |  | ||||||
|             "install", |  | ||||||
|             "--registry", |  | ||||||
|             registryUrl, |  | ||||||
|             "--check-files", |  | ||||||
|           ], |  | ||||||
|           { stdio: "inherit" } |  | ||||||
|         ) |  | ||||||
|         await spawnAndWait("yarn", ["--cwd", projectPath, "build"], { |  | ||||||
|           stdio: "inherit", |  | ||||||
|         }) |  | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     }) |     }) | ||||||
|  | @ -224,10 +168,7 @@ async function buildExamples(examples: Array<Example>, profile: Profile) { | ||||||
| 
 | 
 | ||||||
| type WithRegistryAction = (registryUrl: string) => Promise<void> | type WithRegistryAction = (registryUrl: string) => Promise<void> | ||||||
| 
 | 
 | ||||||
| async function withRegistry( | async function withRegistry(action: WithRegistryAction, ...actions: Array<WithRegistryAction>) { | ||||||
|   action: WithRegistryAction, |  | ||||||
|   ...actions: Array<WithRegistryAction> |  | ||||||
| ) { |  | ||||||
|     // First, start verdaccio
 |     // First, start verdaccio
 | ||||||
|     printHeader("Starting verdaccio NPM server") |     printHeader("Starting verdaccio NPM server") | ||||||
|     const verd = await VerdaccioProcess.start() |     const verd = await VerdaccioProcess.start() | ||||||
|  | @ -257,30 +198,25 @@ async function withRegistry( | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| async function withPublishedWasm(profile: Profile, action: WithRegistryAction) { | async function withPublishedWasm(profile: Profile, action: WithRegistryAction) { | ||||||
|   await withRegistry(buildAutomergeWasm(profile), publishAutomergeWasm, action) |     await withRegistry( | ||||||
|  |         buildAutomergeWasm(profile), | ||||||
|  |         publishAutomergeWasm, | ||||||
|  |         action | ||||||
|  |     ) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| function buildAutomergeWasm(profile: Profile): WithRegistryAction { | function buildAutomergeWasm(profile: Profile): WithRegistryAction { | ||||||
|     return async (registryUrl: string) => { |     return async (registryUrl: string) => { | ||||||
|         printHeader("building automerge-wasm") |         printHeader("building automerge-wasm") | ||||||
|     await spawnAndWait( |         await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, "--registry", registryUrl, "install"], {stdio: "inherit"}) | ||||||
|       "yarn", |  | ||||||
|       ["--cwd", AUTOMERGE_WASM_PATH, "--registry", registryUrl, "install"], |  | ||||||
|       { stdio: "inherit" } |  | ||||||
|     ) |  | ||||||
|         const cmd = profile === "release" ? "release" : "debug" |         const cmd = profile === "release" ? "release" : "debug" | ||||||
|     await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, cmd], { |         await spawnAndWait("yarn", ["--cwd", AUTOMERGE_WASM_PATH, cmd], {stdio: "inherit"}) | ||||||
|       stdio: "inherit", |  | ||||||
|     }) |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| async function publishAutomergeWasm(registryUrl: string) { | async function publishAutomergeWasm(registryUrl: string) { | ||||||
|     printHeader("Publishing automerge-wasm to verdaccio") |     printHeader("Publishing automerge-wasm to verdaccio") | ||||||
|   await fsPromises.rm( |     await fsPromises.rm(path.join(VERDACCIO_DB_PATH, "@automerge/automerge-wasm"), { recursive: true, force: true} ) | ||||||
|     path.join(VERDACCIO_DB_PATH, "@automerge/automerge-wasm"), |  | ||||||
|     { recursive: true, force: true } |  | ||||||
|   ) |  | ||||||
|     await yarnPublish(registryUrl, AUTOMERGE_WASM_PATH) |     await yarnPublish(registryUrl, AUTOMERGE_WASM_PATH) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -289,24 +225,9 @@ async function buildAndPublishAutomergeJs(registryUrl: string) { | ||||||
|     printHeader("Building automerge") |     printHeader("Building automerge") | ||||||
|     await removeExistingAutomerge(AUTOMERGE_JS_PATH) |     await removeExistingAutomerge(AUTOMERGE_JS_PATH) | ||||||
|     await removeFromVerdaccio("@automerge/automerge") |     await removeFromVerdaccio("@automerge/automerge") | ||||||
|   await fsPromises.rm(path.join(AUTOMERGE_JS_PATH, "yarn.lock"), { |     await fsPromises.rm(path.join(AUTOMERGE_JS_PATH, "yarn.lock"), {force: true}) | ||||||
|     force: true, |     await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "install", "--registry", registryUrl, "--check-files"], {stdio: "inherit"}) | ||||||
|   }) |     await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "build"], {stdio: "inherit"}) | ||||||
|   await spawnAndWait( |  | ||||||
|     "yarn", |  | ||||||
|     [ |  | ||||||
|       "--cwd", |  | ||||||
|       AUTOMERGE_JS_PATH, |  | ||||||
|       "install", |  | ||||||
|       "--registry", |  | ||||||
|       registryUrl, |  | ||||||
|       "--check-files", |  | ||||||
|     ], |  | ||||||
|     { stdio: "inherit" } |  | ||||||
|   ) |  | ||||||
|   await spawnAndWait("yarn", ["--cwd", AUTOMERGE_JS_PATH, "build"], { |  | ||||||
|     stdio: "inherit", |  | ||||||
|   }) |  | ||||||
|     await yarnPublish(registryUrl, AUTOMERGE_JS_PATH) |     await yarnPublish(registryUrl, AUTOMERGE_JS_PATH) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -325,10 +246,8 @@ class VerdaccioProcess { | ||||||
|         // Collect stdout/stderr otherwise the subprocess gets blocked writing
 |         // Collect stdout/stderr otherwise the subprocess gets blocked writing
 | ||||||
|         this.stdout = [] |         this.stdout = [] | ||||||
|         this.stderr = [] |         this.stderr = [] | ||||||
|     this.child.stdout && |         this.child.stdout && this.child.stdout.on("data", (data) => this.stdout.push(data)) | ||||||
|       this.child.stdout.on("data", data => this.stdout.push(data)) |         this.child.stderr && this.child.stderr.on("data", (data) => this.stderr.push(data)) | ||||||
|     this.child.stderr && |  | ||||||
|       this.child.stderr.on("data", data => this.stderr.push(data)) |  | ||||||
| 
 | 
 | ||||||
|         const errCallback = (e: any) => { |         const errCallback = (e: any) => { | ||||||
|             console.error("!!!!!!!!!ERROR IN VERDACCIO PROCESS!!!!!!!!!") |             console.error("!!!!!!!!!ERROR IN VERDACCIO PROCESS!!!!!!!!!") | ||||||
|  | @ -355,11 +274,7 @@ class VerdaccioProcess { | ||||||
|      * The returned `VerdaccioProcess` can be used to control the subprocess |      * The returned `VerdaccioProcess` can be used to control the subprocess | ||||||
|      */ |      */ | ||||||
|     static async start() { |     static async start() { | ||||||
|     const child = spawn( |         const child = spawn("yarn", ["verdaccio", "--config", VERDACCIO_CONFIG_PATH], {env: { ...process.env, FORCE_COLOR: "true"}}) | ||||||
|       "yarn", |  | ||||||
|       ["verdaccio", "--config", VERDACCIO_CONFIG_PATH], |  | ||||||
|       { env: { ...process.env, FORCE_COLOR: "true" } } |  | ||||||
|     ) |  | ||||||
| 
 | 
 | ||||||
|         // Forward stdout and stderr whilst waiting for startup to complete
 |         // Forward stdout and stderr whilst waiting for startup to complete
 | ||||||
|         const stdoutCallback = (data: Buffer) => process.stdout.write(data) |         const stdoutCallback = (data: Buffer) => process.stdout.write(data) | ||||||
|  | @ -396,7 +311,7 @@ class VerdaccioProcess { | ||||||
|     async kill() { |     async kill() { | ||||||
|         this.child.stdout && this.child.stdout.destroy() |         this.child.stdout && this.child.stdout.destroy() | ||||||
|         this.child.stderr && this.child.stderr.destroy() |         this.child.stderr && this.child.stderr.destroy() | ||||||
|     this.child.kill() |         this.child.kill(); | ||||||
|         try { |         try { | ||||||
|             await withTimeout(once(this.child, "close"), 500) |             await withTimeout(once(this.child, "close"), 500) | ||||||
|         } catch (e) { |         } catch (e) { | ||||||
|  | @ -432,26 +347,16 @@ function printHeader(header: string) { | ||||||
|  * @param packageDir - The directory containing the package.json of the target project |  * @param packageDir - The directory containing the package.json of the target project | ||||||
|  */ |  */ | ||||||
| async function removeExistingAutomerge(packageDir: string) { | async function removeExistingAutomerge(packageDir: string) { | ||||||
|   await fsPromises.rm(path.join(packageDir, "node_modules", "@automerge"), { |     await fsPromises.rm(path.join(packageDir, "node_modules", "@automerge"), {recursive: true, force: true}) | ||||||
|     recursive: true, |     await fsPromises.rm(path.join(packageDir, "node_modules", "automerge"), {recursive: true, force: true}) | ||||||
|     force: true, |  | ||||||
|   }) |  | ||||||
|   await fsPromises.rm(path.join(packageDir, "node_modules", "automerge"), { |  | ||||||
|     recursive: true, |  | ||||||
|     force: true, |  | ||||||
|   }) |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| type SpawnResult = { | type SpawnResult = { | ||||||
|   stdout?: Buffer |     stdout?: Buffer, | ||||||
|   stderr?: Buffer |     stderr?: Buffer, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| async function spawnAndWait( | async function spawnAndWait(cmd: string, args: Array<string>, options: child_process.SpawnOptions): Promise<SpawnResult> { | ||||||
|   cmd: string, |  | ||||||
|   args: Array<string>, |  | ||||||
|   options: child_process.SpawnOptions |  | ||||||
| ): Promise<SpawnResult> { |  | ||||||
|     const child = spawn(cmd, args, options) |     const child = spawn(cmd, args, options) | ||||||
|     let stdout = null |     let stdout = null | ||||||
|     let stderr = null |     let stderr = null | ||||||
|  | @ -470,7 +375,7 @@ async function spawnAndWait( | ||||||
|     } |     } | ||||||
|     return { |     return { | ||||||
|         stderr: stderr? Buffer.concat(stderr) : null, |         stderr: stderr? Buffer.concat(stderr) : null, | ||||||
|     stdout: stdout ? Buffer.concat(stdout) : null, |         stdout: stdout ? Buffer.concat(stdout) : null | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -482,27 +387,29 @@ async function spawnAndWait( | ||||||
|  * okay I Promise. |  * okay I Promise. | ||||||
|  */ |  */ | ||||||
| async function removeFromVerdaccio(packageName: string) { | async function removeFromVerdaccio(packageName: string) { | ||||||
|   await fsPromises.rm(path.join(VERDACCIO_DB_PATH, packageName), { |     await fsPromises.rm(path.join(VERDACCIO_DB_PATH, packageName), {force: true, recursive: true}) | ||||||
|     force: true, |  | ||||||
|     recursive: true, |  | ||||||
|   }) |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| async function yarnPublish(registryUrl: string, cwd: string) { | async function yarnPublish(registryUrl: string, cwd: string) { | ||||||
|     await spawnAndWait( |     await spawnAndWait( | ||||||
|         "yarn", |         "yarn", | ||||||
|     ["--registry", registryUrl, "--cwd", cwd, "publish", "--non-interactive"], |         [ | ||||||
|  |             "--registry", | ||||||
|  |             registryUrl, | ||||||
|  |             "--cwd", | ||||||
|  |             cwd, | ||||||
|  |             "publish", | ||||||
|  |             "--non-interactive", | ||||||
|  |         ], | ||||||
|         { |         { | ||||||
|             stdio: "inherit", |             stdio: "inherit", | ||||||
|             env: { |             env: { | ||||||
|                 ...process.env, |                 ...process.env, | ||||||
|                 FORCE_COLOR: "true", |                 FORCE_COLOR: "true", | ||||||
|                 // This is a fake token, it just has to be the right format
 |                 // This is a fake token, it just has to be the right format
 | ||||||
|         npm_config__auth: |                 npm_config__auth: "//localhost:4873/:_authToken=Gp2Mgxm4faa/7wp0dMSuRA==" | ||||||
|           "//localhost:4873/:_authToken=Gp2Mgxm4faa/7wp0dMSuRA==", |  | ||||||
|       }, |  | ||||||
|             } |             } | ||||||
|   ) |         }) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  | @ -512,10 +419,7 @@ async function yarnPublish(registryUrl: string, cwd: string) { | ||||||
|  * @param promise - the promise to wait for @param timeout - the delay in |  * @param promise - the promise to wait for @param timeout - the delay in | ||||||
|  * milliseconds to wait before throwing |  * milliseconds to wait before throwing | ||||||
|  */ |  */ | ||||||
| async function withTimeout<T>( | async function withTimeout<T>(promise: Promise<T>, timeout: number): Promise<T> { | ||||||
|   promise: Promise<T>, |  | ||||||
|   timeout: number |  | ||||||
| ): Promise<T> { |  | ||||||
|     type Step = "timed-out" | {result: T} |     type Step = "timed-out" | {result: T} | ||||||
|     const timedOut: () => Promise<Step> = async () => { |     const timedOut: () => Promise<Step> = async () => { | ||||||
|         await setTimeout(timeout) |         await setTimeout(timeout) | ||||||
|  |  | ||||||
|  | @ -54,6 +54,6 @@ In the root of the project add the following contents to `craco.config.js` | ||||||
| const cracoWasm = require("craco-wasm") | const cracoWasm = require("craco-wasm") | ||||||
| 
 | 
 | ||||||
| module.exports = { | module.exports = { | ||||||
|   plugins: [cracoWasm()], |     plugins: [cracoWasm()] | ||||||
| } | } | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
|  | @ -1,5 +1,5 @@ | ||||||
| const cracoWasm = require("craco-wasm") | const cracoWasm = require("craco-wasm") | ||||||
| 
 | 
 | ||||||
| module.exports = { | module.exports = { | ||||||
|   plugins: [cracoWasm()], |     plugins: [cracoWasm()] | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -1,11 +1,12 @@ | ||||||
| import * as Automerge from "@automerge/automerge" | import * as Automerge from "@automerge/automerge" | ||||||
| import logo from "./logo.svg" | import logo from './logo.svg'; | ||||||
| import "./App.css" | import './App.css'; | ||||||
| 
 | 
 | ||||||
| let doc = Automerge.init() | let doc = Automerge.init() | ||||||
| doc = Automerge.change(doc, d => (d.hello = "from automerge")) | doc = Automerge.change(doc, (d) => d.hello = "from automerge") | ||||||
| const result = JSON.stringify(doc) | const result = JSON.stringify(doc) | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| function App() { | function App() { | ||||||
|   return ( |   return ( | ||||||
|     <div className="App"> |     <div className="App"> | ||||||
|  | @ -14,7 +15,7 @@ function App() { | ||||||
|         <p>{result}</p> |         <p>{result}</p> | ||||||
|       </header> |       </header> | ||||||
|     </div> |     </div> | ||||||
|   ) |   ); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| export default App | export default App; | ||||||
|  |  | ||||||
|  | @ -1,8 +1,8 @@ | ||||||
| import { render, screen } from "@testing-library/react" | import { render, screen } from '@testing-library/react'; | ||||||
| import App from "./App" | import App from './App'; | ||||||
| 
 | 
 | ||||||
| test("renders learn react link", () => { | test('renders learn react link', () => { | ||||||
|   render(<App />) |   render(<App />); | ||||||
|   const linkElement = screen.getByText(/learn react/i) |   const linkElement = screen.getByText(/learn react/i); | ||||||
|   expect(linkElement).toBeInTheDocument() |   expect(linkElement).toBeInTheDocument(); | ||||||
| }) | }); | ||||||
|  |  | ||||||
|  | @ -1,13 +1,13 @@ | ||||||
| body { | body { | ||||||
|   margin: 0; |   margin: 0; | ||||||
|   font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", |   font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', | ||||||
|     "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", |     'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', | ||||||
|     sans-serif; |     sans-serif; | ||||||
|   -webkit-font-smoothing: antialiased; |   -webkit-font-smoothing: antialiased; | ||||||
|   -moz-osx-font-smoothing: grayscale; |   -moz-osx-font-smoothing: grayscale; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| code { | code { | ||||||
|   font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", |   font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', | ||||||
|     monospace; |     monospace; | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -1,17 +1,17 @@ | ||||||
| import React from "react" | import React from 'react'; | ||||||
| import ReactDOM from "react-dom/client" | import ReactDOM from 'react-dom/client'; | ||||||
| import "./index.css" | import './index.css'; | ||||||
| import App from "./App" | import App from './App'; | ||||||
| import reportWebVitals from "./reportWebVitals" | import reportWebVitals from './reportWebVitals'; | ||||||
| 
 | 
 | ||||||
| const root = ReactDOM.createRoot(document.getElementById("root")) | const root = ReactDOM.createRoot(document.getElementById('root')); | ||||||
| root.render( | root.render( | ||||||
|   <React.StrictMode> |   <React.StrictMode> | ||||||
|     <App /> |     <App /> | ||||||
|   </React.StrictMode> |   </React.StrictMode> | ||||||
| ) | ); | ||||||
| 
 | 
 | ||||||
| // If you want to start measuring performance in your app, pass a function
 | // If you want to start measuring performance in your app, pass a function
 | ||||||
| // to log results (for example: reportWebVitals(console.log))
 | // to log results (for example: reportWebVitals(console.log))
 | ||||||
| // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
 | ||||||
| reportWebVitals() | reportWebVitals(); | ||||||
|  |  | ||||||
|  | @ -1,13 +1,13 @@ | ||||||
| const reportWebVitals = onPerfEntry => { | const reportWebVitals = onPerfEntry => { | ||||||
|   if (onPerfEntry && onPerfEntry instanceof Function) { |   if (onPerfEntry && onPerfEntry instanceof Function) { | ||||||
|     import("web-vitals").then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { |     import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { | ||||||
|       getCLS(onPerfEntry) |       getCLS(onPerfEntry); | ||||||
|       getFID(onPerfEntry) |       getFID(onPerfEntry); | ||||||
|       getFCP(onPerfEntry) |       getFCP(onPerfEntry); | ||||||
|       getLCP(onPerfEntry) |       getLCP(onPerfEntry); | ||||||
|       getTTFB(onPerfEntry) |       getTTFB(onPerfEntry); | ||||||
|     }) |     }); | ||||||
|   } |  | ||||||
|   } |   } | ||||||
|  | }; | ||||||
| 
 | 
 | ||||||
| export default reportWebVitals | export default reportWebVitals; | ||||||
|  |  | ||||||
|  | @ -2,4 +2,4 @@ | ||||||
| // allows you to do things like:
 | // allows you to do things like:
 | ||||||
| // expect(element).toHaveTextContent(/react/i)
 | // expect(element).toHaveTextContent(/react/i)
 | ||||||
| // learn more: https://github.com/testing-library/jest-dom
 | // learn more: https://github.com/testing-library/jest-dom
 | ||||||
| import "@testing-library/jest-dom" | import '@testing-library/jest-dom'; | ||||||
|  |  | ||||||
|  | @ -24,17 +24,17 @@ | ||||||
|     jsonpointer "^5.0.0" |     jsonpointer "^5.0.0" | ||||||
|     leven "^3.1.0" |     leven "^3.1.0" | ||||||
| 
 | 
 | ||||||
| "@automerge/automerge-wasm@0.1.12": | "@automerge/automerge-wasm@0.1.9": | ||||||
|   version "0.1.12" |   version "0.1.9" | ||||||
|   resolved "https://registry.yarnpkg.com/@automerge/automerge-wasm/-/automerge-wasm-0.1.12.tgz#8ce25255d95d4ed6fb387de6858f7b7b7e2ed4a9" |   resolved "http://localhost:4873/@automerge%2fautomerge-wasm/-/automerge-wasm-0.1.9.tgz#b2def5e8b643f1802bc696843b7755dc444dc2eb" | ||||||
|   integrity sha512-/xjX1217QYJ+QaoT6iHQw4hGNUIoc3xc65c9eCnfX5v9J9BkTOl05p2Cnr51O2rPc/M6TqZLmlvpvNVdcH9JpA== |   integrity sha512-S+sjJUJ3aPn2F37vKYAzKxz8CDgbHpOOGVjKSgkLjkAqe1pQ+wp4BpiELXafX73w8DVIrGx1zzru4w3t+Eo8gw== | ||||||
| 
 | 
 | ||||||
| "@automerge/automerge@2.0.0-alpha.7": | "@automerge/automerge@2.0.0-alpha.4": | ||||||
|   version "2.0.0-alpha.7" |   version "2.0.0-alpha.4" | ||||||
|   resolved "https://registry.yarnpkg.com/@automerge/automerge/-/automerge-2.0.0-alpha.7.tgz#2ee220d51bcd796074a18af74eeabb5f177e1f36" |   resolved "http://localhost:4873/@automerge%2fautomerge/-/automerge-2.0.0-alpha.4.tgz#df406f5364960a4d21040044da55ebd47406ea3a" | ||||||
|   integrity sha512-Wd2/GNeqtBybUtXclEE7bWBmmEkhv3q2ITQmLh18V0VvMPbqMBpcOKYzQFnKCyiPyRe5XcYeQAyGyunhE5V0ug== |   integrity sha512-PVRD1dmLy0U4GttyMvlWr99wyr6xvskJbOkxJDHnp+W2VAFfcqa4QKouaFbJ4W3iIsYX8DfQJ+uhRxa6UnvkHg== | ||||||
|   dependencies: |   dependencies: | ||||||
|     "@automerge/automerge-wasm" "0.1.12" |     "@automerge/automerge-wasm" "0.1.9" | ||||||
|     uuid "^8.3" |     uuid "^8.3" | ||||||
| 
 | 
 | ||||||
| "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.18.6", "@babel/code-frame@^7.8.3": | "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.18.6", "@babel/code-frame@^7.8.3": | ||||||
|  | @ -2827,7 +2827,7 @@ bfj@^7.0.2: | ||||||
| 
 | 
 | ||||||
| big.js@^5.2.2: | big.js@^5.2.2: | ||||||
|   version "5.2.2" |   version "5.2.2" | ||||||
|   resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" |   resolved "http://localhost:4873/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" | ||||||
|   integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== |   integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== | ||||||
| 
 | 
 | ||||||
| binary-extensions@^2.0.0: | binary-extensions@^2.0.0: | ||||||
|  | @ -3817,7 +3817,7 @@ emoji-regex@^9.2.2: | ||||||
| 
 | 
 | ||||||
| emojis-list@^3.0.0: | emojis-list@^3.0.0: | ||||||
|   version "3.0.0" |   version "3.0.0" | ||||||
|   resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" |   resolved "http://localhost:4873/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" | ||||||
|   integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== |   integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== | ||||||
| 
 | 
 | ||||||
| encodeurl@~1.0.2: | encodeurl@~1.0.2: | ||||||
|  | @ -5845,9 +5845,9 @@ json-stable-stringify-without-jsonify@^1.0.1: | ||||||
|   integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== |   integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== | ||||||
| 
 | 
 | ||||||
| json5@^1.0.1: | json5@^1.0.1: | ||||||
|   version "1.0.2" |   version "1.0.1" | ||||||
|   resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593" |   resolved "http://localhost:4873/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" | ||||||
|   integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA== |   integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== | ||||||
|   dependencies: |   dependencies: | ||||||
|     minimist "^1.2.0" |     minimist "^1.2.0" | ||||||
| 
 | 
 | ||||||
|  | @ -5942,9 +5942,9 @@ loader-runner@^4.2.0: | ||||||
|   integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg== |   integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg== | ||||||
| 
 | 
 | ||||||
| loader-utils@^2.0.0: | loader-utils@^2.0.0: | ||||||
|   version "2.0.4" |   version "2.0.2" | ||||||
|   resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.4.tgz#8b5cb38b5c34a9a018ee1fc0e6a066d1dfcc528c" |   resolved "http://localhost:4873/loader-utils/-/loader-utils-2.0.2.tgz#d6e3b4fb81870721ae4e0868ab11dd638368c129" | ||||||
|   integrity sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw== |   integrity sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A== | ||||||
|   dependencies: |   dependencies: | ||||||
|     big.js "^5.2.2" |     big.js "^5.2.2" | ||||||
|     emojis-list "^3.0.0" |     emojis-list "^3.0.0" | ||||||
|  | @ -6165,9 +6165,9 @@ minimatch@^5.0.1: | ||||||
|     brace-expansion "^2.0.1" |     brace-expansion "^2.0.1" | ||||||
| 
 | 
 | ||||||
| minimist@^1.2.0, minimist@^1.2.6: | minimist@^1.2.0, minimist@^1.2.6: | ||||||
|   version "1.2.7" |   version "1.2.6" | ||||||
|   resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18" |   resolved "http://localhost:4873/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" | ||||||
|   integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g== |   integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== | ||||||
| 
 | 
 | ||||||
| mkdirp@~0.5.1: | mkdirp@~0.5.1: | ||||||
|   version "0.5.6" |   version "0.5.6" | ||||||
|  |  | ||||||
|  | @ -7,7 +7,6 @@ There are three things you need to do to get WASM packaging working with vite: | ||||||
| 3. Exclude `automerge-wasm` from the optimizer | 3. Exclude `automerge-wasm` from the optimizer | ||||||
| 
 | 
 | ||||||
| First, install the packages we need: | First, install the packages we need: | ||||||
| 
 |  | ||||||
| ```bash | ```bash | ||||||
| yarn add vite-plugin-top-level-await | yarn add vite-plugin-top-level-await | ||||||
| yarn add vite-plugin-wasm | yarn add vite-plugin-wasm | ||||||
|  | @ -27,7 +26,7 @@ export default defineConfig({ | ||||||
|     // documented in https://vitejs.dev/guide/features.html#import-with-constructors |     // documented in https://vitejs.dev/guide/features.html#import-with-constructors | ||||||
|     worker: {  |     worker: {  | ||||||
|         format: "es", |         format: "es", | ||||||
|     plugins: [topLevelAwait(), wasm()], |         plugins: [topLevelAwait(), wasm()]  | ||||||
|     }, |     }, | ||||||
| 
 | 
 | ||||||
|     optimizeDeps: { |     optimizeDeps: { | ||||||
|  | @ -35,8 +34,8 @@ export default defineConfig({ | ||||||
|         // versions of the JS wrapper. This causes problems because the JS |         // versions of the JS wrapper. This causes problems because the JS | ||||||
|         // wrapper has a module level variable to track JS side heap |         // wrapper has a module level variable to track JS side heap | ||||||
|         // allocations, initializing this twice causes horrible breakage |         // allocations, initializing this twice causes horrible breakage | ||||||
|     exclude: ["@automerge/automerge-wasm"], |         exclude: ["@automerge/automerge-wasm"] | ||||||
|   }, |     } | ||||||
| }) | }) | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
|  | @ -52,3 +51,4 @@ yarn vite | ||||||
| yarn install | yarn install | ||||||
| yarn dev | yarn dev | ||||||
| ``` | ``` | ||||||
|  | 
 | ||||||
|  |  | ||||||
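Read together, the hunks above amount to a Vite config along the following lines. This is a sketch assembled from the fragments shown; the plugin import names follow the two packages installed earlier, and the top-level `plugins` entry is an assumption rather than something visible in the diff.

```typescript
// vite.config.ts -- sketch assembled from the hunks above, not copied from the repo
import { defineConfig } from "vite"
import wasm from "vite-plugin-wasm"
import topLevelAwait from "vite-plugin-top-level-await"

export default defineConfig({
  // assumed: the same plugins are also registered for the main bundle
  plugins: [topLevelAwait(), wasm()],

  // workers need the same treatment as the main bundle
  worker: {
    format: "es",
    plugins: [topLevelAwait(), wasm()],
  },

  // keep Vite's optimizer from producing a second copy of the WASM wrapper,
  // which would duplicate its module-level heap-tracking state
  optimizeDeps: {
    exclude: ["@automerge/automerge-wasm"],
  },
})
```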
|  | @ -1,15 +1,15 @@ | ||||||
| import * as Automerge from "/node_modules/.vite/deps/automerge-js.js?v=6e973f28" | import * as Automerge from "/node_modules/.vite/deps/automerge-js.js?v=6e973f28"; | ||||||
| console.log(Automerge) | console.log(Automerge); | ||||||
| let doc = Automerge.init() | let doc = Automerge.init(); | ||||||
| doc = Automerge.change(doc, d => (d.hello = "from automerge-js")) | doc = Automerge.change(doc, (d) => d.hello = "from automerge-js"); | ||||||
| console.log(doc) | console.log(doc); | ||||||
| const result = JSON.stringify(doc) | const result = JSON.stringify(doc); | ||||||
| if (typeof document !== "undefined") { | if (typeof document !== "undefined") { | ||||||
|   const element = document.createElement("div") |   const element = document.createElement("div"); | ||||||
|   element.innerHTML = JSON.stringify(result) |   element.innerHTML = JSON.stringify(result); | ||||||
|   document.body.appendChild(element) |   document.body.appendChild(element); | ||||||
| } else { | } else { | ||||||
|   console.log("node:", result) |   console.log("node:", result); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi9ob21lL2FsZXgvUHJvamVjdHMvYXV0b21lcmdlL2F1dG9tZXJnZS1ycy9hdXRvbWVyZ2UtanMvZXhhbXBsZXMvdml0ZS9zcmMvbWFpbi50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgKiBhcyBBdXRvbWVyZ2UgZnJvbSBcImF1dG9tZXJnZS1qc1wiXG5cbi8vIGhlbGxvIHdvcmxkIGNvZGUgdGhhdCB3aWxsIHJ1biBjb3JyZWN0bHkgb24gd2ViIG9yIG5vZGVcblxuY29uc29sZS5sb2coQXV0b21lcmdlKVxubGV0IGRvYyA9IEF1dG9tZXJnZS5pbml0KClcbmRvYyA9IEF1dG9tZXJnZS5jaGFuZ2UoZG9jLCAoZDogYW55KSA9PiBkLmhlbGxvID0gXCJmcm9tIGF1dG9tZXJnZS1qc1wiKVxuY29uc29sZS5sb2coZG9jKVxuY29uc3QgcmVzdWx0ID0gSlNPTi5zdHJpbmdpZnkoZG9jKVxuXG5pZiAodHlwZW9mIGRvY3VtZW50ICE9PSAndW5kZWZpbmVkJykge1xuICAgIC8vIGJyb3dzZXJcbiAgICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnZGl2Jyk7XG4gICAgZWxlbWVudC5pbm5lckhUTUwgPSBKU09OLnN0cmluZ2lmeShyZXN1bHQpXG4gICAgZG9jdW1lbnQuYm9keS5hcHBlbmRDaGlsZChlbGVtZW50KTtcbn0gZWxzZSB7XG4gICAgLy8gc2VydmVyXG4gICAgY29uc29sZS5sb2coXCJub2RlOlwiLCByZXN1bHQpXG59XG5cbiJdLCJtYXBwaW5ncyI6IkFBQUEsWUFBWSxlQUFlO0FBSTNCLFFBQVEsSUFBSSxTQUFTO0FBQ3JCLElBQUksTUFBTSxVQUFVLEtBQUs7QUFDekIsTUFBTSxVQUFVLE9BQU8sS0FBSyxDQUFDLE1BQVcsRUFBRSxRQUFRLG1CQUFtQjtBQUNyRSxRQUFRLElBQUksR0FBRztBQUNmLE1BQU0sU0FBUyxLQUFLLFVBQVUsR0FBRztBQUVqQyxJQUFJLE9BQU8sYUFBYSxhQUFhO0FBRWpDLFFBQU0sVUFBVSxTQUFTLGNBQWMsS0FBSztBQUM1QyxVQUFRLFlBQVksS0FBSyxVQUFVLE1BQU07QUFDekMsV0FBUyxLQUFLLFlBQVksT0FBTztBQUNyQyxPQUFPO0FBRUgsVUFBUSxJQUFJLFNBQVMsTUFBTTtBQUMvQjsiLCJuYW1lcyI6W119
 | //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi9ob21lL2FsZXgvUHJvamVjdHMvYXV0b21lcmdlL2F1dG9tZXJnZS1ycy9hdXRvbWVyZ2UtanMvZXhhbXBsZXMvdml0ZS9zcmMvbWFpbi50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgKiBhcyBBdXRvbWVyZ2UgZnJvbSBcImF1dG9tZXJnZS1qc1wiXG5cbi8vIGhlbGxvIHdvcmxkIGNvZGUgdGhhdCB3aWxsIHJ1biBjb3JyZWN0bHkgb24gd2ViIG9yIG5vZGVcblxuY29uc29sZS5sb2coQXV0b21lcmdlKVxubGV0IGRvYyA9IEF1dG9tZXJnZS5pbml0KClcbmRvYyA9IEF1dG9tZXJnZS5jaGFuZ2UoZG9jLCAoZDogYW55KSA9PiBkLmhlbGxvID0gXCJmcm9tIGF1dG9tZXJnZS1qc1wiKVxuY29uc29sZS5sb2coZG9jKVxuY29uc3QgcmVzdWx0ID0gSlNPTi5zdHJpbmdpZnkoZG9jKVxuXG5pZiAodHlwZW9mIGRvY3VtZW50ICE9PSAndW5kZWZpbmVkJykge1xuICAgIC8vIGJyb3dzZXJcbiAgICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnZGl2Jyk7XG4gICAgZWxlbWVudC5pbm5lckhUTUwgPSBKU09OLnN0cmluZ2lmeShyZXN1bHQpXG4gICAgZG9jdW1lbnQuYm9keS5hcHBlbmRDaGlsZChlbGVtZW50KTtcbn0gZWxzZSB7XG4gICAgLy8gc2VydmVyXG4gICAgY29uc29sZS5sb2coXCJub2RlOlwiLCByZXN1bHQpXG59XG5cbiJdLCJtYXBwaW5ncyI6IkFBQUEsWUFBWSxlQUFlO0FBSTNCLFFBQVEsSUFBSSxTQUFTO0FBQ3JCLElBQUksTUFBTSxVQUFVLEtBQUs7QUFDekIsTUFBTSxVQUFVLE9BQU8sS0FBSyxDQUFDLE1BQVcsRUFBRSxRQUFRLG1CQUFtQjtBQUNyRSxRQUFRLElBQUksR0FBRztBQUNmLE1BQU0sU0FBUyxLQUFLLFVBQVUsR0FBRztBQUVqQyxJQUFJLE9BQU8sYUFBYSxhQUFhO0FBRWpDLFFBQU0sVUFBVSxTQUFTLGNBQWMsS0FBSztBQUM1QyxVQUFRLFlBQVksS0FBSyxVQUFVLE1BQU07QUFDekMsV0FBUyxLQUFLLFlBQVksT0FBTztBQUNyQyxPQUFPO0FBRUgsVUFBUSxJQUFJLFNBQVMsTUFBTTtBQUMvQjsiLCJuYW1lcyI6W119
 | ||||||
|  | @ -4,6 +4,6 @@ export function setupCounter(element: HTMLButtonElement) { | ||||||
|     counter = count |     counter = count | ||||||
|     element.innerHTML = `count is ${counter}` |     element.innerHTML = `count is ${counter}` | ||||||
|   } |   } | ||||||
|   element.addEventListener("click", () => setCounter(++counter)) |   element.addEventListener('click', () => setCounter(++counter)) | ||||||
|   setCounter(0) |   setCounter(0) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -3,15 +3,16 @@ import * as Automerge from "@automerge/automerge" | ||||||
| // hello world code that will run correctly on web or node
 | // hello world code that will run correctly on web or node
 | ||||||
| 
 | 
 | ||||||
| let doc = Automerge.init() | let doc = Automerge.init() | ||||||
| doc = Automerge.change(doc, (d: any) => (d.hello = "from automerge")) | doc = Automerge.change(doc, (d: any) => d.hello = "from automerge") | ||||||
| const result = JSON.stringify(doc) | const result = JSON.stringify(doc) | ||||||
| 
 | 
 | ||||||
| if (typeof document !== "undefined") { | if (typeof document !== 'undefined') { | ||||||
|     // browser
 |     // browser
 | ||||||
|   const element = document.createElement("div") |     const element = document.createElement('div'); | ||||||
|     element.innerHTML = JSON.stringify(result) |     element.innerHTML = JSON.stringify(result) | ||||||
|   document.body.appendChild(element) |     document.body.appendChild(element); | ||||||
| } else { | } else { | ||||||
|     // server
 |     // server
 | ||||||
|     console.log("node:", result) |     console.log("node:", result) | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  |  | ||||||
|  | @ -9,7 +9,7 @@ export default defineConfig({ | ||||||
|     // documented in https://vitejs.dev/guide/features.html#import-with-constructors
 |     // documented in https://vitejs.dev/guide/features.html#import-with-constructors
 | ||||||
|     worker: {  |     worker: {  | ||||||
|         format: "es", |         format: "es", | ||||||
|     plugins: [topLevelAwait(), wasm()], |         plugins: [topLevelAwait(), wasm()]  | ||||||
|     }, |     }, | ||||||
| 
 | 
 | ||||||
|     optimizeDeps: { |     optimizeDeps: { | ||||||
|  | @ -17,6 +17,6 @@ export default defineConfig({ | ||||||
|         // versions of the JS wrapper. This causes problems because the JS
 |         // versions of the JS wrapper. This causes problems because the JS
 | ||||||
|         // wrapper has a module level variable to track JS side heap
 |         // wrapper has a module level variable to track JS side heap
 | ||||||
|         // allocations, initializing this twice causes horrible breakage
 |         // allocations, initializing this twice causes horrible breakage
 | ||||||
|     exclude: ["@automerge/automerge-wasm"], |         exclude: ["@automerge/automerge-wasm"] | ||||||
|   }, |     } | ||||||
| }) | }) | ||||||
|  |  | ||||||
|  | @ -1,34 +1,36 @@ | ||||||
| # Webpack + Automerge | # Webpack + Automerge | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| Getting WASM working in webpack 5 is very easy. You just need to enable the | Getting WASM working in webpack 5 is very easy. You just need to enable the | ||||||
| `asyncWebAssembly` | `asyncWebAssembly` | ||||||
| [experiment](https://webpack.js.org/configuration/experiments/). For example: | [experiment](https://webpack.js.org/configuration/experiments/). For example: | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| ```javascript | ```javascript | ||||||
| const path = require("path") | const path = require('path'); | ||||||
| 
 | 
 | ||||||
| const clientConfig = { | const clientConfig = { | ||||||
|   experiments: { asyncWebAssembly: true }, |   experiments: { asyncWebAssembly: true }, | ||||||
|   target: "web", |   target: 'web', | ||||||
|   entry: "./src/index.js", |   entry: './src/index.js', | ||||||
|   output: { |   output: { | ||||||
|     filename: "main.js", |     filename: 'main.js', | ||||||
|     path: path.resolve(__dirname, "public"), |     path: path.resolve(__dirname, 'public'), | ||||||
|   }, |   }, | ||||||
|   mode: "development", // or production |   mode: "development", // or production | ||||||
|   performance: { |   performance: {       // we dont want the wasm blob to generate warnings | ||||||
|     // we dont want the wasm blob to generate warnings |  | ||||||
|      hints: false, |      hints: false, | ||||||
|      maxEntrypointSize: 512000, |      maxEntrypointSize: 512000, | ||||||
|     maxAssetSize: 512000, |      maxAssetSize: 512000 | ||||||
|   }, |  | ||||||
|   } |   } | ||||||
|  | }; | ||||||
| 
 | 
 | ||||||
| module.exports = clientConfig | module.exports = clientConfig | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
| ## Running the example | ## Running the example | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| ```bash | ```bash | ||||||
| yarn install | yarn install | ||||||
| yarn start | yarn start | ||||||
|  |  | ||||||
|  | @ -3,15 +3,16 @@ import * as Automerge from "@automerge/automerge" | ||||||
| // hello world code that will run correctly on web or node
 | // hello world code that will run correctly on web or node
 | ||||||
| 
 | 
 | ||||||
| let doc = Automerge.init() | let doc = Automerge.init() | ||||||
| doc = Automerge.change(doc, d => (d.hello = "from automerge")) | doc = Automerge.change(doc, (d) => d.hello = "from automerge") | ||||||
| const result = JSON.stringify(doc) | const result = JSON.stringify(doc) | ||||||
| 
 | 
 | ||||||
| if (typeof document !== "undefined") { | if (typeof document !== 'undefined') { | ||||||
|   // browser
 |   // browser
 | ||||||
|   const element = document.createElement("div") |   const element = document.createElement('div'); | ||||||
|   element.innerHTML = JSON.stringify(result) |   element.innerHTML = JSON.stringify(result) | ||||||
|   document.body.appendChild(element) |   document.body.appendChild(element); | ||||||
| } else { | } else { | ||||||
|   // server
 |   // server
 | ||||||
|   console.log("node:", result) |   console.log("node:", result) | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  |  | ||||||
|  | @ -1,37 +1,36 @@ | ||||||
| const path = require("path") | const path = require('path'); | ||||||
| const nodeExternals = require("webpack-node-externals") | const nodeExternals = require('webpack-node-externals'); | ||||||
| 
 | 
 | ||||||
| // the most basic webpack config for node or web targets for automerge-wasm
 | // the most basic webpack config for node or web targets for automerge-wasm
 | ||||||
| 
 | 
 | ||||||
| const serverConfig = { | const serverConfig = { | ||||||
|   // basic setup for bundling a node package
 |   // basic setup for bundling a node package
 | ||||||
|   target: "node", |   target: 'node', | ||||||
|   externals: [nodeExternals()], |   externals: [nodeExternals()], | ||||||
|   externalsPresets: { node: true }, |   externalsPresets: { node: true }, | ||||||
| 
 | 
 | ||||||
|   entry: "./src/index.js", |   entry: './src/index.js', | ||||||
|   output: { |   output: { | ||||||
|     filename: "node.js", |     filename: 'node.js', | ||||||
|     path: path.resolve(__dirname, "dist"), |     path: path.resolve(__dirname, 'dist'), | ||||||
|   }, |   }, | ||||||
|   mode: "development", // or production
 |   mode: "development", // or production
 | ||||||
| } | }; | ||||||
| 
 | 
 | ||||||
| const clientConfig = { | const clientConfig = { | ||||||
|   experiments: { asyncWebAssembly: true }, |   experiments: { asyncWebAssembly: true }, | ||||||
|   target: "web", |   target: 'web', | ||||||
|   entry: "./src/index.js", |   entry: './src/index.js', | ||||||
|   output: { |   output: { | ||||||
|     filename: "main.js", |     filename: 'main.js', | ||||||
|     path: path.resolve(__dirname, "public"), |     path: path.resolve(__dirname, 'public'), | ||||||
|   }, |   }, | ||||||
|   mode: "development", // or production
 |   mode: "development", // or production
 | ||||||
|   performance: { |   performance: {       // we dont want the wasm blob to generate warnings
 | ||||||
|     // we dont want the wasm blob to generate warnings
 |  | ||||||
|      hints: false, |      hints: false, | ||||||
|      maxEntrypointSize: 512000, |      maxEntrypointSize: 512000, | ||||||
|     maxAssetSize: 512000, |      maxAssetSize: 512000 | ||||||
|   }, |  | ||||||
|   } |   } | ||||||
|  | }; | ||||||
| 
 | 
 | ||||||
| module.exports = [serverConfig, clientConfig] | module.exports = [serverConfig, clientConfig]; | ||||||
|  |  | ||||||
|  | @ -4,7 +4,7 @@ | ||||||
|     "Orion Henry <orion@inkandswitch.com>", |     "Orion Henry <orion@inkandswitch.com>", | ||||||
|     "Martin Kleppmann" |     "Martin Kleppmann" | ||||||
|   ], |   ], | ||||||
|   "version": "2.0.2", |   "version": "2.0.1-alpha.2", | ||||||
|   "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", |   "description": "Javascript implementation of automerge, backed by @automerge/automerge-wasm", | ||||||
|   "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", |   "homepage": "https://github.com/automerge/automerge-rs/tree/main/wrappers/javascript", | ||||||
|   "repository": "github:automerge/automerge-rs", |   "repository": "github:automerge/automerge-rs", | ||||||
|  | @ -12,10 +12,26 @@ | ||||||
|     "README.md", |     "README.md", | ||||||
|     "LICENSE", |     "LICENSE", | ||||||
|     "package.json", |     "package.json", | ||||||
|     "dist/index.d.ts", |     "index.d.ts", | ||||||
|     "dist/cjs/**/*.js", |     "dist/*.d.ts", | ||||||
|     "dist/mjs/**/*.js", |     "dist/cjs/constants.js", | ||||||
|     "dist/*.d.ts" |     "dist/cjs/types.js", | ||||||
|  |     "dist/cjs/numbers.js", | ||||||
|  |     "dist/cjs/index.js", | ||||||
|  |     "dist/cjs/uuid.js", | ||||||
|  |     "dist/cjs/counter.js", | ||||||
|  |     "dist/cjs/low_level.js", | ||||||
|  |     "dist/cjs/text.js", | ||||||
|  |     "dist/cjs/proxies.js", | ||||||
|  |     "dist/mjs/constants.js", | ||||||
|  |     "dist/mjs/types.js", | ||||||
|  |     "dist/mjs/numbers.js", | ||||||
|  |     "dist/mjs/index.js", | ||||||
|  |     "dist/mjs/uuid.js", | ||||||
|  |     "dist/mjs/counter.js", | ||||||
|  |     "dist/mjs/low_level.js", | ||||||
|  |     "dist/mjs/text.js", | ||||||
|  |     "dist/mjs/proxies.js" | ||||||
|   ], |   ], | ||||||
|   "types": "./dist/index.d.ts", |   "types": "./dist/index.d.ts", | ||||||
|   "module": "./dist/mjs/index.js", |   "module": "./dist/mjs/index.js", | ||||||
|  | @ -23,31 +39,27 @@ | ||||||
|   "license": "MIT", |   "license": "MIT", | ||||||
|   "scripts": { |   "scripts": { | ||||||
|     "lint": "eslint src", |     "lint": "eslint src", | ||||||
|     "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc -p config/declonly.json --emitDeclarationOnly", |     "build": "tsc -p config/mjs.json && tsc -p config/cjs.json && tsc --emitDeclarationOnly", | ||||||
|     "test": "ts-mocha test/*.ts", |     "test": "ts-mocha test/*.ts", | ||||||
|     "deno:build": "denoify && node ./scripts/deno-prefixer.mjs", |     "watch-docs": "typedoc src/index.ts --watch --readme typedoc-readme.md" | ||||||
|     "deno:test": "deno test ./deno-tests/deno.ts --allow-read --allow-net", |  | ||||||
|     "watch-docs": "typedoc src/index.ts --watch --readme none" |  | ||||||
|   }, |   }, | ||||||
|   "devDependencies": { |   "devDependencies": { | ||||||
|     "@types/expect": "^24.3.0", |     "@types/expect": "^24.3.0", | ||||||
|     "@types/mocha": "^10.0.1", |     "@types/mocha": "^9.1.1", | ||||||
|     "@types/uuid": "^9.0.0", |     "@types/uuid": "^8.3.4", | ||||||
|     "@typescript-eslint/eslint-plugin": "^5.46.0", |     "@typescript-eslint/eslint-plugin": "^5.25.0", | ||||||
|     "@typescript-eslint/parser": "^5.46.0", |     "@typescript-eslint/parser": "^5.25.0", | ||||||
|     "denoify": "^1.4.5", |     "eslint": "^8.15.0", | ||||||
|     "eslint": "^8.29.0", |  | ||||||
|     "fast-sha256": "^1.3.0", |     "fast-sha256": "^1.3.0", | ||||||
|     "mocha": "^10.2.0", |     "mocha": "^10.0.0", | ||||||
|     "pako": "^2.1.0", |     "pako": "^2.0.4", | ||||||
|     "prettier": "^2.8.1", |  | ||||||
|     "ts-mocha": "^10.0.0", |     "ts-mocha": "^10.0.0", | ||||||
|     "ts-node": "^10.9.1", |     "ts-node": "^10.9.1", | ||||||
|     "typedoc": "^0.23.22", |     "typedoc": "^0.23.16", | ||||||
|     "typescript": "^4.9.4" |     "typescript": "^4.6.4" | ||||||
|   }, |   }, | ||||||
|   "dependencies": { |   "dependencies": { | ||||||
|     "@automerge/automerge-wasm": "0.1.25", |     "@automerge/automerge-wasm": "0.1.19", | ||||||
|     "uuid": "^9.0.0" |     "uuid": "^8.3" | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -1,9 +0,0 @@ | ||||||
| import * as fs from "fs" |  | ||||||
| 
 |  | ||||||
| const files = ["./deno_dist/proxies.ts"] |  | ||||||
| for (const filepath of files) { |  | ||||||
|   const data = fs.readFileSync(filepath) |  | ||||||
|   fs.writeFileSync(filepath, "// @ts-nocheck \n" + data) |  | ||||||
| 
 |  | ||||||
|   console.log('Prepended "// @ts-nocheck" to ' + filepath) |  | ||||||
| } |  | ||||||
|  | @ -1,42 +0,0 @@ | ||||||
| // @denoify-ignore
 |  | ||||||
| 
 |  | ||||||
| import { makeThisModuleAnExecutableReplacer } from "denoify" |  | ||||||
| // import { assert } from "tsafe";
 |  | ||||||
| // import * as path from "path";
 |  | ||||||
| 
 |  | ||||||
| makeThisModuleAnExecutableReplacer( |  | ||||||
|   async ({ parsedImportExportStatement, destDirPath, version }) => { |  | ||||||
|     version = process.env.VERSION || version |  | ||||||
| 
 |  | ||||||
|     switch (parsedImportExportStatement.parsedArgument.nodeModuleName) { |  | ||||||
|       case "@automerge/automerge-wasm": |  | ||||||
|         { |  | ||||||
|           const moduleRoot = |  | ||||||
|             process.env.ROOT_MODULE || |  | ||||||
|             `https://deno.land/x/automerge_wasm@${version}` |  | ||||||
|           /* |  | ||||||
|            *We expect not to run against statements like |  | ||||||
|            *import(..).then(...) |  | ||||||
|            *or |  | ||||||
|            *export * from "..." |  | ||||||
|            *in our code. |  | ||||||
|            */ |  | ||||||
|           if ( |  | ||||||
|             !parsedImportExportStatement.isAsyncImport && |  | ||||||
|             (parsedImportExportStatement.statementType === "import" || |  | ||||||
|               parsedImportExportStatement.statementType === "export") |  | ||||||
|           ) { |  | ||||||
|             if (parsedImportExportStatement.isTypeOnly) { |  | ||||||
|               return `${parsedImportExportStatement.statementType} type ${parsedImportExportStatement.target} from "${moduleRoot}/index.d.ts";` |  | ||||||
|             } else { |  | ||||||
|               return `${parsedImportExportStatement.statementType} ${parsedImportExportStatement.target} from "${moduleRoot}/automerge_wasm.js";` |  | ||||||
|             } |  | ||||||
|           } |  | ||||||
|         } |  | ||||||
|         break |  | ||||||
|     } |  | ||||||
| 
 |  | ||||||
|     //The replacer should return undefined when we want to let denoify replace the statement
 |  | ||||||
|     return undefined |  | ||||||
|   } |  | ||||||
| ) |  | ||||||
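To illustrate what the replacer above produces, a hypothetical before/after of a single import statement; the version number and module root are examples, not values taken from the diff.

```typescript
// source (npm layout):
import { Automerge } from "@automerge/automerge-wasm"

// after denoify, per the replacer above (version and module root are illustrative):
import { Automerge } from "https://deno.land/x/automerge_wasm@0.1.25/automerge_wasm.js"
```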
|  | @ -1,100 +0,0 @@ | ||||||
| import { Counter, type AutomergeValue } from "./types" |  | ||||||
| import { Text } from "./text" |  | ||||||
| import { type AutomergeValue as UnstableAutomergeValue } from "./unstable_types" |  | ||||||
| import { type Target, Text1Target, Text2Target } from "./proxies" |  | ||||||
| import { mapProxy, listProxy, ValueType } from "./proxies" |  | ||||||
| import type { Prop, ObjID } from "@automerge/automerge-wasm" |  | ||||||
| import { Automerge } from "@automerge/automerge-wasm" |  | ||||||
| 
 |  | ||||||
| export type ConflictsF<T extends Target> = { [key: string]: ValueType<T> } |  | ||||||
| export type Conflicts = ConflictsF<Text1Target> |  | ||||||
| export type UnstableConflicts = ConflictsF<Text2Target> |  | ||||||
| 
 |  | ||||||
| export function stableConflictAt( |  | ||||||
|   context: Automerge, |  | ||||||
|   objectId: ObjID, |  | ||||||
|   prop: Prop |  | ||||||
| ): Conflicts | undefined { |  | ||||||
|   return conflictAt<Text1Target>( |  | ||||||
|     context, |  | ||||||
|     objectId, |  | ||||||
|     prop, |  | ||||||
|     true, |  | ||||||
|     (context: Automerge, conflictId: ObjID): AutomergeValue => { |  | ||||||
|       return new Text(context.text(conflictId)) |  | ||||||
|     } |  | ||||||
|   ) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function unstableConflictAt( |  | ||||||
|   context: Automerge, |  | ||||||
|   objectId: ObjID, |  | ||||||
|   prop: Prop |  | ||||||
| ): UnstableConflicts | undefined { |  | ||||||
|   return conflictAt<Text2Target>( |  | ||||||
|     context, |  | ||||||
|     objectId, |  | ||||||
|     prop, |  | ||||||
|     true, |  | ||||||
|     (context: Automerge, conflictId: ObjID): UnstableAutomergeValue => { |  | ||||||
|       return context.text(conflictId) |  | ||||||
|     } |  | ||||||
|   ) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function conflictAt<T extends Target>( |  | ||||||
|   context: Automerge, |  | ||||||
|   objectId: ObjID, |  | ||||||
|   prop: Prop, |  | ||||||
|   textV2: boolean, |  | ||||||
|   handleText: (a: Automerge, conflictId: ObjID) => ValueType<T> |  | ||||||
| ): ConflictsF<T> | undefined { |  | ||||||
|   const values = context.getAll(objectId, prop) |  | ||||||
|   if (values.length <= 1) { |  | ||||||
|     return |  | ||||||
|   } |  | ||||||
|   const result: ConflictsF<T> = {} |  | ||||||
|   for (const fullVal of values) { |  | ||||||
|     switch (fullVal[0]) { |  | ||||||
|       case "map": |  | ||||||
|         result[fullVal[1]] = mapProxy<T>( |  | ||||||
|           context, |  | ||||||
|           fullVal[1], |  | ||||||
|           textV2, |  | ||||||
|           [prop], |  | ||||||
|           true |  | ||||||
|         ) |  | ||||||
|         break |  | ||||||
|       case "list": |  | ||||||
|         result[fullVal[1]] = listProxy<T>( |  | ||||||
|           context, |  | ||||||
|           fullVal[1], |  | ||||||
|           textV2, |  | ||||||
|           [prop], |  | ||||||
|           true |  | ||||||
|         ) |  | ||||||
|         break |  | ||||||
|       case "text": |  | ||||||
|         result[fullVal[1]] = handleText(context, fullVal[1] as ObjID) |  | ||||||
|         break |  | ||||||
|       case "str": |  | ||||||
|       case "uint": |  | ||||||
|       case "int": |  | ||||||
|       case "f64": |  | ||||||
|       case "boolean": |  | ||||||
|       case "bytes": |  | ||||||
|       case "null": |  | ||||||
|         result[fullVal[2]] = fullVal[1] as ValueType<T> |  | ||||||
|         break |  | ||||||
|       case "counter": |  | ||||||
|         result[fullVal[2]] = new Counter(fullVal[1]) as ValueType<T> |  | ||||||
|         break |  | ||||||
|       case "timestamp": |  | ||||||
|         result[fullVal[2]] = new Date(fullVal[1]) as ValueType<T> |  | ||||||
|         break |  | ||||||
|       default: |  | ||||||
|         throw RangeError(`datatype ${fullVal[0]} unimplemented`) |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|   return result |  | ||||||
| } |  | ||||||
|  | @ -1,12 +1,13 @@ | ||||||
| // Properties of the document root object
 | // Properties of the document root object
 | ||||||
| 
 | 
 | ||||||
| export const STATE = Symbol.for("_am_meta") // symbol used to hide application metadata on automerge objects
 | export const STATE      = Symbol.for('_am_meta')     // symbol used to hide application metadata on automerge objects
 | ||||||
| export const TRACE = Symbol.for("_am_trace") // used for debugging
 | export const TRACE      = Symbol.for('_am_trace')    // used for debugging
 | ||||||
| export const OBJECT_ID = Symbol.for("_am_objectId") // symbol used to hide the object id on automerge objects
 | export const OBJECT_ID  = Symbol.for('_am_objectId') // synbol used to hide the object id on automerge objects
 | ||||||
| export const IS_PROXY = Symbol.for("_am_isProxy") // symbol used to test if the document is a proxy object
 | export const IS_PROXY   = Symbol.for('_am_isProxy')  // symbol used to test if the document is a proxy object
 | ||||||
|  | 
 | ||||||
|  | export const UINT     = Symbol.for('_am_uint') | ||||||
|  | export const INT      = Symbol.for('_am_int') | ||||||
|  | export const F64      = Symbol.for('_am_f64') | ||||||
|  | export const COUNTER  = Symbol.for('_am_counter') | ||||||
|  | export const TEXT     = Symbol.for('_am_text') | ||||||
| 
 | 
 | ||||||
| export const UINT = Symbol.for("_am_uint") |  | ||||||
| export const INT = Symbol.for("_am_int") |  | ||||||
| export const F64 = Symbol.for("_am_f64") |  | ||||||
| export const COUNTER = Symbol.for("_am_counter") |  | ||||||
| export const TEXT = Symbol.for("_am_text") |  | ||||||
|  |  | ||||||
|  | @ -1,4 +1,4 @@ | ||||||
| import { Automerge, type ObjID, type Prop } from "@automerge/automerge-wasm" | import { Automerge, ObjID, Prop } from "@automerge/automerge-wasm" | ||||||
| import { COUNTER } from "./constants" | import { COUNTER } from "./constants" | ||||||
| /** | /** | ||||||
|  * The most basic CRDT: an integer value that can be changed only by |  * The most basic CRDT: an integer value that can be changed only by | ||||||
|  | @ -6,7 +6,7 @@ import { COUNTER } from "./constants" | ||||||
|  * the value trivially converges. |  * the value trivially converges. | ||||||
|  */ |  */ | ||||||
| export class Counter { | export class Counter { | ||||||
|   value: number |   value : number; | ||||||
| 
 | 
 | ||||||
|   constructor(value?: number) { |   constructor(value?: number) { | ||||||
|     this.value = value || 0 |     this.value = value || 0 | ||||||
|  | @ -49,17 +49,11 @@ export class Counter { | ||||||
|  */ |  */ | ||||||
| class WriteableCounter extends Counter { | class WriteableCounter extends Counter { | ||||||
|   context: Automerge |   context: Automerge | ||||||
|   path: Prop[] |   path: string[] | ||||||
|   objectId: ObjID |   objectId: ObjID | ||||||
|   key: Prop |   key: Prop | ||||||
| 
 | 
 | ||||||
|   constructor( |   constructor(value: number, context: Automerge, path: string[], objectId: ObjID, key: Prop) { | ||||||
|     value: number, |  | ||||||
|     context: Automerge, |  | ||||||
|     path: Prop[], |  | ||||||
|     objectId: ObjID, |  | ||||||
|     key: Prop |  | ||||||
|   ) { |  | ||||||
|     super(value) |     super(value) | ||||||
|     this.context = context |     this.context = context | ||||||
|     this.path = path |     this.path = path | ||||||
|  | @ -72,7 +66,7 @@ class WriteableCounter extends Counter { | ||||||
|    * increases the value of the counter by 1. |    * increases the value of the counter by 1. | ||||||
|    */ |    */ | ||||||
|   increment(delta: number) : number { |   increment(delta: number) : number { | ||||||
|     delta = typeof delta === "number" ? delta : 1 |     delta = typeof delta === 'number' ? delta : 1 | ||||||
|     this.context.increment(this.objectId, this.key, delta) |     this.context.increment(this.objectId, this.key, delta) | ||||||
|     this.value += delta |     this.value += delta | ||||||
|     return this.value |     return this.value | ||||||
|  | @ -83,7 +77,7 @@ class WriteableCounter extends Counter { | ||||||
|    * decreases the value of the counter by 1. |    * decreases the value of the counter by 1. | ||||||
|    */ |    */ | ||||||
|   decrement(delta: number) : number { |   decrement(delta: number) : number { | ||||||
|     return this.increment(typeof delta === "number" ? -delta : -1) |     return this.increment(typeof delta === 'number' ? -delta : -1) | ||||||
|   } |   } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -94,13 +88,7 @@ class WriteableCounter extends Counter { | ||||||
|  * the property name (key in map, or index in list) where the counter is |  * the property name (key in map, or index in list) where the counter is | ||||||
|  * located. |  * located. | ||||||
| */ | */ | ||||||
| export function getWriteableCounter( | export function getWriteableCounter(value: number, context: Automerge, path: string[], objectId: ObjID, key: Prop) { | ||||||
|   value: number, |  | ||||||
|   context: Automerge, |  | ||||||
|   path: Prop[], |  | ||||||
|   objectId: ObjID, |  | ||||||
|   key: Prop |  | ||||||
| ): WriteableCounter { |  | ||||||
|   return new WriteableCounter(value, context, path, objectId, key) |   return new WriteableCounter(value, context, path, objectId, key) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
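As a rough illustration of the class diffed above: a counter is created inside a change callback and mutated through the writeable wrapper in later changes, after which the value converges by summing concurrent increments. A sketch in the untyped style of the examples elsewhere in this diff; the `score` field name is invented.

```typescript
import * as Automerge from "@automerge/automerge"

let doc: any = Automerge.init()
doc = Automerge.change(doc, (d: any) => (d.score = new Automerge.Counter()))
doc = Automerge.change(doc, (d: any) => d.score.increment(3))
console.log(doc.score.value) // 3 -- increments from other actors merge by summing
```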
										
											
File diff suppressed because it is too large
|  | @ -1,43 +0,0 @@ | ||||||
| import { type ObjID, type Heads, Automerge } from "@automerge/automerge-wasm" |  | ||||||
| 
 |  | ||||||
| import { STATE, OBJECT_ID, TRACE, IS_PROXY } from "./constants" |  | ||||||
| 
 |  | ||||||
| import type { Doc, PatchCallback } from "./types" |  | ||||||
| 
 |  | ||||||
| export interface InternalState<T> { |  | ||||||
|   handle: Automerge |  | ||||||
|   heads: Heads | undefined |  | ||||||
|   freeze: boolean |  | ||||||
|   patchCallback?: PatchCallback<T> |  | ||||||
|   textV2: boolean |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function _state<T>(doc: Doc<T>, checkroot = true): InternalState<T> { |  | ||||||
|   if (typeof doc !== "object") { |  | ||||||
|     throw new RangeError("must be the document root") |  | ||||||
|   } |  | ||||||
|   const state = Reflect.get(doc, STATE) as InternalState<T> |  | ||||||
|   if ( |  | ||||||
|     state === undefined || |  | ||||||
|     state == null || |  | ||||||
|     (checkroot && _obj(doc) !== "_root") |  | ||||||
|   ) { |  | ||||||
|     throw new RangeError("must be the document root") |  | ||||||
|   } |  | ||||||
|   return state |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function _trace<T>(doc: Doc<T>): string | undefined { |  | ||||||
|   return Reflect.get(doc, TRACE) as string |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function _obj<T>(doc: Doc<T>): ObjID | null { |  | ||||||
|   if (!(typeof doc === "object") || doc === null) { |  | ||||||
|     return null |  | ||||||
|   } |  | ||||||
|   return Reflect.get(doc, OBJECT_ID) as ObjID |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function _is_proxy<T>(doc: Doc<T>): boolean { |  | ||||||
|   return !!Reflect.get(doc, IS_PROXY) |  | ||||||
| } |  | ||||||
|  | @ -1,58 +1,25 @@ | ||||||
| import { | 
 | ||||||
|   type API, | import { Automerge, Change, DecodedChange, Actor, SyncState, SyncMessage, JsSyncState, DecodedSyncMessage }  from "@automerge/automerge-wasm" | ||||||
|   Automerge, | import { API } from "@automerge/automerge-wasm" | ||||||
|   type Change, |  | ||||||
|   type DecodedChange, |  | ||||||
|   type Actor, |  | ||||||
|   SyncState, |  | ||||||
|   type SyncMessage, |  | ||||||
|   type JsSyncState, |  | ||||||
|   type DecodedSyncMessage, |  | ||||||
|   type ChangeToEncode, |  | ||||||
| } from "@automerge/automerge-wasm" |  | ||||||
| export type { ChangeToEncode } from "@automerge/automerge-wasm" |  | ||||||
| 
 | 
 | ||||||
| export function UseApi(api: API) { | export function UseApi(api: API) { | ||||||
|   for (const k in api) { |   for (const k in api) { | ||||||
|     // eslint-disable-next-line @typescript-eslint/no-extra-semi,@typescript-eslint/no-explicit-any
 |     ApiHandler[k] = api[k] | ||||||
|     ;(ApiHandler as any)[k] = (api as any)[k] |  | ||||||
|   } |   } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /* eslint-disable */ | /* eslint-disable */ | ||||||
| export const ApiHandler : API = { | export const ApiHandler : API = { | ||||||
|   create(textV2: boolean, actor?: Actor): Automerge { |   create(actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called") }, | ||||||
|     throw new RangeError("Automerge.use() not called") |   load(data: Uint8Array, actor?: Actor): Automerge { throw new RangeError("Automerge.use() not called (load)") }, | ||||||
|   }, |   encodeChange(change: DecodedChange): Change { throw new RangeError("Automerge.use() not called (encodeChange)") }, | ||||||
|   load(data: Uint8Array, textV2: boolean, actor?: Actor): Automerge { |   decodeChange(change: Change): DecodedChange { throw new RangeError("Automerge.use() not called (decodeChange)") }, | ||||||
|     throw new RangeError("Automerge.use() not called (load)") |   initSyncState(): SyncState { throw new RangeError("Automerge.use() not called (initSyncState)") }, | ||||||
|   }, |   encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { throw new RangeError("Automerge.use() not called (encodeSyncMessage)") }, | ||||||
|   encodeChange(change: ChangeToEncode): Change { |   decodeSyncMessage(msg: SyncMessage): DecodedSyncMessage { throw new RangeError("Automerge.use() not called (decodeSyncMessage)") }, | ||||||
|     throw new RangeError("Automerge.use() not called (encodeChange)") |   encodeSyncState(state: SyncState): Uint8Array { throw new RangeError("Automerge.use() not called (encodeSyncState)") }, | ||||||
|   }, |   decodeSyncState(data: Uint8Array): SyncState { throw new RangeError("Automerge.use() not called (decodeSyncState)") }, | ||||||
|   decodeChange(change: Change): DecodedChange { |   exportSyncState(state: SyncState): JsSyncState { throw new RangeError("Automerge.use() not called (exportSyncState)") }, | ||||||
|     throw new RangeError("Automerge.use() not called (decodeChange)") |   importSyncState(state: JsSyncState): SyncState { throw new RangeError("Automerge.use() not called (importSyncState)") }, | ||||||
|   }, |  | ||||||
|   initSyncState(): SyncState { |  | ||||||
|     throw new RangeError("Automerge.use() not called (initSyncState)") |  | ||||||
|   }, |  | ||||||
|   encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { |  | ||||||
|     throw new RangeError("Automerge.use() not called (encodeSyncMessage)") |  | ||||||
|   }, |  | ||||||
|   decodeSyncMessage(msg: SyncMessage): DecodedSyncMessage { |  | ||||||
|     throw new RangeError("Automerge.use() not called (decodeSyncMessage)") |  | ||||||
|   }, |  | ||||||
|   encodeSyncState(state: SyncState): Uint8Array { |  | ||||||
|     throw new RangeError("Automerge.use() not called (encodeSyncState)") |  | ||||||
|   }, |  | ||||||
|   decodeSyncState(data: Uint8Array): SyncState { |  | ||||||
|     throw new RangeError("Automerge.use() not called (decodeSyncState)") |  | ||||||
|   }, |  | ||||||
|   exportSyncState(state: SyncState): JsSyncState { |  | ||||||
|     throw new RangeError("Automerge.use() not called (exportSyncState)") |  | ||||||
|   }, |  | ||||||
|   importSyncState(state: JsSyncState): SyncState { |  | ||||||
|     throw new RangeError("Automerge.use() not called (importSyncState)") |  | ||||||
|   }, |  | ||||||
| } | } | ||||||
| /* eslint-enable */ | /* eslint-enable */ | ||||||
|  |  | ||||||
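A brief sketch of how this indirection is meant to be wired up, based on the stubs above and the `use(wasm)` call visible in `index.ts` further down; the relative import path is an assumption.

```typescript
// Sketch: replace the throwing stubs with the real WASM-backed implementation.
import * as wasm from "@automerge/automerge-wasm"
import { UseApi, ApiHandler } from "./low_level"

UseApi(wasm) // copies each function from the wasm module onto ApiHandler;
             // until this runs, every ApiHandler method throws "Automerge.use() not called"
```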
|  | @ -1,18 +1,12 @@ | ||||||
| // Convenience classes to allow users to strictly specify the number type they want
 | // Convience classes to allow users to stricly specify the number type they want
 | ||||||
| 
 | 
 | ||||||
| import { INT, UINT, F64 } from "./constants" | import { INT, UINT, F64 } from "./constants" | ||||||
| 
 | 
 | ||||||
| export class Int { | export class Int { | ||||||
|   value: number |   value: number; | ||||||
| 
 | 
 | ||||||
|   constructor(value: number) { |   constructor(value: number) { | ||||||
|     if ( |     if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= Number.MIN_SAFE_INTEGER)) { | ||||||
|       !( |  | ||||||
|         Number.isInteger(value) && |  | ||||||
|         value <= Number.MAX_SAFE_INTEGER && |  | ||||||
|         value >= Number.MIN_SAFE_INTEGER |  | ||||||
|       ) |  | ||||||
|     ) { |  | ||||||
|       throw new RangeError(`Value ${value} cannot be a uint`) |       throw new RangeError(`Value ${value} cannot be a uint`) | ||||||
|     } |     } | ||||||
|     this.value = value |     this.value = value | ||||||
|  | @ -22,16 +16,10 @@ export class Int { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| export class Uint { | export class Uint { | ||||||
|   value: number |   value: number; | ||||||
| 
 | 
 | ||||||
|   constructor(value: number) { |   constructor(value: number) { | ||||||
|     if ( |     if (!(Number.isInteger(value) && value <= Number.MAX_SAFE_INTEGER && value >= 0)) { | ||||||
|       !( |  | ||||||
|         Number.isInteger(value) && |  | ||||||
|         value <= Number.MAX_SAFE_INTEGER && |  | ||||||
|         value >= 0 |  | ||||||
|       ) |  | ||||||
|     ) { |  | ||||||
|       throw new RangeError(`Value ${value} cannot be a uint`) |       throw new RangeError(`Value ${value} cannot be a uint`) | ||||||
|     } |     } | ||||||
|     this.value = value |     this.value = value | ||||||
|  | @ -41,10 +29,10 @@ export class Uint { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| export class Float64 { | export class Float64 { | ||||||
|   value: number |   value: number; | ||||||
| 
 | 
 | ||||||
|   constructor(value: number) { |   constructor(value: number) { | ||||||
|     if (typeof value !== "number") { |     if (typeof value !== 'number') { | ||||||
|       throw new RangeError(`Value ${value} cannot be a float64`) |       throw new RangeError(`Value ${value} cannot be a float64`) | ||||||
|     } |     } | ||||||
|     this.value = value || 0.0 |     this.value = value || 0.0 | ||||||
|  | @ -52,3 +40,4 @@ export class Float64 { | ||||||
|     Object.freeze(this) |     Object.freeze(this) | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  |  | ||||||
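To make the validation above concrete, a small sketch of what these wrapper classes accept and reject; the values are arbitrary and the relative import path is an assumption.

```typescript
import { Int, Uint, Float64 } from "./numbers"

new Int(-42)     // ok: any safe integer
new Uint(7)      // ok: non-negative safe integer
new Float64(1.5) // ok: any number
// new Uint(-1)  // would throw RangeError: Value -1 cannot be a uint
// new Int(1.5)  // would throw RangeError: non-integers are rejected
```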
										
											
File diff suppressed because it is too large
|  | @ -1,6 +0,0 @@ | ||||||
| export class RawString { |  | ||||||
|   val: string |  | ||||||
|   constructor(val: string) { |  | ||||||
|     this.val = val |  | ||||||
|   } |  | ||||||
| } |  | ||||||
|  | @ -1,944 +0,0 @@ | ||||||
| /** @hidden **/ |  | ||||||
| export { /** @hidden */ uuid } from "./uuid" |  | ||||||
| 
 |  | ||||||
| import { rootProxy } from "./proxies" |  | ||||||
| import { STATE } from "./constants" |  | ||||||
| 
 |  | ||||||
| import { |  | ||||||
|   type AutomergeValue, |  | ||||||
|   Counter, |  | ||||||
|   type Doc, |  | ||||||
|   type PatchCallback, |  | ||||||
| } from "./types" |  | ||||||
| export { |  | ||||||
|   type AutomergeValue, |  | ||||||
|   Counter, |  | ||||||
|   type Doc, |  | ||||||
|   Int, |  | ||||||
|   Uint, |  | ||||||
|   Float64, |  | ||||||
|   type Patch, |  | ||||||
|   type PatchCallback, |  | ||||||
|   type ScalarValue, |  | ||||||
| } from "./types" |  | ||||||
| 
 |  | ||||||
| import { Text } from "./text" |  | ||||||
| export { Text } from "./text" |  | ||||||
| 
 |  | ||||||
| import type { |  | ||||||
|   API as WasmAPI, |  | ||||||
|   Actor as ActorId, |  | ||||||
|   Prop, |  | ||||||
|   ObjID, |  | ||||||
|   Change, |  | ||||||
|   DecodedChange, |  | ||||||
|   Heads, |  | ||||||
|   MaterializeValue, |  | ||||||
|   JsSyncState, |  | ||||||
|   SyncMessage, |  | ||||||
|   DecodedSyncMessage, |  | ||||||
| } from "@automerge/automerge-wasm" |  | ||||||
| export type { |  | ||||||
|   PutPatch, |  | ||||||
|   DelPatch, |  | ||||||
|   SpliceTextPatch, |  | ||||||
|   InsertPatch, |  | ||||||
|   IncPatch, |  | ||||||
|   SyncMessage, |  | ||||||
| } from "@automerge/automerge-wasm" |  | ||||||
| 
 |  | ||||||
| /** @hidden **/ |  | ||||||
| type API = WasmAPI |  | ||||||
| 
 |  | ||||||
| const SyncStateSymbol = Symbol("_syncstate") |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * An opaque type tracking the state of sync with a remote peer |  | ||||||
|  */ |  | ||||||
| type SyncState = JsSyncState & { _opaque: typeof SyncStateSymbol } |  | ||||||
| 
 |  | ||||||
| import { ApiHandler, type ChangeToEncode, UseApi } from "./low_level" |  | ||||||
| 
 |  | ||||||
| import { Automerge } from "@automerge/automerge-wasm" |  | ||||||
| 
 |  | ||||||
| import { RawString } from "./raw_string" |  | ||||||
| 
 |  | ||||||
| import { _state, _is_proxy, _trace, _obj } from "./internal_state" |  | ||||||
| 
 |  | ||||||
| import { stableConflictAt } from "./conflicts" |  | ||||||
| 
 |  | ||||||
| /** Options passed to {@link change}, and {@link emptyChange} |  | ||||||
|  * @typeParam T - The type of value contained in the document |  | ||||||
|  */ |  | ||||||
| export type ChangeOptions<T> = { |  | ||||||
|   /** A message which describes the changes */ |  | ||||||
|   message?: string |  | ||||||
|   /** The unix timestamp of the change (purely advisory, not used in conflict resolution) */ |  | ||||||
|   time?: number |  | ||||||
|   /** A callback which will be called to notify the caller of any changes to the document */ |  | ||||||
|   patchCallback?: PatchCallback<T> |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** Options passed to {@link loadIncremental}, {@link applyChanges}, and {@link receiveSyncMessage} |  | ||||||
|  * @typeParam T - The type of value contained in the document |  | ||||||
|  */ |  | ||||||
| export type ApplyOptions<T> = { patchCallback?: PatchCallback<T> } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * A List is an extended Array that adds the two helper methods `deleteAt` and `insertAt`. |  | ||||||
|  */ |  | ||||||
| export interface List<T> extends Array<T> { |  | ||||||
|   insertAt(index: number, ...args: T[]): List<T> |  | ||||||
|   deleteAt(index: number, numDelete?: number): List<T> |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * To extend an arbitrary type, we have to turn any arrays that are part of the type's definition into Lists. |  | ||||||
|  * So we recurse through the properties of T, turning any Arrays we find into Lists. |  | ||||||
|  */ |  | ||||||
| export type Extend<T> = |  | ||||||
|   // is it an array? make it a list (we recursively extend the type of the array's elements as well)
 |  | ||||||
|   T extends Array<infer T> |  | ||||||
|     ? List<Extend<T>> |  | ||||||
|     : // is it an object? recursively extend all of its properties
 |  | ||||||
|     // eslint-disable-next-line @typescript-eslint/ban-types
 |  | ||||||
|     T extends Object |  | ||||||
|     ? { [P in keyof T]: Extend<T[P]> } |  | ||||||
|     : // otherwise leave the type alone
 |  | ||||||
|       T |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Function which is called by {@link change} when making changes to a `Doc<T>` |  | ||||||
|  * @typeParam T - The type of value contained in the document |  | ||||||
|  * |  | ||||||
|  * This function may mutate `doc` |  | ||||||
|  */ |  | ||||||
| export type ChangeFn<T> = (doc: Extend<T>) => void |  | ||||||
| 
 |  | ||||||
| /** @hidden **/ |  | ||||||
| export interface State<T> { |  | ||||||
|   change: DecodedChange |  | ||||||
|   snapshot: T |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden **/ |  | ||||||
| export function use(api: API) { |  | ||||||
|   UseApi(api) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| import * as wasm from "@automerge/automerge-wasm" |  | ||||||
| use(wasm) |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Options to be passed to {@link init} or {@link load} |  | ||||||
|  * @typeParam T - The type of the value the document contains |  | ||||||
|  */ |  | ||||||
| export type InitOptions<T> = { |  | ||||||
|   /** The actor ID to use for this document, a random one will be generated if `null` is passed */ |  | ||||||
|   actor?: ActorId |  | ||||||
|   freeze?: boolean |  | ||||||
|   /** A callback which will be called with the initial patch once the document has finished loading */ |  | ||||||
|   patchCallback?: PatchCallback<T> |  | ||||||
|   /** @hidden */ |  | ||||||
|   enableTextV2?: boolean |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function getBackend<T>(doc: Doc<T>): Automerge { |  | ||||||
|   return _state(doc).handle |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function importOpts<T>(_actor?: ActorId | InitOptions<T>): InitOptions<T> { |  | ||||||
|   if (typeof _actor === "object") { |  | ||||||
|     return _actor |  | ||||||
|   } else { |  | ||||||
|     return { actor: _actor } |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Create a new automerge document |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of value contained in the document. This will be the |  | ||||||
|  *     type that is passed to the change closure in {@link change} |  | ||||||
|  * @param _opts - Either an actorId or an {@link InitOptions} (which may |  | ||||||
|  *     contain an actorId). If this is null the document will be initialised with a |  | ||||||
|  *     random actor ID |  | ||||||
|  */ |  | ||||||
| export function init<T>(_opts?: ActorId | InitOptions<T>): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   const freeze = !!opts.freeze |  | ||||||
|   const patchCallback = opts.patchCallback |  | ||||||
|   const handle = ApiHandler.create(opts.enableTextV2 || false, opts.actor) |  | ||||||
|   handle.enablePatches(true) |  | ||||||
|   handle.enableFreeze(!!opts.freeze) |  | ||||||
|   handle.registerDatatype("counter", (n: number) => new Counter(n)) |  | ||||||
|   const textV2 = opts.enableTextV2 || false |  | ||||||
|   if (textV2) { |  | ||||||
|     handle.registerDatatype("str", (n: string) => new RawString(n)) |  | ||||||
|   } else { |  | ||||||
|     // eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
|     handle.registerDatatype("text", (n: any) => new Text(n)) |  | ||||||
|   } |  | ||||||
|   const doc = handle.materialize("/", undefined, { |  | ||||||
|     handle, |  | ||||||
|     heads: undefined, |  | ||||||
|     freeze, |  | ||||||
|     patchCallback, |  | ||||||
|     textV2, |  | ||||||
|   }) as Doc<T> |  | ||||||
|   return doc |  | ||||||
| } |  | ||||||
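A minimal usage sketch (the actor ID is just an illustrative hex string, not from the original source):

```
// Initialise with defaults (a random actor ID)
let doc1 = init<{ count?: number }>()

// Or with an explicit actor ID and options
let doc2 = init<{ count?: number }>({
  actor: "aaaa",
  freeze: true,
})
```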
| 
 |  | ||||||
| /** |  | ||||||
|  * Make an immutable view of an automerge document as at `heads` |  | ||||||
|  * |  | ||||||
|  * @remarks |  | ||||||
|  * The document returned from this function cannot be passed to {@link change}. |  | ||||||
|  * This is because it shares the same underlying memory as `doc`; consequently |  | ||||||
|  * it is a very cheap copy. |  | ||||||
|  * |  | ||||||
|  * Note that this function will throw an error if any of the hashes in `heads` |  | ||||||
|  * are not in the document. |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value contained in the document |  | ||||||
|  * @param doc - The document to create a view of |  | ||||||
|  * @param heads - The hashes of the heads to create a view at |  | ||||||
|  */ |  | ||||||
| export function view<T>(doc: Doc<T>, heads: Heads): Doc<T> { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   const handle = state.handle |  | ||||||
|   return state.handle.materialize("/", heads, { |  | ||||||
|     ...state, |  | ||||||
|     handle, |  | ||||||
|     heads, |  | ||||||
|   }) as Doc<T> |  | ||||||
| } |  | ||||||
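A rough sketch of the intended usage, assuming `from`, `change` and `getHeads` from this module:

```
let doc = from({ value: 1 })
const oldHeads = getHeads(doc)
doc = change(doc, d => {
  d.value = 2
})

const snapshot = view(doc, oldHeads)
// snapshot.value === 1 while doc.value === 2; snapshot cannot be passed to change
```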
| 
 |  | ||||||
| /** |  | ||||||
|  * Make a full writable copy of an automerge document |  | ||||||
|  * |  | ||||||
|  * @remarks |  | ||||||
|  * Unlike {@link view} this function makes a full copy of the memory backing |  | ||||||
|  * the document and can thus be passed to {@link change}. It also generates a |  | ||||||
|  * new actor ID so that changes made in the new document do not create duplicate |  | ||||||
|  * sequence numbers with respect to the old document. If you need control over |  | ||||||
|  * the actor ID which is generated you can pass the actor ID as the second |  | ||||||
|  * argument |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value contained in the document |  | ||||||
|  * @param doc - The document to clone |  | ||||||
|  * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} |  | ||||||
|  */ |  | ||||||
| export function clone<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   const heads = state.heads |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   const handle = state.handle.fork(opts.actor, heads) |  | ||||||
| 
 |  | ||||||
|   // `change` uses the presence of state.heads to determine if we are in a view
 |  | ||||||
|   // set it to undefined to indicate that this is a full fat document
 |  | ||||||
|   const { heads: _oldHeads, ...stateSansHeads } = state |  | ||||||
|   return handle.applyPatches(doc, { ...stateSansHeads, handle }) |  | ||||||
| } |  | ||||||
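For example (a sketch, not from the original source):

```
const original = from({ tasks: ["write docs"] })
const copy = clone(original) // full copy with a fresh actor ID
const edited = change(copy, d => {
  d.tasks.insertAt(1, "review PR")
})
// `original` is unaffected by the change made to the clone
```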
| 
 |  | ||||||
| /** Explicitly free the memory backing a document. Note that this is not |  | ||||||
|  * necessary in environments which support |  | ||||||
|  * [`FinalizationRegistry`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry)
 |  | ||||||
|  */ |  | ||||||
| export function free<T>(doc: Doc<T>) { |  | ||||||
|   return _state(doc).handle.free() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Create an automerge document from a POJO |  | ||||||
|  * |  | ||||||
|  * @param initialState - The initial state which will be copied into the document |  | ||||||
|  * @typeParam T - The type of the value passed to `from` _and_ the type the resulting document will contain |  | ||||||
|  * @param _opts - Either an actor ID for the resulting document or an {@link InitOptions}; if this is null a random actor ID will be used |  | ||||||
|  * |  | ||||||
|  * @example |  | ||||||
|  * ``` |  | ||||||
|  * const doc = automerge.from({ |  | ||||||
|  *     tasks: [ |  | ||||||
|  *         {description: "feed dogs", done: false} |  | ||||||
|  *     ] |  | ||||||
|  * }) |  | ||||||
|  * ``` |  | ||||||
|  */ |  | ||||||
| export function from<T extends Record<string, unknown>>( |  | ||||||
|   initialState: T | Doc<T>, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   return change(init(_opts), d => Object.assign(d, initialState)) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Update the contents of an automerge document |  | ||||||
|  * @typeParam T - The type of the value contained in the document |  | ||||||
|  * @param doc - The document to update |  | ||||||
|  * @param options - Either a message, a {@link ChangeOptions}, or a {@link ChangeFn} |  | ||||||
|  * @param callback - A `ChangeFn` to be used if `options` was a `string` |  | ||||||
|  * |  | ||||||
|  * Note that if the second argument is a function it will be used as the `ChangeFn` regardless of what the third argument is. |  | ||||||
|  * |  | ||||||
|  * @example A simple change |  | ||||||
|  * ``` |  | ||||||
|  * let doc1 = automerge.init() |  | ||||||
|  * doc1 = automerge.change(doc1, d => { |  | ||||||
|  *     d.key = "value" |  | ||||||
|  * }) |  | ||||||
|  * assert.equal(doc1.key, "value") |  | ||||||
|  * ``` |  | ||||||
|  * |  | ||||||
|  * @example A change with a message |  | ||||||
|  * |  | ||||||
|  * ``` |  | ||||||
|  * doc1 = automerge.change(doc1, "add another value", d => { |  | ||||||
|  *     d.key2 = "value2" |  | ||||||
|  * }) |  | ||||||
|  * ``` |  | ||||||
|  * |  | ||||||
|  * @example A change with a message and a timestamp |  | ||||||
|  * |  | ||||||
|  * ``` |  | ||||||
|  * doc1 = automerge.change(doc1, {message: "add another value", time: 1640995200}, d => { |  | ||||||
|  *     d.key2 = "value2" |  | ||||||
|  * }) |  | ||||||
|  * ``` |  | ||||||
|  * |  | ||||||
|  * @example responding to a patch callback |  | ||||||
|  * ``` |  | ||||||
|  * let patchedPath |  | ||||||
|  * let patchCallback = patches => { |  | ||||||
|  *    patchedPath = patches[0].path |  | ||||||
|  * } |  | ||||||
|  * doc1 = automerge.change(doc1, {message: "add another value", time: 1640995200, patchCallback}, d => { |  | ||||||
|  *     d.key2 = "value2" |  | ||||||
|  * }) |  | ||||||
|  * assert.deepEqual(patchedPath, ["key2"]) |  | ||||||
|  * ``` |  | ||||||
|  */ |  | ||||||
| export function change<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   options: string | ChangeOptions<T> | ChangeFn<T>, |  | ||||||
|   callback?: ChangeFn<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   if (typeof options === "function") { |  | ||||||
|     return _change(doc, {}, options) |  | ||||||
|   } else if (typeof callback === "function") { |  | ||||||
|     if (typeof options === "string") { |  | ||||||
|       options = { message: options } |  | ||||||
|     } |  | ||||||
|     return _change(doc, options, callback) |  | ||||||
|   } else { |  | ||||||
|     throw RangeError("Invalid args for change") |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function progressDocument<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   heads: Heads | null, |  | ||||||
|   callback?: PatchCallback<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   if (heads == null) { |  | ||||||
|     return doc |  | ||||||
|   } |  | ||||||
|   const state = _state(doc) |  | ||||||
|   const nextState = { ...state, heads: undefined } |  | ||||||
|   const nextDoc = state.handle.applyPatches(doc, nextState, callback) |  | ||||||
|   state.heads = heads |  | ||||||
|   return nextDoc |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function _change<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   options: ChangeOptions<T>, |  | ||||||
|   callback: ChangeFn<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   if (typeof callback !== "function") { |  | ||||||
|     throw new RangeError("invalid change function") |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   const state = _state(doc) |  | ||||||
| 
 |  | ||||||
|   if (doc === undefined || state === undefined) { |  | ||||||
|     throw new RangeError("must be the document root") |  | ||||||
|   } |  | ||||||
|   if (state.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an outdated document.  Use Automerge.clone() if you wish to make a writable copy." |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   if (_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("Calls to Automerge.change cannot be nested") |  | ||||||
|   } |  | ||||||
|   const heads = state.handle.getHeads() |  | ||||||
|   try { |  | ||||||
|     state.heads = heads |  | ||||||
|     const root: T = rootProxy(state.handle, state.textV2) |  | ||||||
|     callback(root as Extend<T>) |  | ||||||
|     if (state.handle.pendingOps() === 0) { |  | ||||||
|       state.heads = undefined |  | ||||||
|       return doc |  | ||||||
|     } else { |  | ||||||
|       state.handle.commit(options.message, options.time) |  | ||||||
|       return progressDocument( |  | ||||||
|         doc, |  | ||||||
|         heads, |  | ||||||
|         options.patchCallback || state.patchCallback |  | ||||||
|       ) |  | ||||||
|     } |  | ||||||
|   } catch (e) { |  | ||||||
|     state.heads = undefined |  | ||||||
|     state.handle.rollback() |  | ||||||
|     throw e |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Add a change to a document which does not modify the document's contents |  | ||||||
|  * |  | ||||||
|  * @param doc - The doc to add the empty change to |  | ||||||
|  * @param options - Either a message or a {@link ChangeOptions} for the new change |  | ||||||
|  * |  | ||||||
|  * Why would you want to do this? One reason might be that you have merged |  | ||||||
|  * changes from some other peers and you want to generate a change which |  | ||||||
|  * depends on those merged changes so that you can sign the new change with all |  | ||||||
|  * of the merged changes as part of the new change. |  | ||||||
|  */ |  | ||||||
| export function emptyChange<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   options: string | ChangeOptions<T> | void |  | ||||||
| ) { |  | ||||||
|   if (options === undefined) { |  | ||||||
|     options = {} |  | ||||||
|   } |  | ||||||
|   if (typeof options === "string") { |  | ||||||
|     options = { message: options } |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   const state = _state(doc) |  | ||||||
| 
 |  | ||||||
|   if (state.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an outdated document.  Use Automerge.clone() if you wish to make a writable copy." |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   if (_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("Calls to Automerge.change cannot be nested") |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   const heads = state.handle.getHeads() |  | ||||||
|   state.handle.emptyChange(options.message, options.time) |  | ||||||
|   return progressDocument(doc, heads) |  | ||||||
| } |  | ||||||
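A rough sketch of the scenario described above, assuming `from`, `clone`, `change` and `merge` from this module:

```
let doc = from({ value: 1 })
let peer = clone(doc)
peer = change(peer, d => {
  d.value = 2
})

doc = merge(doc, clone(peer))
// record a change which depends on everything merged so far, without
// modifying the document's contents
doc = emptyChange(doc, "acknowledge merged changes")
```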
| 
 |  | ||||||
| /** |  | ||||||
|  * Load an automerge document from a compressed document produced by {@link save} |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value which is contained in the document. |  | ||||||
|  *                Note that no validation is done to make sure this type is in |  | ||||||
|  *                fact the type of the contained value so be a bit careful |  | ||||||
|  * @param data  - The compressed document |  | ||||||
|  * @param _opts - Either an actor ID or some {@link InitOptions}, if the actor |  | ||||||
|  *                ID is null a random actor ID will be created |  | ||||||
|  * |  | ||||||
|  * Note that `load` will throw an error if passed incomplete content (for |  | ||||||
|  * example if you are receiving content over the network and don't know if you |  | ||||||
|  * have the complete document yet). If you need to handle incomplete content use |  | ||||||
|  * {@link init} followed by {@link loadIncremental}. |  | ||||||
|  */ |  | ||||||
| export function load<T>( |  | ||||||
|   data: Uint8Array, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   const actor = opts.actor |  | ||||||
|   const patchCallback = opts.patchCallback |  | ||||||
|   const handle = ApiHandler.load(data, opts.enableTextV2 || false, actor) |  | ||||||
|   handle.enablePatches(true) |  | ||||||
|   handle.enableFreeze(!!opts.freeze) |  | ||||||
|   handle.registerDatatype("counter", (n: number) => new Counter(n)) |  | ||||||
|   const textV2 = opts.enableTextV2 || false |  | ||||||
|   if (textV2) { |  | ||||||
|     handle.registerDatatype("str", (n: string) => new RawString(n)) |  | ||||||
|   } else { |  | ||||||
|     handle.registerDatatype("text", (n: string) => new Text(n)) |  | ||||||
|   } |  | ||||||
|   const doc = handle.materialize("/", undefined, { |  | ||||||
|     handle, |  | ||||||
|     heads: undefined, |  | ||||||
|     patchCallback, |  | ||||||
|     textV2, |  | ||||||
|   }) as Doc<T> |  | ||||||
|   return doc |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Load changes produced by {@link saveIncremental}, or partial changes |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value which is contained in the document. |  | ||||||
|  *                Note that no validation is done to make sure this type is in |  | ||||||
|  *                fact the type of the contained value so be a bit careful |  | ||||||
|  * @param data  - The compressed changes |  | ||||||
|  * @param opts  - an {@link ApplyOptions} |  | ||||||
|  * |  | ||||||
|  * This function is useful when staying up to date with a connected peer. |  | ||||||
|  * Perhaps the other end sent you a full compressed document which you loaded |  | ||||||
|  * with {@link load} and they're sending you the result of |  | ||||||
|  * {@link getLastLocalChange} every time they make a change. |  | ||||||
|  * |  | ||||||
|  * Note that this function will successfully load the results of {@link save} as |  | ||||||
|  * well as {@link getLastLocalChange} or any other incremental change. |  | ||||||
|  */ |  | ||||||
| export function loadIncremental<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   data: Uint8Array, |  | ||||||
|   opts?: ApplyOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   if (!opts) { |  | ||||||
|     opts = {} |  | ||||||
|   } |  | ||||||
|   const state = _state(doc) |  | ||||||
|   if (state.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an out of date document - set at: " + _trace(doc) |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   if (_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("Calls to Automerge.change cannot be nested") |  | ||||||
|   } |  | ||||||
|   const heads = state.handle.getHeads() |  | ||||||
|   state.handle.loadIncremental(data) |  | ||||||
|   return progressDocument(doc, heads, opts.patchCallback || state.patchCallback) |  | ||||||
| } |  | ||||||
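A sketch of that pattern; `receiveFullDoc` and `onIncrementalChange` are hypothetical transport hooks declared here only for illustration:

```
declare function receiveFullDoc(): Uint8Array
declare function onIncrementalChange(cb: (bytes: Uint8Array) => void): void

// load the full document once, then apply incremental updates as they arrive
let doc = load<{ text?: string }>(receiveFullDoc())
onIncrementalChange(bytes => {
  // `bytes` might be the peer's getLastLocalChange output, or any incremental data
  doc = loadIncremental(doc, bytes)
})
```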
| 
 |  | ||||||
| /** |  | ||||||
|  * Export the contents of a document to a compressed format |  | ||||||
|  * |  | ||||||
|  * @param doc - The doc to save |  | ||||||
|  * |  | ||||||
|  * The returned bytes can be passed to {@link load} or {@link loadIncremental} |  | ||||||
|  */ |  | ||||||
| export function save<T>(doc: Doc<T>): Uint8Array { |  | ||||||
|   return _state(doc).handle.save() |  | ||||||
| } |  | ||||||
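A minimal save/load round trip, as a sketch:

```
const doc = from({ notes: ["hello"] })
const bytes = save(doc) // compressed Uint8Array, suitable for disk or the network
const restored = load<{ notes: string[] }>(bytes)
// restored.notes[0] === "hello"
```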
| 
 |  | ||||||
| /** |  | ||||||
|  * Merge `local` into `remote` |  | ||||||
|  * @typeParam T - The type of values contained in each document |  | ||||||
|  * @param local - The document to merge changes into |  | ||||||
|  * @param remote - The document to merge changes from |  | ||||||
|  * |  | ||||||
|  * @returns - The merged document |  | ||||||
|  * |  | ||||||
|  * Often when you are merging documents you will also need to clone them. Both |  | ||||||
|  * arguments to `merge` are frozen after the call so you can no longer call |  | ||||||
|  * mutating methods (such as {@link change}) on them. The symptom of this will be |  | ||||||
|  * an error which says "Attempting to change an out of date document". To |  | ||||||
|  * overcome this call {@link clone} on the argument before passing it to {@link |  | ||||||
|  * merge}. |  | ||||||
|  */ |  | ||||||
| export function merge<T>(local: Doc<T>, remote: Doc<T>): Doc<T> { |  | ||||||
|   const localState = _state(local) |  | ||||||
| 
 |  | ||||||
|   if (localState.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an out of date document - set at: " + _trace(local) |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   const heads = localState.handle.getHeads() |  | ||||||
|   const remoteState = _state(remote) |  | ||||||
|   const changes = localState.handle.getChangesAdded(remoteState.handle) |  | ||||||
|   localState.handle.applyChanges(changes) |  | ||||||
|   return progressDocument(local, heads, localState.patchCallback) |  | ||||||
| } |  | ||||||
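A sketch of the clone-before-merge pattern described above:

```
let left = from({ settings: { theme: "light" } })
let right = clone(left)

left = change(left, d => {
  d.settings.theme = "dark"
})
right = change(right, d => {
  d.settings.theme = "solarized"
})

// clone the argument so that `right` itself is not frozen by the merge
left = merge(left, clone(right))
```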
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the actor ID associated with the document |  | ||||||
|  */ |  | ||||||
| export function getActorId<T>(doc: Doc<T>): ActorId { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   return state.handle.getActorId() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * The type of conflicts for a particular key or index |  | ||||||
|  * |  | ||||||
|  * Maps and sequences in automerge can contain conflicting values for a |  | ||||||
|  * particular key or index. In this case {@link getConflicts} can be used to |  | ||||||
|  * obtain a `Conflicts` representing the multiple values present for the property |  | ||||||
|  * |  | ||||||
|  * A `Conflicts` is a map from a unique (per property or index) key to one of |  | ||||||
|  * the possible conflicting values for the given property. |  | ||||||
|  */ |  | ||||||
| type Conflicts = { [key: string]: AutomergeValue } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the conflicts associated with a property |  | ||||||
|  * |  | ||||||
|  * The values of properties in a map in automerge can be conflicted if there |  | ||||||
|  * are concurrent "put" operations to the same key. Automerge chooses one value |  | ||||||
|  * arbitrarily (but deterministically, any two nodes who have the same set of |  | ||||||
|  * changes will choose the same value) from the set of conflicting values to |  | ||||||
|  * present as the value of the key. |  | ||||||
|  * |  | ||||||
|  * Sometimes you may want to examine these conflicts, in this case you can use |  | ||||||
|  * {@link getConflicts} to get the conflicts for the key. |  | ||||||
|  * |  | ||||||
|  * @example |  | ||||||
|  * ``` |  | ||||||
|  * import * as automerge from "@automerge/automerge" |  | ||||||
|  * |  | ||||||
|  * type Profile = { |  | ||||||
|  *     pets: Array<{name: string, type: string}> |  | ||||||
|  * } |  | ||||||
|  * |  | ||||||
|  * let doc1 = automerge.init<Profile>("aaaa") |  | ||||||
|  * doc1 = automerge.change(doc1, d => { |  | ||||||
|  *     d.pets = [{name: "Lassie", type: "dog"}] |  | ||||||
|  * }) |  | ||||||
|  * let doc2 = automerge.init<Profile>("bbbb") |  | ||||||
|  * doc2 = automerge.merge(doc2, automerge.clone(doc1)) |  | ||||||
|  * |  | ||||||
|  * doc2 = automerge.change(doc2, d => { |  | ||||||
|  *     d.pets[0].name = "Beethoven" |  | ||||||
|  * }) |  | ||||||
|  * |  | ||||||
|  * doc1 = automerge.change(doc1, d => { |  | ||||||
|  *     d.pets[0].name = "Babe" |  | ||||||
|  * }) |  | ||||||
|  * |  | ||||||
|  * const doc3 = automerge.merge(doc1, doc2) |  | ||||||
|  * |  | ||||||
|  * // Note that here we pass `doc3.pets`, not `doc3`
 |  | ||||||
|  * let conflicts = automerge.getConflicts(doc3.pets[0], "name") |  | ||||||
|  * |  | ||||||
|  * // The two conflicting values are the values of the conflicts object
 |  | ||||||
|  * assert.deepEqual(Object.values(conflicts), ["Babe", "Beethoven"]) |  | ||||||
|  * ``` |  | ||||||
|  */ |  | ||||||
| export function getConflicts<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   prop: Prop |  | ||||||
| ): Conflicts | undefined { |  | ||||||
|   const state = _state(doc, false) |  | ||||||
|   if (state.textV2) { |  | ||||||
|     throw new Error("use unstable.getConflicts for an unstable document") |  | ||||||
|   } |  | ||||||
|   const objectId = _obj(doc) |  | ||||||
|   if (objectId != null) { |  | ||||||
|     return stableConflictAt(state.handle, objectId, prop) |  | ||||||
|   } else { |  | ||||||
|     return undefined |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the binary representation of the last change which was made to this doc |  | ||||||
|  * |  | ||||||
|  * This is most useful when staying in sync with other peers, every time you |  | ||||||
|  * make a change locally via {@link change} you immediately call {@link |  | ||||||
|  * getLastLocalChange} and send the result over the network to other peers. |  | ||||||
|  */ |  | ||||||
| export function getLastLocalChange<T>(doc: Doc<T>): Change | undefined { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   return state.handle.getLastLocalChange() || undefined |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Return the object ID of an arbitrary javascript value |  | ||||||
|  * |  | ||||||
|  * This is useful to determine if something is actually an automerge document, |  | ||||||
|  * if `doc` is not an automerge document this will return null. |  | ||||||
|  */ |  | ||||||
| // eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
| export function getObjectId(doc: any, prop?: Prop): ObjID | null { |  | ||||||
|   if (prop) { |  | ||||||
|     const state = _state(doc, false) |  | ||||||
|     const objectId = _obj(doc) |  | ||||||
|     if (!state || !objectId) { |  | ||||||
|       return null |  | ||||||
|     } |  | ||||||
|     return state.handle.get(objectId, prop) as ObjID |  | ||||||
|   } else { |  | ||||||
|     return _obj(doc) |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the changes which are in `newState` but not in `oldState`. The returned |  | ||||||
|  * changes can be loaded in `oldState` via {@link applyChanges}. |  | ||||||
|  * |  | ||||||
|  * Note that this will crash if there are changes in `oldState` which are not in `newState`. |  | ||||||
|  */ |  | ||||||
| export function getChanges<T>(oldState: Doc<T>, newState: Doc<T>): Change[] { |  | ||||||
|   const n = _state(newState) |  | ||||||
|   return n.handle.getChanges(getHeads(oldState)) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get all the changes in a document |  | ||||||
|  * |  | ||||||
|  * This is different to {@link save} because the output is an array of changes |  | ||||||
|  * which can be individually applied via {@link applyChanges} |  | ||||||
|  * |  | ||||||
|  */ |  | ||||||
| export function getAllChanges<T>(doc: Doc<T>): Change[] { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   return state.handle.getChanges([]) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Apply changes received from another document |  | ||||||
|  * |  | ||||||
|  * `doc` will be updated to reflect the `changes`. If there are changes which |  | ||||||
|  * we do not have dependencies for yet those will be stored in the document and |  | ||||||
|  * applied when the depended on changes arrive. |  | ||||||
|  * |  | ||||||
|  * You can use the {@link ApplyOptions} to pass a patch callback which will be |  | ||||||
|  * informed of any changes which occur as a result of applying the changes |  | ||||||
|  * |  | ||||||
|  */ |  | ||||||
| export function applyChanges<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   changes: Change[], |  | ||||||
|   opts?: ApplyOptions<T> |  | ||||||
| ): [Doc<T>] { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   if (!opts) { |  | ||||||
|     opts = {} |  | ||||||
|   } |  | ||||||
|   if (state.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an outdated document.  Use Automerge.clone() if you wish to make a writable copy." |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   if (_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("Calls to Automerge.change cannot be nested") |  | ||||||
|   } |  | ||||||
|   const heads = state.handle.getHeads() |  | ||||||
|   state.handle.applyChanges(changes) |  | ||||||
|   state.heads = heads |  | ||||||
|   return [ |  | ||||||
|     progressDocument(doc, heads, opts.patchCallback || state.patchCallback), |  | ||||||
|   ] |  | ||||||
| } |  | ||||||
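A sketch of shipping changes between two documents with `getChanges` and `applyChanges`:

```
let source = from({ count: 0 })
const target = clone(source)

source = change(source, d => {
  d.count = 1
})

// ship only the changes which `target` is missing
const missing = getChanges(target, source)
const [updated] = applyChanges(target, missing)
// updated.count === 1
```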
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function getHistory<T>(doc: Doc<T>): State<T>[] { |  | ||||||
|   const textV2 = _state(doc).textV2 |  | ||||||
|   const history = getAllChanges(doc) |  | ||||||
|   return history.map((change, index) => ({ |  | ||||||
|     get change() { |  | ||||||
|       return decodeChange(change) |  | ||||||
|     }, |  | ||||||
|     get snapshot() { |  | ||||||
|       const [state] = applyChanges( |  | ||||||
|         init({ enableTextV2: textV2 }), |  | ||||||
|         history.slice(0, index + 1) |  | ||||||
|       ) |  | ||||||
|       return <T>state |  | ||||||
|     }, |  | ||||||
|   })) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| // FIXME : no tests
 |  | ||||||
| // FIXME can we just use deep equals now?
 |  | ||||||
| export function equals(val1: unknown, val2: unknown): boolean { |  | ||||||
|   if (!isObject(val1) || !isObject(val2)) return val1 === val2 |  | ||||||
|   const keys1 = Object.keys(val1).sort(), |  | ||||||
|     keys2 = Object.keys(val2).sort() |  | ||||||
|   if (keys1.length !== keys2.length) return false |  | ||||||
|   for (let i = 0; i < keys1.length; i++) { |  | ||||||
|     if (keys1[i] !== keys2[i]) return false |  | ||||||
|     if (!equals(val1[keys1[i]], val2[keys2[i]])) return false |  | ||||||
|   } |  | ||||||
|   return true |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * encode a {@link SyncState} into binary to send over the network |  | ||||||
|  * |  | ||||||
|  * @group sync |  | ||||||
|  * */ |  | ||||||
| export function encodeSyncState(state: SyncState): Uint8Array { |  | ||||||
|   const sync = ApiHandler.importSyncState(state) |  | ||||||
|   const result = ApiHandler.encodeSyncState(sync) |  | ||||||
|   sync.free() |  | ||||||
|   return result |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Decode some binary data into a {@link SyncState} |  | ||||||
|  * |  | ||||||
|  * @group sync |  | ||||||
|  */ |  | ||||||
| export function decodeSyncState(state: Uint8Array): SyncState { |  | ||||||
|   const sync = ApiHandler.decodeSyncState(state) |  | ||||||
|   const result = ApiHandler.exportSyncState(sync) |  | ||||||
|   sync.free() |  | ||||||
|   return result as SyncState |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Generate a sync message to send to the peer represented by `inState` |  | ||||||
|  * @param doc - The doc to generate messages about |  | ||||||
|  * @param inState - The {@link SyncState} representing the peer we are talking to |  | ||||||
|  * |  | ||||||
|  * @group sync |  | ||||||
|  * |  | ||||||
|  * @returns An array of `[newSyncState, syncMessage | null]` where |  | ||||||
|  * `newSyncState` should replace `inState` and `syncMessage` should be sent to |  | ||||||
|  * the peer if it is not null. If `syncMessage` is null then we are up to date. |  | ||||||
|  */ |  | ||||||
| export function generateSyncMessage<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   inState: SyncState |  | ||||||
| ): [SyncState, SyncMessage | null] { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   const syncState = ApiHandler.importSyncState(inState) |  | ||||||
|   const message = state.handle.generateSyncMessage(syncState) |  | ||||||
|   const outState = ApiHandler.exportSyncState(syncState) as SyncState |  | ||||||
|   return [outState, message] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Update a document and our sync state on receiving a sync message |  | ||||||
|  * |  | ||||||
|  * @group sync |  | ||||||
|  * |  | ||||||
|  * @param doc     - The doc the sync message is about |  | ||||||
|  * @param inState - The {@link SyncState} for the peer we are communicating with |  | ||||||
|  * @param message - The message which was received |  | ||||||
|  * @param opts    - Any {@link ApplyOptions}, used for passing a |  | ||||||
|  *                  {@link PatchCallback} which will be informed of any changes |  | ||||||
|  *                  in `doc` which occur because of the received sync message. |  | ||||||
|  * |  | ||||||
|  * @returns An array of `[newDoc, newSyncState, syncMessage | null]` where |  | ||||||
|  * `newDoc` is the updated state of `doc`, `newSyncState` should replace |  | ||||||
|  * `inState` and `syncMessage` should be sent to the peer if it is not null. If |  | ||||||
|  * `syncMessage` is null then we are up to date. |  | ||||||
|  */ |  | ||||||
| export function receiveSyncMessage<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   inState: SyncState, |  | ||||||
|   message: SyncMessage, |  | ||||||
|   opts?: ApplyOptions<T> |  | ||||||
| ): [Doc<T>, SyncState, null] { |  | ||||||
|   const syncState = ApiHandler.importSyncState(inState) |  | ||||||
|   if (!opts) { |  | ||||||
|     opts = {} |  | ||||||
|   } |  | ||||||
|   const state = _state(doc) |  | ||||||
|   if (state.heads) { |  | ||||||
|     throw new RangeError( |  | ||||||
|       "Attempting to change an outdated document.  Use Automerge.clone() if you wish to make a writable copy." |  | ||||||
|     ) |  | ||||||
|   } |  | ||||||
|   if (_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("Calls to Automerge.change cannot be nested") |  | ||||||
|   } |  | ||||||
|   const heads = state.handle.getHeads() |  | ||||||
|   state.handle.receiveSyncMessage(syncState, message) |  | ||||||
|   const outSyncState = ApiHandler.exportSyncState(syncState) as SyncState |  | ||||||
|   return [ |  | ||||||
|     progressDocument(doc, heads, opts.patchCallback || state.patchCallback), |  | ||||||
|     outSyncState, |  | ||||||
|     null, |  | ||||||
|   ] |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Create a new, blank {@link SyncState} |  | ||||||
|  * |  | ||||||
|  * When communicating with a peer for the first time use this to generate a new |  | ||||||
|  * {@link SyncState} for them |  | ||||||
|  * |  | ||||||
|  * @group sync |  | ||||||
|  */ |  | ||||||
| export function initSyncState(): SyncState { |  | ||||||
|   return ApiHandler.exportSyncState(ApiHandler.initSyncState()) as SyncState |  | ||||||
| } |  | ||||||
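A sketch of driving the sync protocol between two in-memory documents until neither side has anything left to send (a real application would exchange the messages over a network):

```
let docA = from({ items: ["a"] })
let docB = init<{ items: string[] }>()
let syncA = initSyncState()
let syncB = initSyncState()

// keep exchanging messages until neither side produces one
let quiet = false
while (!quiet) {
  quiet = true
  let msg: SyncMessage | null

  ;[syncA, msg] = generateSyncMessage(docA, syncA)
  if (msg) {
    quiet = false
    ;[docB, syncB] = receiveSyncMessage(docB, syncB, msg)
  }

  ;[syncB, msg] = generateSyncMessage(docB, syncB)
  if (msg) {
    quiet = false
    ;[docA, syncA] = receiveSyncMessage(docA, syncA, msg)
  }
}
```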
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function encodeChange(change: ChangeToEncode): Change { |  | ||||||
|   return ApiHandler.encodeChange(change) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function decodeChange(data: Change): DecodedChange { |  | ||||||
|   return ApiHandler.decodeChange(data) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function encodeSyncMessage(message: DecodedSyncMessage): SyncMessage { |  | ||||||
|   return ApiHandler.encodeSyncMessage(message) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function decodeSyncMessage(message: SyncMessage): DecodedSyncMessage { |  | ||||||
|   return ApiHandler.decodeSyncMessage(message) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get any changes in `doc` which are not dependencies of `heads` |  | ||||||
|  */ |  | ||||||
| export function getMissingDeps<T>(doc: Doc<T>, heads: Heads): Heads { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   return state.handle.getMissingDeps(heads) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the hashes of the heads of this document |  | ||||||
|  */ |  | ||||||
| export function getHeads<T>(doc: Doc<T>): Heads { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   return state.heads || state.handle.getHeads() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function dump<T>(doc: Doc<T>) { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   state.handle.dump() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export function toJS<T>(doc: Doc<T>): T { |  | ||||||
|   const state = _state(doc) |  | ||||||
|   const enabled = state.handle.enableFreeze(false) |  | ||||||
|   const result = state.handle.materialize() |  | ||||||
|   state.handle.enableFreeze(enabled) |  | ||||||
|   return result as T |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function isAutomerge(doc: unknown): boolean { |  | ||||||
|   if (typeof doc == "object" && doc !== null) { |  | ||||||
|     return getObjectId(doc) === "_root" && !!Reflect.get(doc, STATE) |  | ||||||
|   } else { |  | ||||||
|     return false |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function isObject(obj: unknown): obj is Record<string, unknown> { |  | ||||||
|   return typeof obj === "object" && obj !== null |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export type { |  | ||||||
|   API, |  | ||||||
|   SyncState, |  | ||||||
|   ActorId, |  | ||||||
|   Conflicts, |  | ||||||
|   Prop, |  | ||||||
|   Change, |  | ||||||
|   ObjID, |  | ||||||
|   DecodedChange, |  | ||||||
|   DecodedSyncMessage, |  | ||||||
|   Heads, |  | ||||||
|   MaterializeValue, |  | ||||||
| } |  | ||||||
|  | @ -1,224 +0,0 @@ | ||||||
| import type { Value } from "@automerge/automerge-wasm" |  | ||||||
| import { TEXT, STATE } from "./constants" |  | ||||||
| import type { InternalState } from "./internal_state" |  | ||||||
| 
 |  | ||||||
| export class Text { |  | ||||||
|   //eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
|   elems: Array<any> |  | ||||||
|   str: string | undefined |  | ||||||
|   //eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
|   spans: Array<any> | undefined; |  | ||||||
|   //eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
|   [STATE]?: InternalState<any> |  | ||||||
| 
 |  | ||||||
|   constructor(text?: string | string[] | Value[]) { |  | ||||||
|     if (typeof text === "string") { |  | ||||||
|       this.elems = [...text] |  | ||||||
|     } else if (Array.isArray(text)) { |  | ||||||
|       this.elems = text |  | ||||||
|     } else if (text === undefined) { |  | ||||||
|       this.elems = [] |  | ||||||
|     } else { |  | ||||||
|       throw new TypeError(`Unsupported initial value for Text: ${text}`) |  | ||||||
|     } |  | ||||||
|     Reflect.defineProperty(this, TEXT, { value: true }) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   get length(): number { |  | ||||||
|     return this.elems.length |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   //eslint-disable-next-line @typescript-eslint/no-explicit-any
 |  | ||||||
|   get(index: number): any { |  | ||||||
|     return this.elems[index] |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Iterates over the text elements character by character, including any |  | ||||||
|    * inline objects. |  | ||||||
|    */ |  | ||||||
|   [Symbol.iterator]() { |  | ||||||
|     const elems = this.elems |  | ||||||
|     let index = -1 |  | ||||||
|     return { |  | ||||||
|       next() { |  | ||||||
|         index += 1 |  | ||||||
|         if (index < elems.length) { |  | ||||||
|           return { done: false, value: elems[index] } |  | ||||||
|         } else { |  | ||||||
|           return { done: true } |  | ||||||
|         } |  | ||||||
|       }, |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Returns the content of the Text object as a simple string, ignoring any |  | ||||||
|    * non-character elements. |  | ||||||
|    */ |  | ||||||
|   toString(): string { |  | ||||||
|     if (!this.str) { |  | ||||||
|       // Concatting to a string is faster than creating an array and then
 |  | ||||||
|       // .join()ing for small (<100KB) arrays.
 |  | ||||||
|       // https://jsperf.com/join-vs-loop-w-type-test
 |  | ||||||
|       this.str = "" |  | ||||||
|       for (const elem of this.elems) { |  | ||||||
|         if (typeof elem === "string") this.str += elem |  | ||||||
|         else this.str += "\uFFFC" |  | ||||||
|       } |  | ||||||
|     } |  | ||||||
|     return this.str |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Returns the content of the Text object as a sequence of strings, |  | ||||||
|    * interleaved with non-character elements. |  | ||||||
|    * |  | ||||||
|    * For example, the value `['a', 'b', {x: 3}, 'c', 'd']` has spans: |  | ||||||
|    * `=> ['ab', {x: 3}, 'cd']` |  | ||||||
|    */ |  | ||||||
|   toSpans(): Array<Value | object> { |  | ||||||
|     if (!this.spans) { |  | ||||||
|       this.spans = [] |  | ||||||
|       let chars = "" |  | ||||||
|       for (const elem of this.elems) { |  | ||||||
|         if (typeof elem === "string") { |  | ||||||
|           chars += elem |  | ||||||
|         } else { |  | ||||||
|           if (chars.length > 0) { |  | ||||||
|             this.spans.push(chars) |  | ||||||
|             chars = "" |  | ||||||
|           } |  | ||||||
|           this.spans.push(elem) |  | ||||||
|         } |  | ||||||
|       } |  | ||||||
|       if (chars.length > 0) { |  | ||||||
|         this.spans.push(chars) |  | ||||||
|       } |  | ||||||
|     } |  | ||||||
|     return this.spans |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Returns the content of the Text object as a simple string, so that the |  | ||||||
|    * JSON serialization of an Automerge document represents text nicely. |  | ||||||
|    */ |  | ||||||
|   toJSON(): string { |  | ||||||
|     return this.toString() |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Updates the list item at position `index` to a new value `value`. |  | ||||||
|    */ |  | ||||||
|   set(index: number, value: Value) { |  | ||||||
|     if (this[STATE]) { |  | ||||||
|       throw new RangeError( |  | ||||||
|         "object cannot be modified outside of a change block" |  | ||||||
|       ) |  | ||||||
|     } |  | ||||||
|     this.elems[index] = value |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Inserts new list items `values` starting at position `index`. |  | ||||||
|    */ |  | ||||||
|   insertAt(index: number, ...values: Array<Value | object>) { |  | ||||||
|     if (this[STATE]) { |  | ||||||
|       throw new RangeError( |  | ||||||
|         "object cannot be modified outside of a change block" |  | ||||||
|       ) |  | ||||||
|     } |  | ||||||
|     this.elems.splice(index, 0, ...values) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   /** |  | ||||||
|    * Deletes `numDelete` list items starting at position `index`. |  | ||||||
|    * if `numDelete` is not given, one item is deleted. |  | ||||||
|    */ |  | ||||||
|   deleteAt(index: number, numDelete = 1) { |  | ||||||
|     if (this[STATE]) { |  | ||||||
|       throw new RangeError( |  | ||||||
|         "object cannot be modified outside of a change block" |  | ||||||
|       ) |  | ||||||
|     } |  | ||||||
|     this.elems.splice(index, numDelete) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   map<T>(callback: (e: Value | object) => T) { |  | ||||||
|     return this.elems.map(callback) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   lastIndexOf(searchElement: Value, fromIndex?: number) { |  | ||||||
|     return this.elems.lastIndexOf(searchElement, fromIndex) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   concat(other: Text): Text { |  | ||||||
|     return new Text(this.elems.concat(other.elems)) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   every(test: (v: Value) => boolean): boolean { |  | ||||||
|     return this.elems.every(test) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   filter(test: (v: Value) => boolean): Text { |  | ||||||
|     return new Text(this.elems.filter(test)) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   find(test: (v: Value) => boolean): Value | undefined { |  | ||||||
|     return this.elems.find(test) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   findIndex(test: (v: Value) => boolean): number | undefined { |  | ||||||
|     return this.elems.findIndex(test) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   forEach(f: (v: Value) => undefined) { |  | ||||||
|     this.elems.forEach(f) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   includes(elem: Value): boolean { |  | ||||||
|     return this.elems.includes(elem) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   indexOf(elem: Value) { |  | ||||||
|     return this.elems.indexOf(elem) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   join(sep?: string): string { |  | ||||||
|     return this.elems.join(sep) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   reduce( |  | ||||||
|     f: ( |  | ||||||
|       previousValue: Value, |  | ||||||
|       currentValue: Value, |  | ||||||
|       currentIndex: number, |  | ||||||
|       array: Value[] |  | ||||||
|     ) => Value |  | ||||||
|   ) { |  | ||||||
|     return this.elems.reduce(f) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   reduceRight( |  | ||||||
|     f: ( |  | ||||||
|       previousValue: Value, |  | ||||||
|       currentValue: Value, |  | ||||||
|       currentIndex: number, |  | ||||||
|       array: Value[] |  | ||||||
|     ) => Value |  | ||||||
|   ) { |  | ||||||
|     return this.elems.reduceRight(f) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   slice(start?: number, end?: number) { |  | ||||||
|     return new Text(this.elems.slice(start, end)) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   some(test: (arg: Value) => boolean): boolean { |  | ||||||
|     return this.elems.some(test) |  | ||||||
|   } |  | ||||||
| 
 |  | ||||||
|   toLocaleString() { |  | ||||||
|     return this.toString() |  | ||||||
|   } |  | ||||||
| } |  | ||||||
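A brief usage sketch, assuming the stable API's `from` and `change` functions are in scope alongside this class:

```
let doc = from({ note: new Text("hello") })
doc = change(doc, d => {
  // spread the string so each character becomes one element
  d.note.insertAt(5, ..." world")
})
doc.note.toString() // => "hello world"
```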
|  | @ -1,46 +1,10 @@ | ||||||
| export { Text } from "./text" | 
 | ||||||
| import { Text } from "./text" |  | ||||||
| export { Counter  } from "./counter" | export { Counter  } from "./counter" | ||||||
| export { Int, Uint, Float64  } from "./numbers" | export { Int, Uint, Float64  } from "./numbers" | ||||||
| 
 | 
 | ||||||
| import { Counter } from "./counter" | import { Counter } from "./counter" | ||||||
| import type { Patch } from "@automerge/automerge-wasm" |  | ||||||
| export type { Patch } from "@automerge/automerge-wasm" |  | ||||||
| 
 | 
 | ||||||
| export type AutomergeValue = | export type AutomergeValue = ScalarValue | { [key: string]: AutomergeValue } | Array<AutomergeValue> | ||||||
|   | ScalarValue |  | ||||||
|   | { [key: string]: AutomergeValue } |  | ||||||
|   | Array<AutomergeValue> |  | ||||||
|   | Text |  | ||||||
| export type MapValue =  { [key: string]: AutomergeValue } | export type MapValue =  { [key: string]: AutomergeValue } | ||||||
| export type ListValue = Array<AutomergeValue>  | export type ListValue = Array<AutomergeValue>  | ||||||
| export type ScalarValue = | export type ScalarValue = string | number | null | boolean | Date | Counter | Uint8Array | ||||||
|   | string |  | ||||||
|   | number |  | ||||||
|   | null |  | ||||||
|   | boolean |  | ||||||
|   | Date |  | ||||||
|   | Counter |  | ||||||
|   | Uint8Array |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * An automerge document. |  | ||||||
|  * @typeParam T - The type of the value contained in this document |  | ||||||
|  * |  | ||||||
|  * Note that this provides read only access to the fields of the value. To |  | ||||||
|  * modify the value use {@link change} |  | ||||||
|  */ |  | ||||||
| export type Doc<T> = { readonly [P in keyof T]: T[P] } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Callback which is called by various methods in this library to notify the |  | ||||||
|  * user of what changes have been made. |  | ||||||
|  * @param patch - A description of the changes made |  | ||||||
|  * @param before - The document before the change was made |  | ||||||
|  * @param after - The document after the change was made |  | ||||||
|  */ |  | ||||||
| export type PatchCallback<T> = ( |  | ||||||
|   patches: Array<Patch>, |  | ||||||
|   before: Doc<T>, |  | ||||||
|   after: Doc<T> |  | ||||||
| ) => void |  | ||||||
|  |  | ||||||
|  | @ -1,294 +0,0 @@ | ||||||
| /** |  | ||||||
|  * # The unstable API |  | ||||||
|  * |  | ||||||
|  * This module contains new features we are working on which are either not yet |  | ||||||
|  * ready for a stable release and/or which will result in backwards incompatible |  | ||||||
|  * API changes. The API of this module may change in arbitrary ways between |  | ||||||
|  * point releases - we will always document what these changes are in the |  | ||||||
|  * [CHANGELOG](#changelog) below, but only depend on this module if you are prepared to deal |  | ||||||
|  * with frequent changes. |  | ||||||
|  * |  | ||||||
|  * ## Differences from stable |  | ||||||
|  * |  | ||||||
|  * In the stable API text objects are represented using the {@link Text} class. |  | ||||||
|  * This means you must decide up front whether your string data might need |  | ||||||
|  * concurrent merges in the future and if you change your mind you have to |  | ||||||
|  * figure out how to migrate your data. In the unstable API the `Text` class is |  | ||||||
|  * gone and all `string`s are represented using the text CRDT, allowing for |  | ||||||
|  * concurrent changes. Modifying a string is done using the {@link splice} |  | ||||||
|  * function. You can still access the old behaviour of strings which do not |  | ||||||
|  * support merging behaviour via the {@link RawString} class. |  | ||||||
|  * |  | ||||||
|  * This leads to the following differences from `stable`: |  | ||||||
|  * |  | ||||||
|  * * There is no `unstable.Text` class, all strings are text objects |  | ||||||
|  * * Reading strings in an `unstable` document is the same as reading any other |  | ||||||
|  *   javascript string |  | ||||||
|  * * To modify strings in an `unstable` document use {@link splice} |  | ||||||
|  * * The {@link AutomergeValue} type does not include the {@link Text} |  | ||||||
|  *   class but the  {@link RawString} class is included in the {@link ScalarValue} |  | ||||||
|  *   type |  | ||||||
|  * |  | ||||||
|  * ## CHANGELOG |  | ||||||
|  * * Introduce this module to expose the new API which has no `Text` class |  | ||||||
|  * |  | ||||||
|  * |  | ||||||
|  * @module |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| export { |  | ||||||
|   Counter, |  | ||||||
|   type Doc, |  | ||||||
|   Int, |  | ||||||
|   Uint, |  | ||||||
|   Float64, |  | ||||||
|   type Patch, |  | ||||||
|   type PatchCallback, |  | ||||||
|   type AutomergeValue, |  | ||||||
|   type ScalarValue, |  | ||||||
| } from "./unstable_types" |  | ||||||
| 
 |  | ||||||
| import type { PatchCallback } from "./stable" |  | ||||||
| 
 |  | ||||||
| import { type UnstableConflicts as Conflicts } from "./conflicts" |  | ||||||
| import { unstableConflictAt } from "./conflicts" |  | ||||||
| 
 |  | ||||||
| export type { |  | ||||||
|   PutPatch, |  | ||||||
|   DelPatch, |  | ||||||
|   SpliceTextPatch, |  | ||||||
|   InsertPatch, |  | ||||||
|   IncPatch, |  | ||||||
|   SyncMessage, |  | ||||||
| } from "@automerge/automerge-wasm" |  | ||||||
| 
 |  | ||||||
| export type { ChangeOptions, ApplyOptions, ChangeFn } from "./stable" |  | ||||||
| export { |  | ||||||
|   view, |  | ||||||
|   free, |  | ||||||
|   getHeads, |  | ||||||
|   change, |  | ||||||
|   emptyChange, |  | ||||||
|   loadIncremental, |  | ||||||
|   save, |  | ||||||
|   merge, |  | ||||||
|   getActorId, |  | ||||||
|   getLastLocalChange, |  | ||||||
|   getChanges, |  | ||||||
|   getAllChanges, |  | ||||||
|   applyChanges, |  | ||||||
|   getHistory, |  | ||||||
|   equals, |  | ||||||
|   encodeSyncState, |  | ||||||
|   decodeSyncState, |  | ||||||
|   generateSyncMessage, |  | ||||||
|   receiveSyncMessage, |  | ||||||
|   initSyncState, |  | ||||||
|   encodeChange, |  | ||||||
|   decodeChange, |  | ||||||
|   encodeSyncMessage, |  | ||||||
|   decodeSyncMessage, |  | ||||||
|   getMissingDeps, |  | ||||||
|   dump, |  | ||||||
|   toJS, |  | ||||||
|   isAutomerge, |  | ||||||
|   getObjectId, |  | ||||||
| } from "./stable" |  | ||||||
| 
 |  | ||||||
| export type InitOptions<T> = { |  | ||||||
|   /** The actor ID to use for this document, a random one will be generated if `null` is passed */ |  | ||||||
|   actor?: ActorId |  | ||||||
|   freeze?: boolean |  | ||||||
|   /** A callback which will be called with the initial patch once the document has finished loading */ |  | ||||||
|   patchCallback?: PatchCallback<T> |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| import { ActorId, Doc } from "./stable" |  | ||||||
| import * as stable from "./stable" |  | ||||||
| export { RawString } from "./raw_string" |  | ||||||
| 
 |  | ||||||
| /** @hidden */ |  | ||||||
| export const getBackend = stable.getBackend |  | ||||||
| 
 |  | ||||||
| import { _is_proxy, _state, _obj } from "./internal_state" |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Create a new automerge document |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of value contained in the document. This will be the |  | ||||||
|  *     type that is passed to the change closure in {@link change} |  | ||||||
|  * @param _opts - Either an actorId or an {@link InitOptions} (which may |  | ||||||
|  *     contain an actorId). If this is null the document will be initialised with a |  | ||||||
|  *     random actor ID |  | ||||||
|  */ |  | ||||||
| export function init<T>(_opts?: ActorId | InitOptions<T>): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   opts.enableTextV2 = true |  | ||||||
|   return stable.init(opts) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Make a full writable copy of an automerge document |  | ||||||
|  * |  | ||||||
|  * @remarks |  | ||||||
|  * Unlike {@link view} this function makes a full copy of the memory backing |  | ||||||
|  * the document and can thus be passed to {@link change}. It also generates a |  | ||||||
|  * new actor ID so that changes made in the new document do not create duplicate |  | ||||||
|  * sequence numbers with respect to the old document. If you need control over |  | ||||||
|  * the actor ID which is generated you can pass the actor ID as the second |  | ||||||
|  * argument |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value contained in the document |  | ||||||
|  * @param doc - The document to clone |  | ||||||
|  * @param _opts - Either an actor ID to use for the new doc or an {@link InitOptions} |  | ||||||
|  */ |  | ||||||
| export function clone<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   opts.enableTextV2 = true |  | ||||||
|   return stable.clone(doc, opts) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Create an automerge document from a POJO |  | ||||||
|  * |  | ||||||
|  * @param initialState - The initial state which will be copied into the document |  | ||||||
|  * @typeParam T - The type of the value passed to `from` _and_ the type the resulting document will contain |  | ||||||
|  * @param _opts - Either an actor ID for the resulting document or an {@link InitOptions}; if this is null a random actor ID will be used |  | ||||||
|  * |  | ||||||
|  * @example |  | ||||||
|  * ``` |  | ||||||
|  * const doc = automerge.from({ |  | ||||||
|  *     tasks: [ |  | ||||||
|  *         {description: "feed dogs", done: false} |  | ||||||
|  *     ] |  | ||||||
|  * }) |  | ||||||
|  * ``` |  | ||||||
|  */ |  | ||||||
| export function from<T extends Record<string, unknown>>( |  | ||||||
|   initialState: T | Doc<T>, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   opts.enableTextV2 = true |  | ||||||
|   return stable.from(initialState, opts) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * Load an automerge document from a compressed document produce by {@link save} |  | ||||||
|  * |  | ||||||
|  * @typeParam T - The type of the value which is contained in the document. |  | ||||||
|  *                Note that no validation is done to make sure this type is in |  | ||||||
|  *                fact the type of the contained value so be a bit careful |  | ||||||
|  * @param data  - The compressed document |  | ||||||
|  * @param _opts - Either an actor ID or some {@link InitOptions}, if the actor |  | ||||||
|  *                ID is null a random actor ID will be created |  | ||||||
|  * |  | ||||||
|  * Note that `load` will throw an error if passed incomplete content (for |  | ||||||
|  * example if you are receiving content over the network and don't know if you |  | ||||||
|  * have the complete document yet). If you need to handle incomplete content use |  | ||||||
|  * {@link init} followed by {@link loadIncremental}. |  | ||||||
|  */ |  | ||||||
| export function load<T>( |  | ||||||
|   data: Uint8Array, |  | ||||||
|   _opts?: ActorId | InitOptions<T> |  | ||||||
| ): Doc<T> { |  | ||||||
|   const opts = importOpts(_opts) |  | ||||||
|   opts.enableTextV2 = true |  | ||||||
|   return stable.load(data, opts) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| function importOpts<T>( |  | ||||||
|   _actor?: ActorId | InitOptions<T> |  | ||||||
| ): stable.InitOptions<T> { |  | ||||||
|   if (typeof _actor === "object") { |  | ||||||
|     return _actor |  | ||||||
|   } else { |  | ||||||
|     return { actor: _actor } |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export function splice<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   prop: stable.Prop, |  | ||||||
|   index: number, |  | ||||||
|   del: number, |  | ||||||
|   newText?: string |  | ||||||
| ) { |  | ||||||
|   if (!_is_proxy(doc)) { |  | ||||||
|     throw new RangeError("object cannot be modified outside of a change block") |  | ||||||
|   } |  | ||||||
|   const state = _state(doc, false) |  | ||||||
|   const objectId = _obj(doc) |  | ||||||
|   if (!objectId) { |  | ||||||
|     throw new RangeError("invalid object for splice") |  | ||||||
|   } |  | ||||||
|   const value = `${objectId}/${prop}` |  | ||||||
|   try { |  | ||||||
|     return state.handle.splice(value, index, del, newText) |  | ||||||
|   } catch (e) { |  | ||||||
|     throw new RangeError(`Cannot splice: ${e}`) |  | ||||||
|   } |  | ||||||
| } |  | ||||||
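For example (a sketch using the `from` and `change` exported from this module):

```
let doc = from({ note: "hello" })
doc = change(doc, d => {
  // in the unstable API plain strings are text CRDTs, edited in place
  splice(d, "note", 5, 0, " world")
})
// doc.note === "hello world"
```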
| 
 |  | ||||||
| /** |  | ||||||
|  * Get the conflicts associated with a property |  | ||||||
|  * |  | ||||||
|  * The values of properties in a map in automerge can be conflicted if there |  | ||||||
|  * are concurrent "put" operations to the same key. Automerge chooses one value |  | ||||||
|  * arbitrarily (but deterministically, any two nodes who have the same set of |  | ||||||
|  * changes will choose the same value) from the set of conflicting values to |  | ||||||
|  * present as the value of the key. |  | ||||||
|  * |  | ||||||
|  * Sometimes you may want to examine these conflicts, in this case you can use |  | ||||||
|  * {@link getConflicts} to get the conflicts for the key. |  | ||||||
|  * |  | ||||||
|  * @example |  | ||||||
|  * ``` |  | ||||||
|  * import * as automerge from "@automerge/automerge" |  | ||||||
|  * |  | ||||||
|  * type Profile = { |  | ||||||
|  *     pets: Array<{name: string, type: string}> |  | ||||||
|  * } |  | ||||||
|  * |  | ||||||
|  * let doc1 = automerge.init<Profile>("aaaa") |  | ||||||
|  * doc1 = automerge.change(doc1, d => { |  | ||||||
|  *     d.pets = [{name: "Lassie", type: "dog"}] |  | ||||||
|  * }) |  | ||||||
|  * let doc2 = automerge.init<Profile>("bbbb") |  | ||||||
|  * doc2 = automerge.merge(doc2, automerge.clone(doc1)) |  | ||||||
|  * |  | ||||||
|  * doc2 = automerge.change(doc2, d => { |  | ||||||
|  *     d.pets[0].name = "Beethoven" |  | ||||||
|  * }) |  | ||||||
|  * |  | ||||||
|  * doc1 = automerge.change(doc1, d => { |  | ||||||
|  *     d.pets[0].name = "Babe" |  | ||||||
|  * }) |  | ||||||
|  * |  | ||||||
|  * const doc3 = automerge.merge(doc1, doc2) |  | ||||||
|  * |  | ||||||
|  * // Note that here we pass `doc3.pets`, not `doc3`
 |  | ||||||
|  * let conflicts = automerge.getConflicts(doc3.pets[0], "name") |  | ||||||
|  * |  | ||||||
|  * // The two conflicting values are the keys of the conflicts object
 |  | ||||||
|  * assert.deepEqual(Object.values(conflicts), ["Babe", "Beethoven"]) |  | ||||||
|  * ``` |  | ||||||
|  */ |  | ||||||
| export function getConflicts<T>( |  | ||||||
|   doc: Doc<T>, |  | ||||||
|   prop: stable.Prop |  | ||||||
| ): Conflicts | undefined { |  | ||||||
|   const state = _state(doc, false) |  | ||||||
|   if (!state.textV2) { |  | ||||||
|     throw new Error("use getConflicts for a stable document") |  | ||||||
|   } |  | ||||||
|   const objectId = _obj(doc) |  | ||||||
|   if (objectId != null) { |  | ||||||
|     return unstableConflictAt(state.handle, objectId, prop) |  | ||||||
|   } else { |  | ||||||
|     return undefined |  | ||||||
|   } |  | ||||||
| } |  | ||||||
|  | @ -1,30 +0,0 @@ | ||||||
| import { Counter } from "./types" |  | ||||||
| 
 |  | ||||||
| export { |  | ||||||
|   Counter, |  | ||||||
|   type Doc, |  | ||||||
|   Int, |  | ||||||
|   Uint, |  | ||||||
|   Float64, |  | ||||||
|   type Patch, |  | ||||||
|   type PatchCallback, |  | ||||||
| } from "./types" |  | ||||||
| 
 |  | ||||||
| import { RawString } from "./raw_string" |  | ||||||
| export { RawString } from "./raw_string" |  | ||||||
| 
 |  | ||||||
| export type AutomergeValue = |  | ||||||
|   | ScalarValue |  | ||||||
|   | { [key: string]: AutomergeValue } |  | ||||||
|   | Array<AutomergeValue> |  | ||||||
| export type MapValue = { [key: string]: AutomergeValue } |  | ||||||
| export type ListValue = Array<AutomergeValue> |  | ||||||
| export type ScalarValue = |  | ||||||
|   | string |  | ||||||
|   | number |  | ||||||
|   | null |  | ||||||
|   | boolean |  | ||||||
|   | Date |  | ||||||
|   | Counter |  | ||||||
|   | Uint8Array |  | ||||||
|   | RawString |  | ||||||
|  | @ -1,26 +0,0 @@ | ||||||
| import * as v4 from "https://deno.land/x/uuid@v0.1.2/mod.ts" |  | ||||||
| 
 |  | ||||||
| // this file is a Deno-only port of the uuid module
 |  | ||||||
| 
 |  | ||||||
| function defaultFactory() { |  | ||||||
|   return v4.uuid().replace(/-/g, "") |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| let factory = defaultFactory |  | ||||||
| 
 |  | ||||||
| interface UUIDFactory extends Function { |  | ||||||
|   setFactory(f: typeof factory): void |  | ||||||
|   reset(): void |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| export const uuid: UUIDFactory = () => { |  | ||||||
|   return factory() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| uuid.setFactory = newFactory => { |  | ||||||
|   factory = newFactory |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| uuid.reset = () => { |  | ||||||
|   factory = defaultFactory |  | ||||||
| } |  | ||||||
|  | @ -1,24 +1,21 @@ | ||||||
| import { v4 } from "uuid" | import { v4 } from 'uuid' | ||||||
| 
 | 
 | ||||||
| function defaultFactory() { | function defaultFactory() { | ||||||
|   return v4().replace(/-/g, "") |   return v4().replace(/-/g, '') | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| let factory = defaultFactory | let factory = defaultFactory | ||||||
| 
 | 
 | ||||||
| interface UUIDFactory extends Function { | interface UUIDFactory extends Function { | ||||||
|   setFactory(f: typeof factory): void |   setFactory(f: typeof factory): void; | ||||||
|   reset(): void |   reset(): void; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| export const uuid : UUIDFactory = () => { | export const uuid : UUIDFactory = () => { | ||||||
|   return factory() |   return factory() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| uuid.setFactory = newFactory => { | uuid.setFactory = newFactory => { factory = newFactory } | ||||||
|   factory = newFactory | 
 | ||||||
| } | uuid.reset = () => { factory = defaultFactory } | ||||||
| 
 | 
 | ||||||
| uuid.reset = () => { |  | ||||||
|   factory = defaultFactory |  | ||||||
| } |  | ||||||
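A brief sketch of the factory override that the `UUIDFactory` interface above allows, e.g. for deterministic ids in tests; the import path and the fixed hex string are illustrative.

import { uuid } from "./uuid"

uuid.setFactory(() => "0000000000000000000000000000000a") // fixed value for a test
const first = uuid()  // "0000000000000000000000000000000a"
uuid.reset()          // restore the default v4-based factory
const second = uuid() // random again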
|  |  | ||||||
|  | @ -1,23 +1,24 @@ | ||||||
| import * as assert from "assert" | import * as assert from 'assert' | ||||||
| import { unstable as Automerge } from "../src" | import {Counter} from 'automerge' | ||||||
|  | import * as Automerge from '../src' | ||||||
| import * as WASM from "@automerge/automerge-wasm" | import * as WASM from "@automerge/automerge-wasm" | ||||||
| 
 | 
 | ||||||
| describe("Automerge", () => { | describe('Automerge', () => { | ||||||
|   describe("basics", () => { |     describe('basics', () => { | ||||||
|     it("should init clone and free", () => { |         it('should init clone and free', () => { | ||||||
|             let doc1 = Automerge.init() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.clone(doc1) |             let doc2 = Automerge.clone(doc1); | ||||||
| 
 | 
 | ||||||
|             // this is only needed if weakrefs are not supported
 |             // this is only needed if weakrefs are not supported
 | ||||||
|             Automerge.free(doc1) |             Automerge.free(doc1) | ||||||
|             Automerge.free(doc2) |             Automerge.free(doc2) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("should be able to make a view with specifc heads", () => { |         it('should be able to make a view with specifc heads', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => (d.value = 1)) |             let doc2 = Automerge.change(doc1, (d) => d.value = 1) | ||||||
|             let heads2 = Automerge.getHeads(doc2) |             let heads2 = Automerge.getHeads(doc2) | ||||||
|       let doc3 = Automerge.change(doc2, d => (d.value = 2)) |             let doc3 = Automerge.change(doc2, (d) => d.value = 2) | ||||||
|             let doc2_v2 = Automerge.view(doc3, heads2) |             let doc2_v2 = Automerge.view(doc3, heads2) | ||||||
|             assert.deepEqual(doc2, doc2_v2) |             assert.deepEqual(doc2, doc2_v2) | ||||||
|             let doc2_v2_clone = Automerge.clone(doc2, "aabbcc") |             let doc2_v2_clone = Automerge.clone(doc2, "aabbcc") | ||||||
|  | @ -27,54 +28,28 @@ describe("Automerge", () => { | ||||||
| 
 | 
 | ||||||
|         it("should allow you to change a clone of a view", () => { |         it("should allow you to change a clone of a view", () => { | ||||||
|             let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init<any>() | ||||||
|       doc1 = Automerge.change(doc1, d => (d.key = "value")) |             doc1 = Automerge.change(doc1, d => d.key = "value") | ||||||
|             let heads = Automerge.getHeads(doc1) |             let heads = Automerge.getHeads(doc1) | ||||||
|       doc1 = Automerge.change(doc1, d => (d.key = "value2")) |             doc1 = Automerge.change(doc1, d => d.key = "value2") | ||||||
|             let fork = Automerge.clone(Automerge.view(doc1, heads)) |             let fork = Automerge.clone(Automerge.view(doc1, heads)) | ||||||
|             assert.deepEqual(fork, {key: "value"}) |             assert.deepEqual(fork, {key: "value"}) | ||||||
|       fork = Automerge.change(fork, d => (d.key = "value3")) |             fork = Automerge.change(fork, d => d.key = "value3") | ||||||
|             assert.deepEqual(fork, {key: "value3"}) |             assert.deepEqual(fork, {key: "value3"}) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle basic set and read on root object", () => { |         it('handle basic set and read on root object', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.hello = "world" |               d.hello = "world" | ||||||
|               d.big = "little" |               d.big = "little" | ||||||
|               d.zip = "zop" |               d.zip = "zop" | ||||||
|               d.app = "dap" |               d.app = "dap" | ||||||
|         assert.deepEqual(d, { |               assert.deepEqual(d, {  hello: "world", big: "little", zip: "zop", app: "dap" }) | ||||||
|           hello: "world", |  | ||||||
|           big: "little", |  | ||||||
|           zip: "zop", |  | ||||||
|           app: "dap", |  | ||||||
|         }) |  | ||||||
|       }) |  | ||||||
|       assert.deepEqual(doc2, { |  | ||||||
|         hello: "world", |  | ||||||
|         big: "little", |  | ||||||
|         zip: "zop", |  | ||||||
|         app: "dap", |  | ||||||
|             }) |             }) | ||||||
|  |             assert.deepEqual(doc2, {  hello: "world", big: "little", zip: "zop", app: "dap" }) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("should be able to insert and delete a large number of properties", () => { |         it('can detect an automerge doc with isAutomerge()', () => { | ||||||
|       let doc = Automerge.init<any>() |  | ||||||
| 
 |  | ||||||
|       doc = Automerge.change(doc, doc => { |  | ||||||
|         doc["k1"] = true |  | ||||||
|       }) |  | ||||||
| 
 |  | ||||||
|       for (let idx = 1; idx <= 200; idx++) { |  | ||||||
|         doc = Automerge.change(doc, doc => { |  | ||||||
|           delete doc["k" + idx] |  | ||||||
|           doc["k" + (idx + 1)] = true |  | ||||||
|           assert(Object.keys(doc).length == 1) |  | ||||||
|         }) |  | ||||||
|       } |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("can detect an automerge doc with isAutomerge()", () => { |  | ||||||
|             const doc1 = Automerge.from({ sub: { object: true } }) |             const doc1 = Automerge.from({ sub: { object: true } }) | ||||||
|             assert(Automerge.isAutomerge(doc1)) |             assert(Automerge.isAutomerge(doc1)) | ||||||
|             assert(!Automerge.isAutomerge(doc1.sub)) |             assert(!Automerge.isAutomerge(doc1.sub)) | ||||||
|  | @ -86,18 +61,15 @@ describe("Automerge", () => { | ||||||
|             assert.deepEqual(jsObj, doc1) |             assert.deepEqual(jsObj, doc1) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("it should recursively freeze the document if requested", () => { |         it('it should recursively freeze the document if requested', () => { | ||||||
|       let doc1 = Automerge.init<any>({ freeze: true }) |             let doc1 = Automerge.init({ freeze: true } ) | ||||||
|       let doc2 = Automerge.init<any>() |             let doc2 = Automerge.init() | ||||||
| 
 | 
 | ||||||
|             assert(Object.isFrozen(doc1)) |             assert(Object.isFrozen(doc1)) | ||||||
|             assert(!Object.isFrozen(doc2)) |             assert(!Object.isFrozen(doc2)) | ||||||
| 
 | 
 | ||||||
|             // will also freeze sub objects
 |             // will also freeze sub objects
 | ||||||
|       doc1 = Automerge.change( |             doc1 = Automerge.change(doc1, (doc) => doc.book = { title: "how to win friends" }) | ||||||
|         doc1, |  | ||||||
|         doc => (doc.book = { title: "how to win friends" }) |  | ||||||
|       ) |  | ||||||
|             doc2 = Automerge.merge(doc2,doc1) |             doc2 = Automerge.merge(doc2,doc1) | ||||||
|             assert(Object.isFrozen(doc1)) |             assert(Object.isFrozen(doc1)) | ||||||
|             assert(Object.isFrozen(doc1.book)) |             assert(Object.isFrozen(doc1.book)) | ||||||
|  | @ -110,7 +82,7 @@ describe("Automerge", () => { | ||||||
|             assert(Object.isFrozen(doc3.sub)) |             assert(Object.isFrozen(doc3.sub)) | ||||||
| 
 | 
 | ||||||
|             // works on load
 |             // works on load
 | ||||||
|       let doc4 = Automerge.load<any>(Automerge.save(doc3), { freeze: true }) |             let doc4 = Automerge.load(Automerge.save(doc3), { freeze: true }) | ||||||
|             assert(Object.isFrozen(doc4)) |             assert(Object.isFrozen(doc4)) | ||||||
|             assert(Object.isFrozen(doc4.sub)) |             assert(Object.isFrozen(doc4.sub)) | ||||||
| 
 | 
 | ||||||
|  | @ -124,27 +96,27 @@ describe("Automerge", () => { | ||||||
|             assert(!Object.isFrozen(exported)) |             assert(!Object.isFrozen(exported)) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle basic sets over many changes", () => { |         it('handle basic sets over many changes', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let timestamp = new Date() |             let timestamp = new Date(); | ||||||
|       let counter = new Automerge.Counter(100) |             let counter = new Automerge.Counter(100); | ||||||
|       let bytes = new Uint8Array([10, 11, 12]) |             let bytes = new Uint8Array([10,11,12]); | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.hello = "world" |               d.hello = "world" | ||||||
|             }) |             }) | ||||||
|       let doc3 = Automerge.change(doc2, d => { |             let doc3 = Automerge.change(doc2, (d) => { | ||||||
|               d.counter1 = counter |               d.counter1 = counter | ||||||
|             }) |             }) | ||||||
|       let doc4 = Automerge.change(doc3, d => { |             let doc4 = Automerge.change(doc3, (d) => { | ||||||
|               d.timestamp1 = timestamp |               d.timestamp1 = timestamp | ||||||
|             }) |             }) | ||||||
|       let doc5 = Automerge.change(doc4, d => { |             let doc5 = Automerge.change(doc4, (d) => { | ||||||
|               d.app = null |               d.app = null | ||||||
|             }) |             }) | ||||||
|       let doc6 = Automerge.change(doc5, d => { |             let doc6 = Automerge.change(doc5, (d) => { | ||||||
|               d.bytes1 = bytes |               d.bytes1 = bytes | ||||||
|             }) |             }) | ||||||
|       let doc7 = Automerge.change(doc6, d => { |             let doc7 = Automerge.change(doc6, (d) => { | ||||||
|               d.uint = new Automerge.Uint(1) |               d.uint = new Automerge.Uint(1) | ||||||
|               d.int = new Automerge.Int(-1) |               d.int = new Automerge.Int(-1) | ||||||
|               d.float64 = new Automerge.Float64(5.5) |               d.float64 = new Automerge.Float64(5.5) | ||||||
|  | @ -154,63 +126,48 @@ describe("Automerge", () => { | ||||||
|               d.false = false |               d.false = false | ||||||
|             }) |             }) | ||||||
| 
 | 
 | ||||||
|       assert.deepEqual(doc7, { |             assert.deepEqual(doc7, {  hello: "world", true: true, false: false, int: -1, uint: 1, float64: 5.5, number1: 100, number2: -45.67, counter1: counter, timestamp1: timestamp, bytes1: bytes, app: null }) | ||||||
|         hello: "world", |  | ||||||
|         true: true, |  | ||||||
|         false: false, |  | ||||||
|         int: -1, |  | ||||||
|         uint: 1, |  | ||||||
|         float64: 5.5, |  | ||||||
|         number1: 100, |  | ||||||
|         number2: -45.67, |  | ||||||
|         counter1: counter, |  | ||||||
|         timestamp1: timestamp, |  | ||||||
|         bytes1: bytes, |  | ||||||
|         app: null, |  | ||||||
|       }) |  | ||||||
| 
 | 
 | ||||||
|             let changes = Automerge.getAllChanges(doc7) |             let changes = Automerge.getAllChanges(doc7) | ||||||
|             let t1 = Automerge.init() |             let t1 = Automerge.init() | ||||||
|       let [t2] = Automerge.applyChanges(t1, changes) |             ;let [t2] = Automerge.applyChanges(t1, changes) | ||||||
|             assert.deepEqual(doc7,t2) |             assert.deepEqual(doc7,t2) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle overwrites to values", () => { |         it('handle overwrites to values', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.hello = "world1" |               d.hello = "world1" | ||||||
|             }) |             }) | ||||||
|       let doc3 = Automerge.change(doc2, d => { |             let doc3 = Automerge.change(doc2, (d) => { | ||||||
|               d.hello = "world2" |               d.hello = "world2" | ||||||
|             }) |             }) | ||||||
|       let doc4 = Automerge.change(doc3, d => { |             let doc4 = Automerge.change(doc3, (d) => { | ||||||
|               d.hello = "world3" |               d.hello = "world3" | ||||||
|             }) |             }) | ||||||
|       let doc5 = Automerge.change(doc4, d => { |             let doc5 = Automerge.change(doc4, (d) => { | ||||||
|               d.hello = "world4" |               d.hello = "world4" | ||||||
|             }) |             }) | ||||||
|             assert.deepEqual(doc5, {  hello: "world4" } ) |             assert.deepEqual(doc5, {  hello: "world4" } ) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle set with object value", () => { |         it('handle set with object value', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.subobj = { hello: "world", subsubobj: { zip: "zop" } } |               d.subobj = { hello: "world", subsubobj: { zip: "zop" } } | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc2, { |             assert.deepEqual(doc2, { subobj:  { hello: "world", subsubobj: { zip: "zop" } } }) | ||||||
|         subobj: { hello: "world", subsubobj: { zip: "zop" } }, |  | ||||||
|       }) |  | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle simple list creation", () => { |         it('handle simple list creation', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => (d.list = [])) |             let doc2 = Automerge.change(doc1, (d) => d.list = []) | ||||||
|             assert.deepEqual(doc2, { list: []}) |             assert.deepEqual(doc2, { list: []}) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle simple lists", () => { |         it('handle simple lists', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.list = [ 1, 2, 3 ] |               d.list = [ 1, 2, 3 ] | ||||||
|             }) |             }) | ||||||
|             assert.deepEqual(doc2.list.length, 3) |             assert.deepEqual(doc2.list.length, 3) | ||||||
|  | @ -220,7 +177,7 @@ describe("Automerge", () => { | ||||||
|             assert.deepEqual(doc2, { list: [1,2,3] }) |             assert.deepEqual(doc2, { list: [1,2,3] }) | ||||||
|            // assert.deepStrictEqual(Automerge.toJS(doc2), { list: [1,2,3] })
 |            // assert.deepStrictEqual(Automerge.toJS(doc2), { list: [1,2,3] })
 | ||||||
| 
 | 
 | ||||||
|       let doc3 = Automerge.change(doc2, d => { |             let doc3 = Automerge.change(doc2, (d) => { | ||||||
|               d.list[1] = "a" |               d.list[1] = "a" | ||||||
|             }) |             }) | ||||||
| 
 | 
 | ||||||
|  | @ -230,86 +187,81 @@ describe("Automerge", () => { | ||||||
|             assert.deepEqual(doc3.list[2], 3) |             assert.deepEqual(doc3.list[2], 3) | ||||||
|             assert.deepEqual(doc3, { list: [1,"a",3] }) |             assert.deepEqual(doc3, { list: [1,"a",3] }) | ||||||
|         }) |         }) | ||||||
|     it("handle simple lists", () => { |         it('handle simple lists', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.list = [ 1, 2, 3 ] |               d.list = [ 1, 2, 3 ] | ||||||
|             }) |             }) | ||||||
|             let changes = Automerge.getChanges(doc1, doc2) |             let changes = Automerge.getChanges(doc1, doc2) | ||||||
|             let docB1 = Automerge.init() |             let docB1 = Automerge.init() | ||||||
|       let [docB2] = Automerge.applyChanges(docB1, changes) |             ;let [docB2] = Automerge.applyChanges(docB1, changes) | ||||||
|       assert.deepEqual(docB2, doc2) |             assert.deepEqual(docB2, doc2); | ||||||
|         }) |         }) | ||||||
|     it("handle text", () => { |         it('handle text', () => { | ||||||
|       let doc1 = Automerge.init<any>() |             let doc1 = Automerge.init() | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.list = "hello" |               d.list = "hello" | ||||||
|               Automerge.splice(d, "list", 2, 0, "Z") |               Automerge.splice(d, "list", 2, 0, "Z") | ||||||
|             }) |             }) | ||||||
|             let changes = Automerge.getChanges(doc1, doc2) |             let changes = Automerge.getChanges(doc1, doc2) | ||||||
|             let docB1 = Automerge.init() |             let docB1 = Automerge.init() | ||||||
|       let [docB2] = Automerge.applyChanges(docB1, changes) |             ;let [docB2] = Automerge.applyChanges(docB1, changes) | ||||||
|       assert.deepEqual(docB2, doc2) |             assert.deepEqual(docB2, doc2); | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("handle non-text strings", () => { |         it('handle non-text strings', () => { | ||||||
|       let doc1 = WASM.create(true) |             let doc1 = WASM.create(); | ||||||
|       doc1.put("_root", "text", "hello world") |             doc1.put("_root", "text", "hello world"); | ||||||
|       let doc2 = Automerge.load<any>(doc1.save()) |             let doc2 = Automerge.load(doc1.save()) | ||||||
|             assert.throws(() => { |             assert.throws(() => { | ||||||
|         Automerge.change(doc2, d => { |               Automerge.change(doc2, (d) => { Automerge.splice(d, "text", 1, 0, "Z") }) | ||||||
|           Automerge.splice(d, "text", 1, 0, "Z") |  | ||||||
|         }) |  | ||||||
|             }, /Cannot splice/) |             }, /Cannot splice/) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("have many list methods", () => { |         it('have many list methods', () => { | ||||||
|             let doc1 = Automerge.from({ list: [1,2,3] }) |             let doc1 = Automerge.from({ list: [1,2,3] }) | ||||||
|       assert.deepEqual(doc1, { list: [1, 2, 3] }) |             assert.deepEqual(doc1, { list: [1,2,3] }); | ||||||
|       let doc2 = Automerge.change(doc1, d => { |             let doc2 = Automerge.change(doc1, (d) => { | ||||||
|               d.list.splice(1,1,9,10) |               d.list.splice(1,1,9,10) | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc2, { list: [1, 9, 10, 3] }) |             assert.deepEqual(doc2, { list: [1,9,10,3] }); | ||||||
|       let doc3 = Automerge.change(doc2, d => { |             let doc3 = Automerge.change(doc2, (d) => { | ||||||
|               d.list.push(11,12) |               d.list.push(11,12) | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc3, { list: [1, 9, 10, 3, 11, 12] }) |             assert.deepEqual(doc3, { list: [1,9,10,3,11,12] }); | ||||||
|       let doc4 = Automerge.change(doc3, d => { |             let doc4 = Automerge.change(doc3, (d) => { | ||||||
|               d.list.unshift(2,2) |               d.list.unshift(2,2) | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc4, { list: [2, 2, 1, 9, 10, 3, 11, 12] }) |             assert.deepEqual(doc4, { list: [2,2,1,9,10,3,11,12] }); | ||||||
|       let doc5 = Automerge.change(doc4, d => { |             let doc5 = Automerge.change(doc4, (d) => { | ||||||
|               d.list.shift() |               d.list.shift() | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc5, { list: [2, 1, 9, 10, 3, 11, 12] }) |             assert.deepEqual(doc5, { list: [2,1,9,10,3,11,12] }); | ||||||
|       let doc6 = Automerge.change(doc5, d => { |             let doc6 = Automerge.change(doc5, (d) => { | ||||||
|               d.list.insertAt(3,100,101) |               d.list.insertAt(3,100,101) | ||||||
|             }) |             }) | ||||||
|       assert.deepEqual(doc6, { list: [2, 1, 9, 100, 101, 10, 3, 11, 12] }) |             assert.deepEqual(doc6, { list: [2,1,9,100,101,10,3,11,12] }); | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("allows access to the backend", () => { |         it('allows access to the backend', () => { | ||||||
|           let doc = Automerge.init() |           let doc = Automerge.init() | ||||||
|           assert.deepEqual(Object.keys(Automerge.getBackend(doc)), ["ptr"]) |           assert.deepEqual(Object.keys(Automerge.getBackend(doc)), ["ptr"]) | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|     it("lists and text have indexof", () => { |         it('lists and text have indexof', () => { | ||||||
|       let doc = Automerge.from({ |           let doc = Automerge.from({ list: [0,1,2,3,4,5,6], text: "hello world" }) | ||||||
|         list: [0, 1, 2, 3, 4, 5, 6], |  | ||||||
|         text: "hello world", |  | ||||||
|       }) |  | ||||||
|           assert.deepEqual(doc.list.indexOf(5), 5) |           assert.deepEqual(doc.list.indexOf(5), 5) | ||||||
|           assert.deepEqual(doc.text.indexOf("world"), 6) |           assert.deepEqual(doc.text.indexOf("world"), 6) | ||||||
|         }) |         }) | ||||||
|     }) |     }) | ||||||
| 
 | 
 | ||||||
|   describe("emptyChange", () => { |     describe('emptyChange', () => { | ||||||
|     it("should generate a hash", () => { |         it('should generate a hash', () => { | ||||||
|             let doc = Automerge.init() |             let doc = Automerge.init() | ||||||
|             doc = Automerge.change<any>(doc, d => { |             doc = Automerge.change<any>(doc, d => { | ||||||
|                 d.key = "value" |                 d.key = "value" | ||||||
|             }) |             }) | ||||||
|       Automerge.save(doc) |             let _ = Automerge.save(doc) | ||||||
|             let headsBefore = Automerge.getHeads(doc) |             let headsBefore = Automerge.getHeads(doc) | ||||||
|             headsBefore.sort() |             headsBefore.sort() | ||||||
|             doc = Automerge.emptyChange(doc, "empty change") |             doc = Automerge.emptyChange(doc, "empty change") | ||||||
|  | @ -319,112 +271,46 @@ describe("Automerge", () => { | ||||||
|         }) |         }) | ||||||
|     }) |     }) | ||||||
| 
 | 
 | ||||||
|   describe("proxy lists", () => { |     describe('proxy lists', () => { | ||||||
|     it("behave like arrays", () => { |         it('behave like arrays', () => { | ||||||
|           let doc = Automerge.from({ |           let doc = Automerge.from({ | ||||||
|             chars: ["a","b","c"], |             chars: ["a","b","c"], | ||||||
|             numbers: [20,3,100], |             numbers: [20,3,100], | ||||||
|         repeats: [20, 20, 3, 3, 3, 3, 100, 100], |             repeats: [20,20,3,3,3,3,100,100] | ||||||
|           }) |           }) | ||||||
|       let r1: Array<number> = [] |           let r1 = [] | ||||||
|       doc = Automerge.change(doc, d => { |           doc = Automerge.change(doc, (d) => { | ||||||
|         assert.deepEqual((d.chars as any[]).concat([1, 2]), [ |             assert.deepEqual(d.chars.concat([1,2]), ["a","b","c",1,2]) | ||||||
|           "a", |             assert.deepEqual(d.chars.map((n) => n + "!"), ["a!", "b!", "c!"]) | ||||||
|           "b", |             assert.deepEqual(d.numbers.map((n) => n + 10), [30, 13, 110]) | ||||||
|           "c", |  | ||||||
|           1, |  | ||||||
|           2, |  | ||||||
|         ]) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.chars.map(n => n + "!"), |  | ||||||
|           ["a!", "b!", "c!"] |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.numbers.map(n => n + 10), |  | ||||||
|           [30, 13, 110] |  | ||||||
|         ) |  | ||||||
|             assert.deepEqual(d.numbers.toString(), "20,3,100") |             assert.deepEqual(d.numbers.toString(), "20,3,100") | ||||||
|             assert.deepEqual(d.numbers.toLocaleString(), "20,3,100") |             assert.deepEqual(d.numbers.toLocaleString(), "20,3,100") | ||||||
|         assert.deepEqual( |             assert.deepEqual(d.numbers.forEach((n) => r1.push(n)), undefined) | ||||||
|           d.numbers.forEach((n: number) => r1.push(n)), |             assert.deepEqual(d.numbers.every((n) => n > 1), true) | ||||||
|           undefined |             assert.deepEqual(d.numbers.every((n) => n > 10), false) | ||||||
|         ) |             assert.deepEqual(d.numbers.filter((n) => n > 10), [20,100]) | ||||||
|         assert.deepEqual( |             assert.deepEqual(d.repeats.find((n) => n < 10), 3) | ||||||
|           d.numbers.every(n => n > 1), |             assert.deepEqual(d.repeats.toArray().find((n) => n < 10), 3) | ||||||
|           true |             assert.deepEqual(d.repeats.find((n) => n < 0), undefined) | ||||||
|         ) |             assert.deepEqual(d.repeats.findIndex((n) => n < 10), 2) | ||||||
|         assert.deepEqual( |             assert.deepEqual(d.repeats.findIndex((n) => n < 0), -1) | ||||||
|           d.numbers.every(n => n > 10), |             assert.deepEqual(d.repeats.toArray().findIndex((n) => n < 10), 2) | ||||||
|           false |             assert.deepEqual(d.repeats.toArray().findIndex((n) => n < 0), -1) | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.numbers.filter(n => n > 10), |  | ||||||
|           [20, 100] |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.find(n => n < 10), |  | ||||||
|           3 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.find(n => n < 10), |  | ||||||
|           3 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.find(n => n < 0), |  | ||||||
|           undefined |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.findIndex(n => n < 10), |  | ||||||
|           2 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.findIndex(n => n < 0), |  | ||||||
|           -1 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.findIndex(n => n < 10), |  | ||||||
|           2 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.findIndex(n => n < 0), |  | ||||||
|           -1 |  | ||||||
|         ) |  | ||||||
|             assert.deepEqual(d.numbers.includes(3), true) |             assert.deepEqual(d.numbers.includes(3), true) | ||||||
|             assert.deepEqual(d.numbers.includes(-3), false) |             assert.deepEqual(d.numbers.includes(-3), false) | ||||||
|             assert.deepEqual(d.numbers.join("|"), "20|3|100") |             assert.deepEqual(d.numbers.join("|"), "20|3|100") | ||||||
|             assert.deepEqual(d.numbers.join(), "20,3,100") |             assert.deepEqual(d.numbers.join(), "20,3,100") | ||||||
|         assert.deepEqual( |             assert.deepEqual(d.numbers.some((f) => f === 3), true) | ||||||
|           d.numbers.some(f => f === 3), |             assert.deepEqual(d.numbers.some((f) => f < 0), false) | ||||||
|           true |             assert.deepEqual(d.numbers.reduce((sum,n) => sum + n, 100), 223) | ||||||
|         ) |             assert.deepEqual(d.repeats.reduce((sum,n) => sum + n, 100), 352) | ||||||
|         assert.deepEqual( |             assert.deepEqual(d.chars.reduce((sum,n) => sum + n, "="), "=abc") | ||||||
|           d.numbers.some(f => f < 0), |             assert.deepEqual(d.chars.reduceRight((sum,n) => sum + n, "="), "=cba") | ||||||
|           false |             assert.deepEqual(d.numbers.reduceRight((sum,n) => sum + n, 100), 223) | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.numbers.reduce((sum, n) => sum + n, 100), |  | ||||||
|           223 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.repeats.reduce((sum, n) => sum + n, 100), |  | ||||||
|           352 |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.chars.reduce((sum, n) => sum + n, "="), |  | ||||||
|           "=abc" |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.chars.reduceRight((sum, n) => sum + n, "="), |  | ||||||
|           "=cba" |  | ||||||
|         ) |  | ||||||
|         assert.deepEqual( |  | ||||||
|           d.numbers.reduceRight((sum, n) => sum + n, 100), |  | ||||||
|           223 |  | ||||||
|         ) |  | ||||||
|             assert.deepEqual(d.repeats.lastIndexOf(3), 5) |             assert.deepEqual(d.repeats.lastIndexOf(3), 5) | ||||||
|             assert.deepEqual(d.repeats.lastIndexOf(3,3), 3) |             assert.deepEqual(d.repeats.lastIndexOf(3,3), 3) | ||||||
|           }) |           }) | ||||||
|       doc = Automerge.change(doc, d => { |           doc = Automerge.change(doc, (d) => { | ||||||
|             assert.deepEqual(d.numbers.fill(-1,1,2), [20,-1,100]) |             assert.deepEqual(d.numbers.fill(-1,1,2), [20,-1,100]) | ||||||
|             assert.deepEqual(d.chars.fill("z",1,100), ["a","z","z"]) |             assert.deepEqual(d.chars.fill("z",1,100), ["a","z","z"]) | ||||||
|           }) |           }) | ||||||
|  | @ -434,42 +320,33 @@ describe("Automerge", () => { | ||||||
|         }) |         }) | ||||||
|     }) |     }) | ||||||
|      |      | ||||||
|   it("should obtain the same conflicts, regardless of merge order", () => { |     it('should obtain the same conflicts, regardless of merge order', () => { | ||||||
|     let s1 = Automerge.init<any>() |       let s1 = Automerge.init() | ||||||
|     let s2 = Automerge.init<any>() |       let s2 = Automerge.init() | ||||||
|     s1 = Automerge.change(s1, doc => { |       s1 = Automerge.change(s1, doc => { doc.x = 1; doc.y = 2 }) | ||||||
|       doc.x = 1 |       s2 = Automerge.change(s2, doc => { doc.x = 3; doc.y = 4 }) | ||||||
|       doc.y = 2 |  | ||||||
|     }) |  | ||||||
|     s2 = Automerge.change(s2, doc => { |  | ||||||
|       doc.x = 3 |  | ||||||
|       doc.y = 4 |  | ||||||
|     }) |  | ||||||
|       const m1 = Automerge.merge(Automerge.clone(s1), Automerge.clone(s2)) |       const m1 = Automerge.merge(Automerge.clone(s1), Automerge.clone(s2)) | ||||||
|       const m2 = Automerge.merge(Automerge.clone(s2), Automerge.clone(s1)) |       const m2 = Automerge.merge(Automerge.clone(s2), Automerge.clone(s1)) | ||||||
|     assert.deepStrictEqual( |       assert.deepStrictEqual(Automerge.getConflicts(m1, 'x'), Automerge.getConflicts(m2, 'x')) | ||||||
|       Automerge.getConflicts(m1, "x"), |  | ||||||
|       Automerge.getConflicts(m2, "x") |  | ||||||
|     ) |  | ||||||
|     }) |     }) | ||||||
| 
 | 
 | ||||||
|     describe("getObjectId", () => { |     describe("getObjectId", () => { | ||||||
|         let s1 = Automerge.from({ |         let s1 = Automerge.from({ | ||||||
|       string: "string", |             "string": "string", | ||||||
|       number: 1, |             "number": 1, | ||||||
|       null: null, |             "null": null, | ||||||
|       date: new Date(), |             "date": new Date(), | ||||||
|       counter: new Automerge.Counter(), |             "counter": new Automerge.Counter(), | ||||||
|       bytes: new Uint8Array(10), |             "bytes": new Uint8Array(10), | ||||||
|       text: "", |             "text": "", | ||||||
|       list: [], |             "list": [], | ||||||
|       map: {}, |             "map": {} | ||||||
|         }) |         }) | ||||||
| 
 | 
 | ||||||
|         it("should return null for scalar values", () => { |         it("should return null for scalar values", () => { | ||||||
|             assert.equal(Automerge.getObjectId(s1.string), null) |             assert.equal(Automerge.getObjectId(s1.string), null) | ||||||
|             assert.equal(Automerge.getObjectId(s1.number), null) |             assert.equal(Automerge.getObjectId(s1.number), null) | ||||||
|       assert.equal(Automerge.getObjectId(s1.null!), null) |             assert.equal(Automerge.getObjectId(s1.null), null) | ||||||
|             assert.equal(Automerge.getObjectId(s1.date), null) |             assert.equal(Automerge.getObjectId(s1.date), null) | ||||||
|             assert.equal(Automerge.getObjectId(s1.counter), null) |             assert.equal(Automerge.getObjectId(s1.counter), null) | ||||||
|             assert.equal(Automerge.getObjectId(s1.bytes), null) |             assert.equal(Automerge.getObjectId(s1.bytes), null) | ||||||
|  | @ -486,3 +363,4 @@ describe("Automerge", () => { | ||||||
|         }) |         }) | ||||||
|     }) |     }) | ||||||
| }) | }) | ||||||
|  | 
 | ||||||
|  |  | ||||||
							
								
								
									
										97
									
								
								javascript/test/columnar_test.ts
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
							|  | @ -0,0 +1,97 @@ | ||||||
|  | import * as assert from 'assert' | ||||||
|  | import { checkEncoded } from './helpers' | ||||||
|  | import * as Automerge from '../src' | ||||||
|  | import { encodeChange, decodeChange } from '../src' | ||||||
|  | 
 | ||||||
|  | describe('change encoding', () => { | ||||||
|  |   it('should encode text edits', () => { | ||||||
|  |     /* | ||||||
|  |     const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: '', deps: [], ops: [ | ||||||
|  |       {action: 'makeText', obj: '_root', key: 'text', insert: false, pred: []}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []}, | ||||||
|  |       {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', insert: false, pred: ['2@aaaa']}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []} | ||||||
|  |     ]} | ||||||
|  |     */ | ||||||
|  |     const change1 = {actor: 'aaaa', seq: 1, startOp: 1, time: 9, message: null, deps: [], ops: [ | ||||||
|  |       {action: 'makeText', obj: '_root', key: 'text', pred: []}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'h', pred: []}, | ||||||
|  |       {action: 'del', obj: '1@aaaa', elemId: '2@aaaa', pred: ['2@aaaa']}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '_head', insert: true, value: 'H', pred: []}, | ||||||
|  |       {action: 'set', obj: '1@aaaa', elemId: '4@aaaa', insert: true, value: 'i', pred: []} | ||||||
|  |     ]} | ||||||
|  |     checkEncoded(encodeChange(change1), [ | ||||||
|  |       0x85, 0x6f, 0x4a, 0x83, // magic bytes
 | ||||||
|  |       0xe2, 0xbd, 0xfb, 0xf5, // checksum
 | ||||||
|  |       1, 94, 0, 2, 0xaa, 0xaa, // chunkType: change, length, deps, actor 'aaaa'
 | ||||||
|  |       1, 1, 9, 0, 0, // seq, startOp, time, message, actor list
 | ||||||
|  |       12, 0x01, 4, 0x02, 4, // column count, objActor, objCtr
 | ||||||
|  |       0x11, 8, 0x13, 7, 0x15, 8, // keyActor, keyCtr, keyStr
 | ||||||
|  |       0x34, 4, 0x42, 6, // insert, action
 | ||||||
|  |       0x56, 6, 0x57, 3, // valLen, valRaw
 | ||||||
|  |       0x70, 6, 0x71, 2, 0x73, 2, // predNum, predActor, predCtr
 | ||||||
|  |       0, 1, 4, 0, // objActor column: null, 0, 0, 0, 0
 | ||||||
|  |       0, 1, 4, 1, // objCtr column: null, 1, 1, 1, 1
 | ||||||
|  |       0, 2, 0x7f, 0, 0, 1, 0x7f, 0, // keyActor column: null, null, 0, null, 0
 | ||||||
|  |       0, 1, 0x7c, 0, 2, 0x7e, 4, // keyCtr column: null, 0, 2, 0, 4
 | ||||||
|  |       0x7f, 4, 0x74, 0x65, 0x78, 0x74, 0, 4, // keyStr column: 'text', null, null, null, null
 | ||||||
|  |       1, 1, 1, 2, // insert column: false, true, false, true, true
 | ||||||
|  |       0x7d, 4, 1, 3, 2, 1, // action column: makeText, set, del, set, set
 | ||||||
|  |       0x7d, 0, 0x16, 0, 2, 0x16, // valLen column: 0, 0x16, 0, 0x16, 0x16
 | ||||||
|  |       0x68, 0x48, 0x69, // valRaw column: 'h', 'H', 'i'
 | ||||||
|  |       2, 0, 0x7f, 1, 2, 0, // predNum column: 0, 0, 1, 0, 0
 | ||||||
|  |       0x7f, 0, // predActor column: 0
 | ||||||
|  |       0x7f, 2 // predCtr column: 2
 | ||||||
|  |     ]) | ||||||
|  |     const decoded = decodeChange(encodeChange(change1)) | ||||||
|  |     assert.deepStrictEqual(decoded, Object.assign({hash: decoded.hash}, change1)) | ||||||
|  |   }) | ||||||
|  | 
 | ||||||
|  |   // FIXME - skipping this because it was never implemented in the rust impl and isn't trivial
 | ||||||
|  | /* | ||||||
|  |   it.skip('should require strict ordering of preds', () => { | ||||||
|  |     const change = new Uint8Array([ | ||||||
|  |       133, 111, 74, 131, 31, 229, 112, 44, 1, 105, 1, 58, 30, 190, 100, 253, 180, 180, 66, 49, 126, | ||||||
|  |       81, 142, 10, 3, 35, 140, 189, 231, 34, 145, 57, 66, 23, 224, 149, 64, 97, 88, 140, 168, 194, | ||||||
|  |       229, 4, 244, 209, 58, 138, 67, 140, 1, 152, 236, 250, 2, 0, 1, 4, 55, 234, 66, 242, 8, 21, 11, | ||||||
|  |       52, 1, 66, 2, 86, 3, 87, 10, 112, 2, 113, 3, 115, 4, 127, 9, 99, 111, 109, 109, 111, 110, 86, | ||||||
|  |       97, 114, 1, 127, 1, 127, 166, 1, 52, 48, 57, 49, 52, 57, 52, 53, 56, 50, 127, 2, 126, 0, 1, | ||||||
|  |       126, 139, 1, 0 | ||||||
|  |     ]) | ||||||
|  |     assert.throws(() => { decodeChange(change) }, /operation IDs are not in ascending order/) | ||||||
|  |   }) | ||||||
|  | */ | ||||||
|  | 
 | ||||||
|  |   describe('with trailing bytes', () => { | ||||||
|  |     let change = new Uint8Array([ | ||||||
|  |       0x85, 0x6f, 0x4a, 0x83, // magic bytes
 | ||||||
|  |       0xb2, 0x98, 0x9e, 0xa9, // checksum
 | ||||||
|  |       1, 61, 0, 2, 0x12, 0x34, // chunkType: change, length, deps, actor '1234'
 | ||||||
|  |       1, 1, 252, 250, 220, 255, 5, // seq, startOp, time
 | ||||||
|  |       14, 73, 110, 105, 116, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, // message: 'Initialization'
 | ||||||
|  |       0, 6, // actor list, column count
 | ||||||
|  |       0x15, 3, 0x34, 1, 0x42, 2, // keyStr, insert, action
 | ||||||
|  |       0x56, 2, 0x57, 1, 0x70, 2, // valLen, valRaw, predNum
 | ||||||
|  |       0x7f, 1, 0x78, // keyStr: 'x'
 | ||||||
|  |       1, // insert: false
 | ||||||
|  |       0x7f, 1, // action: set
 | ||||||
|  |       0x7f, 19, // valLen: 1 byte of type uint
 | ||||||
|  |       1, // valRaw: 1
 | ||||||
|  |       0x7f, 0, // predNum: 0
 | ||||||
|  |       0, 1, 2, 3, 4, 5, 6, 7, 8, 9 // 10 trailing bytes
 | ||||||
|  |     ]) | ||||||
|  | 
 | ||||||
|  |     it('should allow decoding and re-encoding', () => { | ||||||
|  |       // NOTE: This calls the JavaScript encoding and decoding functions, even when the WebAssembly
 | ||||||
|  |       // backend is loaded. Should the wasm backend export its own functions for testing?
 | ||||||
|  |       checkEncoded(change, encodeChange(decodeChange(change))) | ||||||
|  |     }) | ||||||
|  | 
 | ||||||
|  |     it('should be preserved in document encoding', () => { | ||||||
|  |       const [doc] = Automerge.applyChanges(Automerge.init(), [change]) | ||||||
|  |       const [reconstructed] = Automerge.getAllChanges(Automerge.load(Automerge.save(doc))) | ||||||
|  |       checkEncoded(change, reconstructed) | ||||||
|  |     }) | ||||||
|  |   }) | ||||||
|  | }) | ||||||
|  | @ -1,27 +1,19 @@ | ||||||
| import * as assert from "assert" |  | ||||||
| import { unstable as Automerge } from "../src" |  | ||||||
| 
 | 
 | ||||||
| describe("Automerge", () => { | import * as assert from 'assert' | ||||||
|   describe("basics", () => { | import * as Automerge from '../src' | ||||||
|     it("should allow you to load incrementally", () => { | 
 | ||||||
|       let doc1 = Automerge.from<any>({ foo: "bar" }) | describe('Automerge', () => { | ||||||
|       let doc2 = Automerge.init<any>() |     describe('basics', () => { | ||||||
|  |         it('should allow you to load incrementally', () => { | ||||||
|  |           let doc1 = Automerge.from({ foo: "bar" }) | ||||||
|  |           let doc2 = Automerge.init(); | ||||||
|           doc2 = Automerge.loadIncremental(doc2, Automerge.save(doc1)) |           doc2 = Automerge.loadIncremental(doc2, Automerge.save(doc1)) | ||||||
|       doc1 = Automerge.change(doc1, d => (d.foo2 = "bar2")) |           doc1 = Automerge.change(doc1, (d) => d.foo2 = "bar2") | ||||||
|       doc2 = Automerge.loadIncremental( |           doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) | ||||||
|         doc2, |           doc1 = Automerge.change(doc1, (d) => d.foo = "bar2") | ||||||
|         Automerge.getBackend(doc1).saveIncremental() |           doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) | ||||||
|       ) |           doc1 = Automerge.change(doc1, (d) => d.x = "y") | ||||||
|       doc1 = Automerge.change(doc1, d => (d.foo = "bar2")) |           doc2 = Automerge.loadIncremental(doc2, Automerge.getBackend(doc1).saveIncremental() ) | ||||||
|       doc2 = Automerge.loadIncremental( |  | ||||||
|         doc2, |  | ||||||
|         Automerge.getBackend(doc1).saveIncremental() |  | ||||||
|       ) |  | ||||||
|       doc1 = Automerge.change(doc1, d => (d.x = "y")) |  | ||||||
|       doc2 = Automerge.loadIncremental( |  | ||||||
|         doc2, |  | ||||||
|         Automerge.getBackend(doc1).saveIncremental() |  | ||||||
|       ) |  | ||||||
|           assert.deepEqual(doc1,doc2) |           assert.deepEqual(doc1,doc2) | ||||||
|         }) |         }) | ||||||
|     }) |     }) | ||||||
|  |  | ||||||
|  | @ -1,21 +1,16 @@ | ||||||
| import * as assert from "assert" | import * as assert from 'assert' | ||||||
| import { Encoder } from "./legacy/encoding" | import { Encoder } from './legacy/encoding' | ||||||
| 
 | 
 | ||||||
| // Assertion that succeeds if the first argument deepStrictEquals at least one of the
 | // Assertion that succeeds if the first argument deepStrictEquals at least one of the
 | ||||||
| // subsequent arguments (but we don't care which one)
 | // subsequent arguments (but we don't care which one)
 | ||||||
| export function assertEqualsOneOf(actual, ...expected) { | function assertEqualsOneOf(actual, ...expected) { | ||||||
|   assert(expected.length > 0) |   assert(expected.length > 0) | ||||||
|   for (let i = 0; i < expected.length; i++) { |   for (let i = 0; i < expected.length; i++) { | ||||||
|     try { |     try { | ||||||
|       assert.deepStrictEqual(actual, expected[i]) |       assert.deepStrictEqual(actual, expected[i]) | ||||||
|       return // if we get here without an exception, that means success
 |       return // if we get here without an exception, that means success
 | ||||||
|     } catch (e) { |     } catch (e) { | ||||||
|       if (e instanceof assert.AssertionError) { |       if (!e.name.match(/^AssertionError/) || i === expected.length - 1) throw e | ||||||
|         if (!e.name.match(/^AssertionError/) || i === expected.length - 1) |  | ||||||
|           throw e |  | ||||||
|       } else { |  | ||||||
|         throw e |  | ||||||
|       } |  | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  | @ -24,13 +19,14 @@ export function assertEqualsOneOf(actual, ...expected) { | ||||||
|  * Asserts that the byte array maintained by `encoder` contains the same byte |  * Asserts that the byte array maintained by `encoder` contains the same byte | ||||||
|  * sequence as the array `bytes`. |  * sequence as the array `bytes`. | ||||||
|  */ |  */ | ||||||
| export function checkEncoded(encoder, bytes, detail?) { | function checkEncoded(encoder, bytes, detail) { | ||||||
|   const encoded = encoder instanceof Encoder ? encoder.buffer : encoder |   const encoded = (encoder instanceof Encoder) ? encoder.buffer : encoder | ||||||
|   const expected = new Uint8Array(bytes) |   const expected = new Uint8Array(bytes) | ||||||
|   const message = |   const message = (detail ? `${detail}: ` : '') + `${encoded} expected to equal ${expected}` | ||||||
|     (detail ? `${detail}: ` : "") + `${encoded} expected to equal ${expected}` |  | ||||||
|   assert(encoded.byteLength === expected.byteLength, message) |   assert(encoded.byteLength === expected.byteLength, message) | ||||||
|   for (let i = 0; i < encoded.byteLength; i++) { |   for (let i = 0; i < encoded.byteLength; i++) { | ||||||
|     assert(encoded[i] === expected[i], message) |     assert(encoded[i] === expected[i], message) | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | module.exports = { assertEqualsOneOf, checkEncoded } | ||||||
|  |  | ||||||
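Two illustrative calls showing what the `assertEqualsOneOf` helper above accepts and rejects.

assertEqualsOneOf("b", "a", "b")      // passes: matches the second candidate
assertEqualsOneOf({ x: 1 }, { x: 2 }) // throws: no candidate matches, so the last AssertionError propagates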
										
											
												File diff suppressed because it is too large
											
										
									
								
							|  | @ -1,5 +1,5 @@ | ||||||
| function isObject(obj) { | function isObject(obj) { | ||||||
|   return typeof obj === "object" && obj !== null |   return typeof obj === 'object' && obj !== null | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  | @ -20,7 +20,7 @@ function copyObject(obj) { | ||||||
|  * with an actor ID, separated by an `@` sign) and returns an object `{counter, actorId}`. |  * with an actor ID, separated by an `@` sign) and returns an object `{counter, actorId}`. | ||||||
|  */ |  */ | ||||||
| function parseOpId(opId) { | function parseOpId(opId) { | ||||||
|   const match = /^(\d+)@(.*)$/.exec(opId || "") |   const match = /^(\d+)@(.*)$/.exec(opId || '') | ||||||
|   if (!match) { |   if (!match) { | ||||||
|     throw new RangeError(`Not a valid opId: ${opId}`) |     throw new RangeError(`Not a valid opId: ${opId}`) | ||||||
|   } |   } | ||||||
|  | @ -32,7 +32,7 @@ function parseOpId(opId) { | ||||||
|  */ |  */ | ||||||
| function equalBytes(array1, array2) { | function equalBytes(array1, array2) { | ||||||
|   if (!(array1 instanceof Uint8Array) || !(array2 instanceof Uint8Array)) { |   if (!(array1 instanceof Uint8Array) || !(array2 instanceof Uint8Array)) { | ||||||
|     throw new TypeError("equalBytes can only compare Uint8Arrays") |     throw new TypeError('equalBytes can only compare Uint8Arrays') | ||||||
|   } |   } | ||||||
|   if (array1.byteLength !== array2.byteLength) return false |   if (array1.byteLength !== array2.byteLength) return false | ||||||
|   for (let i = 0; i < array1.byteLength; i++) { |   for (let i = 0; i < array1.byteLength; i++) { | ||||||
|  | @ -51,9 +51,5 @@ function createArrayOfNulls(length) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| module.exports = { | module.exports = { | ||||||
|   isObject, |   isObject, copyObject, parseOpId, equalBytes, createArrayOfNulls | ||||||
|   copyObject, |  | ||||||
|   parseOpId, |  | ||||||
|   equalBytes, |  | ||||||
|   createArrayOfNulls, |  | ||||||
| } | } | ||||||
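A small illustration of the opId format that `parseOpId` above handles, per its doc comment; the concrete id is made up, and the numeric `counter` field is assumed from the documented `{counter, actorId}` shape.

// "counter@actorId": digits before the "@", actor id after it
parseOpId("3@d4f1a2b3")  // => { counter: 3, actorId: "d4f1a2b3" } (assumed shape)
parseOpId("not-an-opid") // throws RangeError: Not a valid opId: not-an-opid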
|  |  | ||||||
|  | @ -6,7 +6,7 @@ | ||||||
|  * https://github.com/anonyco/FastestSmallestTextEncoderDecoder
 |  * https://github.com/anonyco/FastestSmallestTextEncoderDecoder
 | ||||||
|  */ |  */ | ||||||
| const utf8encoder = new TextEncoder() | const utf8encoder = new TextEncoder() | ||||||
| const utf8decoder = new TextDecoder("utf-8") | const utf8decoder = new TextDecoder('utf-8') | ||||||
| 
 | 
 | ||||||
| function stringToUtf8(string) { | function stringToUtf8(string) { | ||||||
|   return utf8encoder.encode(string) |   return utf8encoder.encode(string) | ||||||
|  | @ -20,48 +20,30 @@ function utf8ToString(buffer) { | ||||||
|  * Converts a string consisting of hexadecimal digits into an Uint8Array. |  * Converts a string consisting of hexadecimal digits into an Uint8Array. | ||||||
|  */ |  */ | ||||||
| function hexStringToBytes(value) { | function hexStringToBytes(value) { | ||||||
|   if (typeof value !== "string") { |   if (typeof value !== 'string') { | ||||||
|     throw new TypeError("value is not a string") |     throw new TypeError('value is not a string') | ||||||
|   } |   } | ||||||
|   if (!/^([0-9a-f][0-9a-f])*$/.test(value)) { |   if (!/^([0-9a-f][0-9a-f])*$/.test(value)) { | ||||||
|     throw new RangeError("value is not hexadecimal") |     throw new RangeError('value is not hexadecimal') | ||||||
|   } |   } | ||||||
|   if (value === "") { |   if (value === '') { | ||||||
|     return new Uint8Array(0) |     return new Uint8Array(0) | ||||||
|   } else { |   } else { | ||||||
|     return new Uint8Array(value.match(/../g).map(b => parseInt(b, 16))) |     return new Uint8Array(value.match(/../g).map(b => parseInt(b, 16))) | ||||||
|   } |   } | ||||||
| } | } | ||||||
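A few worked calls for the hex helpers in this file; note that the pattern above only accepts lowercase digits, and `bytesToHexString` (defined just below) is the inverse conversion.

hexStringToBytes("")       // => empty Uint8Array (the empty string is allowed)
hexStringToBytes("00ff7f") // => Uint8Array [0, 255, 127]
hexStringToBytes("ABCD")   // throws RangeError: uppercase digits are rejected
bytesToHexString(new Uint8Array([0, 255, 127])) // => "00ff7f"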
| 
 | 
 | ||||||
| const NIBBLE_TO_HEX = [ | const NIBBLE_TO_HEX = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] | ||||||
|   "0", |  | ||||||
|   "1", |  | ||||||
|   "2", |  | ||||||
|   "3", |  | ||||||
|   "4", |  | ||||||
|   "5", |  | ||||||
|   "6", |  | ||||||
|   "7", |  | ||||||
|   "8", |  | ||||||
|   "9", |  | ||||||
|   "a", |  | ||||||
|   "b", |  | ||||||
|   "c", |  | ||||||
|   "d", |  | ||||||
|   "e", |  | ||||||
|   "f", |  | ||||||
| ] |  | ||||||
| const BYTE_TO_HEX = new Array(256) | const BYTE_TO_HEX = new Array(256) | ||||||
| for (let i = 0; i < 256; i++) { | for (let i = 0; i < 256; i++) { | ||||||
|   BYTE_TO_HEX[i] = `${NIBBLE_TO_HEX[(i >>> 4) & 0xf]}${NIBBLE_TO_HEX[i & 0xf]}` |   BYTE_TO_HEX[i] = `${NIBBLE_TO_HEX[(i >>> 4) & 0xf]}${NIBBLE_TO_HEX[i & 0xf]}`; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  * Converts a Uint8Array into the equivalent hexadecimal string. |  * Converts a Uint8Array into the equivalent hexadecimal string. | ||||||
|  */ |  */ | ||||||
| function bytesToHexString(bytes) { | function bytesToHexString(bytes) { | ||||||
|   let hex = "", |   let hex = '', len = bytes.byteLength | ||||||
|     len = bytes.byteLength |  | ||||||
|   for (let i = 0; i < len; i++) { |   for (let i = 0; i < len; i++) { | ||||||
|     hex += BYTE_TO_HEX[bytes[i]] |     hex += BYTE_TO_HEX[bytes[i]] | ||||||
|   } |   } | ||||||
|  | @ -113,17 +95,14 @@ class Encoder { | ||||||
|    * appends it to the buffer. Returns the number of bytes written. |    * appends it to the buffer. Returns the number of bytes written. | ||||||
|    */ |    */ | ||||||
|   appendUint32(value) { |   appendUint32(value) { | ||||||
|     if (!Number.isInteger(value)) |     if (!Number.isInteger(value)) throw new RangeError('value is not an integer') | ||||||
|       throw new RangeError("value is not an integer") |     if (value < 0 || value > 0xffffffff) throw new RangeError('number out of range') | ||||||
|     if (value < 0 || value > 0xffffffff) |  | ||||||
|       throw new RangeError("number out of range") |  | ||||||
| 
 | 
 | ||||||
|     const numBytes = Math.max(1, Math.ceil((32 - Math.clz32(value)) / 7)) |     const numBytes = Math.max(1, Math.ceil((32 - Math.clz32(value)) / 7)) | ||||||
|     if (this.offset + numBytes > this.buf.byteLength) this.grow() |     if (this.offset + numBytes > this.buf.byteLength) this.grow() | ||||||
| 
 | 
 | ||||||
|     for (let i = 0; i < numBytes; i++) { |     for (let i = 0; i < numBytes; i++) { | ||||||
|       this.buf[this.offset + i] = |       this.buf[this.offset + i] = (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) | ||||||
|         (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) |  | ||||||
|       value >>>= 7 // zero-filling right shift
 |       value >>>= 7 // zero-filling right shift
 | ||||||
|     } |     } | ||||||
|     this.offset += numBytes |     this.offset += numBytes | ||||||
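As a concrete illustration of the unsigned LEB128 loop above (a sketch, not part of the changed code): the value 300 spans two 7-bit groups, so appendUint32(300) emits two bytes.

    // 300 = 0b10_0101100
    //   byte 0: low seven bits 0101100 (0x2c), continuation bit set -> 0xac
    //   byte 1: remaining bits 10 (0x02), no continuation bit       -> 0x02
    // encoded form: [0xac, 0x02]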
|  | @ -136,19 +115,14 @@ class Encoder { | ||||||
|    * it to the buffer. Returns the number of bytes written. |    * it to the buffer. Returns the number of bytes written. | ||||||
|    */ |    */ | ||||||
|   appendInt32(value) { |   appendInt32(value) { | ||||||
|     if (!Number.isInteger(value)) |     if (!Number.isInteger(value)) throw new RangeError('value is not an integer') | ||||||
|       throw new RangeError("value is not an integer") |     if (value < -0x80000000 || value > 0x7fffffff) throw new RangeError('number out of range') | ||||||
|     if (value < -0x80000000 || value > 0x7fffffff) |  | ||||||
|       throw new RangeError("number out of range") |  | ||||||
| 
 | 
 | ||||||
|     const numBytes = Math.ceil( |     const numBytes = Math.ceil((33 - Math.clz32(value >= 0 ? value : -value - 1)) / 7) | ||||||
|       (33 - Math.clz32(value >= 0 ? value : -value - 1)) / 7 |  | ||||||
|     ) |  | ||||||
|     if (this.offset + numBytes > this.buf.byteLength) this.grow() |     if (this.offset + numBytes > this.buf.byteLength) this.grow() | ||||||
| 
 | 
 | ||||||
|     for (let i = 0; i < numBytes; i++) { |     for (let i = 0; i < numBytes; i++) { | ||||||
|       this.buf[this.offset + i] = |       this.buf[this.offset + i] = (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) | ||||||
|         (value & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) |  | ||||||
|       value >>= 7 // sign-propagating right shift
 |       value >>= 7 // sign-propagating right shift
 | ||||||
|     } |     } | ||||||
|     this.offset += numBytes |     this.offset += numBytes | ||||||
|  | @ -161,10 +135,9 @@ class Encoder { | ||||||
|    * (53 bits). |    * (53 bits). | ||||||
|    */ |    */ | ||||||
|   appendUint53(value) { |   appendUint53(value) { | ||||||
|     if (!Number.isInteger(value)) |     if (!Number.isInteger(value)) throw new RangeError('value is not an integer') | ||||||
|       throw new RangeError("value is not an integer") |  | ||||||
|     if (value < 0 || value > Number.MAX_SAFE_INTEGER) { |     if (value < 0 || value > Number.MAX_SAFE_INTEGER) { | ||||||
|       throw new RangeError("number out of range") |       throw new RangeError('number out of range') | ||||||
|     } |     } | ||||||
|     const high32 = Math.floor(value / 0x100000000) |     const high32 = Math.floor(value / 0x100000000) | ||||||
|     const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned
 |     const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned
 | ||||||
|  | @ -177,10 +150,9 @@ class Encoder { | ||||||
|    * (53 bits). |    * (53 bits). | ||||||
|    */ |    */ | ||||||
|   appendInt53(value) { |   appendInt53(value) { | ||||||
|     if (!Number.isInteger(value)) |     if (!Number.isInteger(value)) throw new RangeError('value is not an integer') | ||||||
|       throw new RangeError("value is not an integer") |  | ||||||
|     if (value < Number.MIN_SAFE_INTEGER || value > Number.MAX_SAFE_INTEGER) { |     if (value < Number.MIN_SAFE_INTEGER || value > Number.MAX_SAFE_INTEGER) { | ||||||
|       throw new RangeError("number out of range") |       throw new RangeError('number out of range') | ||||||
|     } |     } | ||||||
|     const high32 = Math.floor(value / 0x100000000) |     const high32 = Math.floor(value / 0x100000000) | ||||||
|     const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned
 |     const low32 = (value & 0xffffffff) >>> 0 // right shift to interpret as unsigned
 | ||||||
|  | @ -195,10 +167,10 @@ class Encoder { | ||||||
|    */ |    */ | ||||||
|   appendUint64(high32, low32) { |   appendUint64(high32, low32) { | ||||||
|     if (!Number.isInteger(high32) || !Number.isInteger(low32)) { |     if (!Number.isInteger(high32) || !Number.isInteger(low32)) { | ||||||
|       throw new RangeError("value is not an integer") |       throw new RangeError('value is not an integer') | ||||||
|     } |     } | ||||||
|     if (high32 < 0 || high32 > 0xffffffff || low32 < 0 || low32 > 0xffffffff) { |     if (high32 < 0 || high32 > 0xffffffff || low32 < 0 || low32 > 0xffffffff) { | ||||||
|       throw new RangeError("number out of range") |       throw new RangeError('number out of range') | ||||||
|     } |     } | ||||||
|     if (high32 === 0) return this.appendUint32(low32) |     if (high32 === 0) return this.appendUint32(low32) | ||||||
| 
 | 
 | ||||||
|  | @ -208,12 +180,10 @@ class Encoder { | ||||||
|       this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 |       this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 | ||||||
|       low32 >>>= 7 // zero-filling right shift
 |       low32 >>>= 7 // zero-filling right shift
 | ||||||
|     } |     } | ||||||
|     this.buf[this.offset + 4] = |     this.buf[this.offset + 4] = (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) | ||||||
|       (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) |  | ||||||
|     high32 >>>= 3 |     high32 >>>= 3 | ||||||
|     for (let i = 5; i < numBytes; i++) { |     for (let i = 5; i < numBytes; i++) { | ||||||
|       this.buf[this.offset + i] = |       this.buf[this.offset + i] = (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) | ||||||
|         (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) |  | ||||||
|       high32 >>>= 7 |       high32 >>>= 7 | ||||||
|     } |     } | ||||||
|     this.offset += numBytes |     this.offset += numBytes | ||||||
|  | @ -230,35 +200,25 @@ class Encoder { | ||||||
|    */ |    */ | ||||||
|   appendInt64(high32, low32) { |   appendInt64(high32, low32) { | ||||||
|     if (!Number.isInteger(high32) || !Number.isInteger(low32)) { |     if (!Number.isInteger(high32) || !Number.isInteger(low32)) { | ||||||
|       throw new RangeError("value is not an integer") |       throw new RangeError('value is not an integer') | ||||||
|     } |     } | ||||||
|     if ( |     if (high32 < -0x80000000 || high32 > 0x7fffffff || low32 < -0x80000000 || low32 > 0xffffffff) { | ||||||
|       high32 < -0x80000000 || |       throw new RangeError('number out of range') | ||||||
|       high32 > 0x7fffffff || |  | ||||||
|       low32 < -0x80000000 || |  | ||||||
|       low32 > 0xffffffff |  | ||||||
|     ) { |  | ||||||
|       throw new RangeError("number out of range") |  | ||||||
|     } |     } | ||||||
|     low32 >>>= 0 // interpret as unsigned
 |     low32 >>>= 0 // interpret as unsigned
 | ||||||
|     if (high32 === 0 && low32 <= 0x7fffffff) return this.appendInt32(low32) |     if (high32 === 0 && low32 <= 0x7fffffff) return this.appendInt32(low32) | ||||||
|     if (high32 === -1 && low32 >= 0x80000000) |     if (high32 === -1 && low32 >= 0x80000000) return this.appendInt32(low32 - 0x100000000) | ||||||
|       return this.appendInt32(low32 - 0x100000000) |  | ||||||
| 
 | 
 | ||||||
|     const numBytes = Math.ceil( |     const numBytes = Math.ceil((65 - Math.clz32(high32 >= 0 ? high32 : -high32 - 1)) / 7) | ||||||
|       (65 - Math.clz32(high32 >= 0 ? high32 : -high32 - 1)) / 7 |  | ||||||
|     ) |  | ||||||
|     if (this.offset + numBytes > this.buf.byteLength) this.grow() |     if (this.offset + numBytes > this.buf.byteLength) this.grow() | ||||||
|     for (let i = 0; i < 4; i++) { |     for (let i = 0; i < 4; i++) { | ||||||
|       this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 |       this.buf[this.offset + i] = (low32 & 0x7f) | 0x80 | ||||||
|       low32 >>>= 7 // zero-filling right shift
 |       low32 >>>= 7 // zero-filling right shift
 | ||||||
|     } |     } | ||||||
|     this.buf[this.offset + 4] = |     this.buf[this.offset + 4] = (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) | ||||||
|       (low32 & 0x0f) | ((high32 & 0x07) << 4) | (numBytes === 5 ? 0x00 : 0x80) |  | ||||||
|     high32 >>= 3 // sign-propagating right shift
 |     high32 >>= 3 // sign-propagating right shift
 | ||||||
|     for (let i = 5; i < numBytes; i++) { |     for (let i = 5; i < numBytes; i++) { | ||||||
|       this.buf[this.offset + i] = |       this.buf[this.offset + i] = (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) | ||||||
|         (high32 & 0x7f) | (i === numBytes - 1 ? 0x00 : 0x80) |  | ||||||
|       high32 >>= 7 |       high32 >>= 7 | ||||||
|     } |     } | ||||||
|     this.offset += numBytes |     this.offset += numBytes | ||||||
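To make the 64-bit split concrete (illustrative only; the byte-count computation itself is elided by this hunk): the pair high32 = 1, low32 = 0 represents 2^32, whose unsigned LEB128 encoding is four continuation bytes followed by 0x10.

    // appendUint64(1, 0) encodes 2^32 = 4294967296:
    //   bytes 0-3: 0x80, 0x80, 0x80, 0x80   (seven zero payload bits each, continuation bit set)
    //   byte 4:    0x10                     ((low32 & 0x0f) = 0, plus (high32 & 0x07) << 4 = 0x10, no continuation)
    // i.e. [0x80, 0x80, 0x80, 0x80, 0x10], matching the standard LEB128 encoding of 2^32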
|  | @ -283,7 +243,7 @@ class Encoder { | ||||||
|    * number of bytes appended. |    * number of bytes appended. | ||||||
|    */ |    */ | ||||||
|   appendRawString(value) { |   appendRawString(value) { | ||||||
|     if (typeof value !== "string") throw new TypeError("value is not a string") |     if (typeof value !== 'string') throw new TypeError('value is not a string') | ||||||
|     return this.appendRawBytes(stringToUtf8(value)) |     return this.appendRawBytes(stringToUtf8(value)) | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -302,7 +262,7 @@ class Encoder { | ||||||
|    * (where the length is encoded as an unsigned LEB128 integer). |    * (where the length is encoded as an unsigned LEB128 integer). | ||||||
|    */ |    */ | ||||||
|   appendPrefixedString(value) { |   appendPrefixedString(value) { | ||||||
|     if (typeof value !== "string") throw new TypeError("value is not a string") |     if (typeof value !== 'string') throw new TypeError('value is not a string') | ||||||
|     this.appendPrefixedBytes(stringToUtf8(value)) |     this.appendPrefixedBytes(stringToUtf8(value)) | ||||||
|     return this |     return this | ||||||
|   } |   } | ||||||
|  | @ -321,7 +281,8 @@ class Encoder { | ||||||
|    * Flushes any unwritten data to the buffer. Call this before reading from |    * Flushes any unwritten data to the buffer. Call this before reading from | ||||||
|    * the buffer constructed by this Encoder. |    * the buffer constructed by this Encoder. | ||||||
|    */ |    */ | ||||||
|   finish() {} |   finish() { | ||||||
|  |   } | ||||||
| } | } | ||||||
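The signed variants use sign-propagating shifts rather than a zigzag transformation, so small negative numbers also stay short; a few worked values as an illustrative sketch:

    // appendInt32(-1)  -> [0x7f]        (single byte, all seven payload bits set; sign-extended on decode)
    // appendInt32(-64) -> [0x40]        (the most negative value that fits in one 7-bit group)
    // appendInt32(-65) -> [0xbf, 0x7f]  (two groups: 0x3f plus the continuation bit, then 0x7f)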
| 
 | 
 | ||||||
| /** | /** | ||||||
|  | @ -360,7 +321,7 @@ class Decoder { | ||||||
|    */ |    */ | ||||||
|   skip(bytes) { |   skip(bytes) { | ||||||
|     if (this.offset + bytes > this.buf.byteLength) { |     if (this.offset + bytes > this.buf.byteLength) { | ||||||
|       throw new RangeError("cannot skip beyond end of buffer") |       throw new RangeError('cannot skip beyond end of buffer') | ||||||
|     } |     } | ||||||
|     this.offset += bytes |     this.offset += bytes | ||||||
|   } |   } | ||||||
|  | @ -378,20 +339,18 @@ class Decoder { | ||||||
|    * Throws an exception if the value doesn't fit in a 32-bit unsigned int. |    * Throws an exception if the value doesn't fit in a 32-bit unsigned int. | ||||||
|    */ |    */ | ||||||
|   readUint32() { |   readUint32() { | ||||||
|     let result = 0, |     let result = 0, shift = 0 | ||||||
|       shift = 0 |  | ||||||
|     while (this.offset < this.buf.byteLength) { |     while (this.offset < this.buf.byteLength) { | ||||||
|       const nextByte = this.buf[this.offset] |       const nextByte = this.buf[this.offset] | ||||||
|       if (shift === 28 && (nextByte & 0xf0) !== 0) { |       if (shift === 28 && (nextByte & 0xf0) !== 0) { // more than 5 bytes, or value > 0xffffffff
 | ||||||
|         // more than 5 bytes, or value > 0xffffffff
 |         throw new RangeError('number out of range') | ||||||
|         throw new RangeError("number out of range") |  | ||||||
|       } |       } | ||||||
|       result = (result | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned
 |       result = (result | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned
 | ||||||
|       shift += 7 |       shift += 7 | ||||||
|       this.offset++ |       this.offset++ | ||||||
|       if ((nextByte & 0x80) === 0) return result |       if ((nextByte & 0x80) === 0) return result | ||||||
|     } |     } | ||||||
|     throw new RangeError("buffer ended with incomplete number") |     throw new RangeError('buffer ended with incomplete number') | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -399,17 +358,13 @@ class Decoder { | ||||||
|    * Throws an exception if the value doesn't fit in a 32-bit signed int. |    * Throws an exception if the value doesn't fit in a 32-bit signed int. | ||||||
|    */ |    */ | ||||||
|   readInt32() { |   readInt32() { | ||||||
|     let result = 0, |     let result = 0, shift = 0 | ||||||
|       shift = 0 |  | ||||||
|     while (this.offset < this.buf.byteLength) { |     while (this.offset < this.buf.byteLength) { | ||||||
|       const nextByte = this.buf[this.offset] |       const nextByte = this.buf[this.offset] | ||||||
|       if ( |       if ((shift === 28 && (nextByte & 0x80) !== 0) || // more than 5 bytes
 | ||||||
|         (shift === 28 && (nextByte & 0x80) !== 0) || // more than 5 bytes
 |  | ||||||
|           (shift === 28 && (nextByte & 0x40) === 0 && (nextByte & 0x38) !== 0) || // positive int > 0x7fffffff
 |           (shift === 28 && (nextByte & 0x40) === 0 && (nextByte & 0x38) !== 0) || // positive int > 0x7fffffff
 | ||||||
|         (shift === 28 && (nextByte & 0x40) !== 0 && (nextByte & 0x38) !== 0x38) |           (shift === 28 && (nextByte & 0x40) !== 0 && (nextByte & 0x38) !== 0x38)) { // negative int < -0x80000000
 | ||||||
|       ) { |         throw new RangeError('number out of range') | ||||||
|         // negative int < -0x80000000
 |  | ||||||
|         throw new RangeError("number out of range") |  | ||||||
|       } |       } | ||||||
|       result |= (nextByte & 0x7f) << shift |       result |= (nextByte & 0x7f) << shift | ||||||
|       shift += 7 |       shift += 7 | ||||||
|  | @ -423,7 +378,7 @@ class Decoder { | ||||||
|         } |         } | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|     throw new RangeError("buffer ended with incomplete number") |     throw new RangeError('buffer ended with incomplete number') | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -434,7 +389,7 @@ class Decoder { | ||||||
|   readUint53() { |   readUint53() { | ||||||
|     const { low32, high32 } = this.readUint64() |     const { low32, high32 } = this.readUint64() | ||||||
|     if (high32 < 0 || high32 > 0x1fffff) { |     if (high32 < 0 || high32 > 0x1fffff) { | ||||||
|       throw new RangeError("number out of range") |       throw new RangeError('number out of range') | ||||||
|     } |     } | ||||||
|     return high32 * 0x100000000 + low32 |     return high32 * 0x100000000 + low32 | ||||||
|   } |   } | ||||||
|  | @ -446,12 +401,8 @@ class Decoder { | ||||||
|    */ |    */ | ||||||
|   readInt53() { |   readInt53() { | ||||||
|     const { low32, high32 } = this.readInt64() |     const { low32, high32 } = this.readInt64() | ||||||
|     if ( |     if (high32 < -0x200000 || (high32 === -0x200000 && low32 === 0) || high32 > 0x1fffff) { | ||||||
|       high32 < -0x200000 || |       throw new RangeError('number out of range') | ||||||
|       (high32 === -0x200000 && low32 === 0) || |  | ||||||
|       high32 > 0x1fffff |  | ||||||
|     ) { |  | ||||||
|       throw new RangeError("number out of range") |  | ||||||
|     } |     } | ||||||
|     return high32 * 0x100000000 + low32 |     return high32 * 0x100000000 + low32 | ||||||
|   } |   } | ||||||
|  | @ -463,12 +414,10 @@ class Decoder { | ||||||
|    * `{high32, low32}`. |    * `{high32, low32}`. | ||||||
|    */ |    */ | ||||||
|   readUint64() { |   readUint64() { | ||||||
|     let low32 = 0, |     let low32 = 0, high32 = 0, shift = 0 | ||||||
|       high32 = 0, |  | ||||||
|       shift = 0 |  | ||||||
|     while (this.offset < this.buf.byteLength && shift <= 28) { |     while (this.offset < this.buf.byteLength && shift <= 28) { | ||||||
|       const nextByte = this.buf[this.offset] |       const nextByte = this.buf[this.offset] | ||||||
|       low32 = (low32 | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned
 |       low32 = (low32 | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned
 | ||||||
|       if (shift === 28) { |       if (shift === 28) { | ||||||
|         high32 = (nextByte & 0x70) >>> 4 |         high32 = (nextByte & 0x70) >>> 4 | ||||||
|       } |       } | ||||||
|  | @ -480,16 +429,15 @@ class Decoder { | ||||||
|     shift = 3 |     shift = 3 | ||||||
|     while (this.offset < this.buf.byteLength) { |     while (this.offset < this.buf.byteLength) { | ||||||
|       const nextByte = this.buf[this.offset] |       const nextByte = this.buf[this.offset] | ||||||
|       if (shift === 31 && (nextByte & 0xfe) !== 0) { |       if (shift === 31 && (nextByte & 0xfe) !== 0) { // more than 10 bytes, or value > 2^64 - 1
 | ||||||
|         // more than 10 bytes, or value > 2^64 - 1
 |         throw new RangeError('number out of range') | ||||||
|         throw new RangeError("number out of range") |  | ||||||
|       } |       } | ||||||
|       high32 = (high32 | ((nextByte & 0x7f) << shift)) >>> 0 |       high32 = (high32 | (nextByte & 0x7f) << shift) >>> 0 | ||||||
|       shift += 7 |       shift += 7 | ||||||
|       this.offset++ |       this.offset++ | ||||||
|       if ((nextByte & 0x80) === 0) return { high32, low32 } |       if ((nextByte & 0x80) === 0) return { high32, low32 } | ||||||
|     } |     } | ||||||
|     throw new RangeError("buffer ended with incomplete number") |     throw new RangeError('buffer ended with incomplete number') | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -500,20 +448,17 @@ class Decoder { | ||||||
|    * sign of the `high32` half indicates the sign of the 64-bit number. |    * sign of the `high32` half indicates the sign of the 64-bit number. | ||||||
|    */ |    */ | ||||||
|   readInt64() { |   readInt64() { | ||||||
|     let low32 = 0, |     let low32 = 0, high32 = 0, shift = 0 | ||||||
|       high32 = 0, |  | ||||||
|       shift = 0 |  | ||||||
|     while (this.offset < this.buf.byteLength && shift <= 28) { |     while (this.offset < this.buf.byteLength && shift <= 28) { | ||||||
|       const nextByte = this.buf[this.offset] |       const nextByte = this.buf[this.offset] | ||||||
|       low32 = (low32 | ((nextByte & 0x7f) << shift)) >>> 0 // right shift to interpret value as unsigned
 |       low32 = (low32 | (nextByte & 0x7f) << shift) >>> 0 // right shift to interpret value as unsigned
 | ||||||
|       if (shift === 28) { |       if (shift === 28) { | ||||||
|         high32 = (nextByte & 0x70) >>> 4 |         high32 = (nextByte & 0x70) >>> 4 | ||||||
|       } |       } | ||||||
|       shift += 7 |       shift += 7 | ||||||
|       this.offset++ |       this.offset++ | ||||||
|       if ((nextByte & 0x80) === 0) { |       if ((nextByte & 0x80) === 0) { | ||||||
|         if ((nextByte & 0x40) !== 0) { |         if ((nextByte & 0x40) !== 0) { // sign-extend negative integer
 | ||||||
|           // sign-extend negative integer
 |  | ||||||
|           if (shift < 32) low32 = (low32 | (-1 << shift)) >>> 0 |           if (shift < 32) low32 = (low32 | (-1 << shift)) >>> 0 | ||||||
|           high32 |= -1 << Math.max(shift - 32, 0) |           high32 |= -1 << Math.max(shift - 32, 0) | ||||||
|         } |         } | ||||||
|  | @ -527,20 +472,19 @@ class Decoder { | ||||||
|       // On the 10th byte there are only two valid values: all 7 value bits zero
 |       // On the 10th byte there are only two valid values: all 7 value bits zero
 | ||||||
|       // (if the value is positive) or all 7 bits one (if the value is negative)
 |       // (if the value is positive) or all 7 bits one (if the value is negative)
 | ||||||
|       if (shift === 31 && nextByte !== 0 && nextByte !== 0x7f) { |       if (shift === 31 && nextByte !== 0 && nextByte !== 0x7f) { | ||||||
|         throw new RangeError("number out of range") |         throw new RangeError('number out of range') | ||||||
|       } |       } | ||||||
|       high32 |= (nextByte & 0x7f) << shift |       high32 |= (nextByte & 0x7f) << shift | ||||||
|       shift += 7 |       shift += 7 | ||||||
|       this.offset++ |       this.offset++ | ||||||
|       if ((nextByte & 0x80) === 0) { |       if ((nextByte & 0x80) === 0) { | ||||||
|         if ((nextByte & 0x40) !== 0 && shift < 32) { |         if ((nextByte & 0x40) !== 0 && shift < 32) { // sign-extend negative integer
 | ||||||
|           // sign-extend negative integer
 |  | ||||||
|           high32 |= -1 << shift |           high32 |= -1 << shift | ||||||
|         } |         } | ||||||
|         return { high32, low32 } |         return { high32, low32 } | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|     throw new RangeError("buffer ended with incomplete number") |     throw new RangeError('buffer ended with incomplete number') | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -550,7 +494,7 @@ class Decoder { | ||||||
|   readRawBytes(length) { |   readRawBytes(length) { | ||||||
|     const start = this.offset |     const start = this.offset | ||||||
|     if (start + length > this.buf.byteLength) { |     if (start + length > this.buf.byteLength) { | ||||||
|       throw new RangeError("subarray exceeds buffer size") |       throw new RangeError('subarray exceeds buffer size') | ||||||
|     } |     } | ||||||
|     this.offset += length |     this.offset += length | ||||||
|     return this.buf.subarray(start, this.offset) |     return this.buf.subarray(start, this.offset) | ||||||
|  | @ -615,7 +559,7 @@ class RLEEncoder extends Encoder { | ||||||
|   constructor(type) { |   constructor(type) { | ||||||
|     super() |     super() | ||||||
|     this.type = type |     this.type = type | ||||||
|     this.state = "empty" |     this.state = 'empty' | ||||||
|     this.lastValue = undefined |     this.lastValue = undefined | ||||||
|     this.count = 0 |     this.count = 0 | ||||||
|     this.literal = [] |     this.literal = [] | ||||||
|  | @ -634,81 +578,76 @@ class RLEEncoder extends Encoder { | ||||||
|    */ |    */ | ||||||
|   _appendValue(value, repetitions = 1) { |   _appendValue(value, repetitions = 1) { | ||||||
|     if (repetitions <= 0) return |     if (repetitions <= 0) return | ||||||
|     if (this.state === "empty") { |     if (this.state === 'empty') { | ||||||
|       this.state = |       this.state = (value === null ? 'nulls' : (repetitions === 1 ? 'loneValue' : 'repetition')) | ||||||
|         value === null |  | ||||||
|           ? "nulls" |  | ||||||
|           : repetitions === 1 |  | ||||||
|           ? "loneValue" |  | ||||||
|           : "repetition" |  | ||||||
|       this.lastValue = value |       this.lastValue = value | ||||||
|       this.count = repetitions |       this.count = repetitions | ||||||
|     } else if (this.state === "loneValue") { |     } else if (this.state === 'loneValue') { | ||||||
|       if (value === null) { |       if (value === null) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "nulls" |         this.state = 'nulls' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|       } else if (value === this.lastValue) { |       } else if (value === this.lastValue) { | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = 1 + repetitions |         this.count = 1 + repetitions | ||||||
|       } else if (repetitions > 1) { |       } else if (repetitions > 1) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } else { |       } else { | ||||||
|         this.state = "literal" |         this.state = 'literal' | ||||||
|         this.literal = [this.lastValue] |         this.literal = [this.lastValue] | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } |       } | ||||||
|     } else if (this.state === "repetition") { |     } else if (this.state === 'repetition') { | ||||||
|       if (value === null) { |       if (value === null) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "nulls" |         this.state = 'nulls' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|       } else if (value === this.lastValue) { |       } else if (value === this.lastValue) { | ||||||
|         this.count += repetitions |         this.count += repetitions | ||||||
|       } else if (repetitions > 1) { |       } else if (repetitions > 1) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } else { |       } else { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "loneValue" |         this.state = 'loneValue' | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } |       } | ||||||
|     } else if (this.state === "literal") { |     } else if (this.state === 'literal') { | ||||||
|       if (value === null) { |       if (value === null) { | ||||||
|         this.literal.push(this.lastValue) |         this.literal.push(this.lastValue) | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "nulls" |         this.state = 'nulls' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|       } else if (value === this.lastValue) { |       } else if (value === this.lastValue) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = 1 + repetitions |         this.count = 1 + repetitions | ||||||
|       } else if (repetitions > 1) { |       } else if (repetitions > 1) { | ||||||
|         this.literal.push(this.lastValue) |         this.literal.push(this.lastValue) | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } else { |       } else { | ||||||
|         this.literal.push(this.lastValue) |         this.literal.push(this.lastValue) | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } |       } | ||||||
|     } else if (this.state === "nulls") { |     } else if (this.state === 'nulls') { | ||||||
|       if (value === null) { |       if (value === null) { | ||||||
|         this.count += repetitions |         this.count += repetitions | ||||||
|       } else if (repetitions > 1) { |       } else if (repetitions > 1) { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "repetition" |         this.state = 'repetition' | ||||||
|         this.count = repetitions |         this.count = repetitions | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } else { |       } else { | ||||||
|         this.flush() |         this.flush() | ||||||
|         this.state = "loneValue" |         this.state = 'loneValue' | ||||||
|         this.lastValue = value |         this.lastValue = value | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|  | @ -727,16 +666,13 @@ class RLEEncoder extends Encoder { | ||||||
|    */ |    */ | ||||||
|   copyFrom(decoder, options = {}) { |   copyFrom(decoder, options = {}) { | ||||||
|     const { count, sumValues, sumShift } = options |     const { count, sumValues, sumShift } = options | ||||||
|     if (!(decoder instanceof RLEDecoder) || decoder.type !== this.type) { |     if (!(decoder instanceof RLEDecoder) || (decoder.type !== this.type)) { | ||||||
|       throw new TypeError("incompatible type of decoder") |       throw new TypeError('incompatible type of decoder') | ||||||
|     } |     } | ||||||
|     let remaining = typeof count === "number" ? count : Number.MAX_SAFE_INTEGER |     let remaining = (typeof count === 'number' ? count : Number.MAX_SAFE_INTEGER) | ||||||
|     let nonNullValues = 0, |     let nonNullValues = 0, sum = 0 | ||||||
|       sum = 0 |     if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|     if (count && remaining > 0 && decoder.done) |     if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} | ||||||
|       throw new RangeError(`cannot copy ${count} values`) |  | ||||||
|     if (remaining === 0 || decoder.done) |  | ||||||
|       return sumValues ? { nonNullValues, sum } : { nonNullValues } |  | ||||||
| 
 | 
 | ||||||
|     // Copy a value so that we have a well-defined starting state. NB: when super.copyFrom() is
 |     // Copy a value so that we have a well-defined starting state. NB: when super.copyFrom() is
 | ||||||
|     // called by the DeltaEncoder subclass, the following calls to readValue() and appendValue()
 |     // called by the DeltaEncoder subclass, the following calls to readValue() and appendValue()
 | ||||||
|  | @ -748,69 +684,55 @@ class RLEEncoder extends Encoder { | ||||||
|       remaining -= numNulls |       remaining -= numNulls | ||||||
|       decoder.count -= numNulls - 1 |       decoder.count -= numNulls - 1 | ||||||
|       this.appendValue(null, numNulls) |       this.appendValue(null, numNulls) | ||||||
|       if (count && remaining > 0 && decoder.done) |       if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|         throw new RangeError(`cannot copy ${count} values`) |       if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} | ||||||
|       if (remaining === 0 || decoder.done) |  | ||||||
|         return sumValues ? { nonNullValues, sum } : { nonNullValues } |  | ||||||
|       firstValue = decoder.readValue() |       firstValue = decoder.readValue() | ||||||
|       if (firstValue === null) |       if (firstValue === null) throw new RangeError('null run must be followed by non-null value') | ||||||
|         throw new RangeError("null run must be followed by non-null value") |  | ||||||
|     } |     } | ||||||
|     this.appendValue(firstValue) |     this.appendValue(firstValue) | ||||||
|     remaining-- |     remaining-- | ||||||
|     nonNullValues++ |     nonNullValues++ | ||||||
|     if (sumValues) sum += sumShift ? firstValue >>> sumShift : firstValue |     if (sumValues) sum += (sumShift ? (firstValue >>> sumShift) : firstValue) | ||||||
|     if (count && remaining > 0 && decoder.done) |     if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|       throw new RangeError(`cannot copy ${count} values`) |     if (remaining === 0 || decoder.done) return sumValues ? {nonNullValues, sum} : {nonNullValues} | ||||||
|     if (remaining === 0 || decoder.done) |  | ||||||
|       return sumValues ? { nonNullValues, sum } : { nonNullValues } |  | ||||||
| 
 | 
 | ||||||
|     // Copy data at the record level without expanding repetitions
 |     // Copy data at the record level without expanding repetitions
 | ||||||
|     let firstRun = decoder.count > 0 |     let firstRun = (decoder.count > 0) | ||||||
|     while (remaining > 0 && !decoder.done) { |     while (remaining > 0 && !decoder.done) { | ||||||
|       if (!firstRun) decoder.readRecord() |       if (!firstRun) decoder.readRecord() | ||||||
|       const numValues = Math.min(decoder.count, remaining) |       const numValues = Math.min(decoder.count, remaining) | ||||||
|       decoder.count -= numValues |       decoder.count -= numValues | ||||||
| 
 | 
 | ||||||
|       if (decoder.state === "literal") { |       if (decoder.state === 'literal') { | ||||||
|         nonNullValues += numValues |         nonNullValues += numValues | ||||||
|         for (let i = 0; i < numValues; i++) { |         for (let i = 0; i < numValues; i++) { | ||||||
|           if (decoder.done) throw new RangeError("incomplete literal") |           if (decoder.done) throw new RangeError('incomplete literal') | ||||||
|           const value = decoder.readRawValue() |           const value = decoder.readRawValue() | ||||||
|           if (value === decoder.lastValue) |           if (value === decoder.lastValue) throw new RangeError('Repetition of values is not allowed in literal') | ||||||
|             throw new RangeError( |  | ||||||
|               "Repetition of values is not allowed in literal" |  | ||||||
|             ) |  | ||||||
|           decoder.lastValue = value |           decoder.lastValue = value | ||||||
|           this._appendValue(value) |           this._appendValue(value) | ||||||
|           if (sumValues) sum += sumShift ? value >>> sumShift : value |           if (sumValues) sum += (sumShift ? (value >>> sumShift) : value) | ||||||
|         } |         } | ||||||
|       } else if (decoder.state === "repetition") { |       } else if (decoder.state === 'repetition') { | ||||||
|         nonNullValues += numValues |         nonNullValues += numValues | ||||||
|         if (sumValues) |         if (sumValues) sum += numValues * (sumShift ? (decoder.lastValue >>> sumShift) : decoder.lastValue) | ||||||
|           sum += |  | ||||||
|             numValues * |  | ||||||
|             (sumShift ? decoder.lastValue >>> sumShift : decoder.lastValue) |  | ||||||
|         const value = decoder.lastValue |         const value = decoder.lastValue | ||||||
|         this._appendValue(value) |         this._appendValue(value) | ||||||
|         if (numValues > 1) { |         if (numValues > 1) { | ||||||
|           this._appendValue(value) |           this._appendValue(value) | ||||||
|           if (this.state !== "repetition") |           if (this.state !== 'repetition') throw new RangeError(`Unexpected state ${this.state}`) | ||||||
|             throw new RangeError(`Unexpected state ${this.state}`) |  | ||||||
|           this.count += numValues - 2 |           this.count += numValues - 2 | ||||||
|         } |         } | ||||||
|       } else if (decoder.state === "nulls") { |       } else if (decoder.state === 'nulls') { | ||||||
|         this._appendValue(null) |         this._appendValue(null) | ||||||
|         if (this.state !== "nulls") |         if (this.state !== 'nulls') throw new RangeError(`Unexpected state ${this.state}`) | ||||||
|           throw new RangeError(`Unexpected state ${this.state}`) |  | ||||||
|         this.count += numValues - 1 |         this.count += numValues - 1 | ||||||
|       } |       } | ||||||
| 
 | 
 | ||||||
|       firstRun = false |       firstRun = false | ||||||
|       remaining -= numValues |       remaining -= numValues | ||||||
|     } |     } | ||||||
|     if (count && remaining > 0 && decoder.done) |     if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|       throw new RangeError(`cannot copy ${count} values`) |  | ||||||
|     return sumValues ? {nonNullValues, sum} : {nonNullValues} |     return sumValues ? {nonNullValues, sum} : {nonNullValues} | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -818,31 +740,31 @@ class RLEEncoder extends Encoder { | ||||||
|    * Private method, do not call from outside the class. |    * Private method, do not call from outside the class. | ||||||
|    */ |    */ | ||||||
|   flush() { |   flush() { | ||||||
|     if (this.state === "loneValue") { |     if (this.state === 'loneValue') { | ||||||
|       this.appendInt32(-1) |       this.appendInt32(-1) | ||||||
|       this.appendRawValue(this.lastValue) |       this.appendRawValue(this.lastValue) | ||||||
|     } else if (this.state === "repetition") { |     } else if (this.state === 'repetition') { | ||||||
|       this.appendInt53(this.count) |       this.appendInt53(this.count) | ||||||
|       this.appendRawValue(this.lastValue) |       this.appendRawValue(this.lastValue) | ||||||
|     } else if (this.state === "literal") { |     } else if (this.state === 'literal') { | ||||||
|       this.appendInt53(-this.literal.length) |       this.appendInt53(-this.literal.length) | ||||||
|       for (let v of this.literal) this.appendRawValue(v) |       for (let v of this.literal) this.appendRawValue(v) | ||||||
|     } else if (this.state === "nulls") { |     } else if (this.state === 'nulls') { | ||||||
|       this.appendInt32(0) |       this.appendInt32(0) | ||||||
|       this.appendUint53(this.count) |       this.appendUint53(this.count) | ||||||
|     } |     } | ||||||
|     this.state = "empty" |     this.state = 'empty' | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|    * Private method, do not call from outside the class. |    * Private method, do not call from outside the class. | ||||||
|    */ |    */ | ||||||
|   appendRawValue(value) { |   appendRawValue(value) { | ||||||
|     if (this.type === "int") { |     if (this.type === 'int') { | ||||||
|       this.appendInt53(value) |       this.appendInt53(value) | ||||||
|     } else if (this.type === "uint") { |     } else if (this.type === 'uint') { | ||||||
|       this.appendUint53(value) |       this.appendUint53(value) | ||||||
|     } else if (this.type === "utf8") { |     } else if (this.type === 'utf8') { | ||||||
|       this.appendPrefixedString(value) |       this.appendPrefixedString(value) | ||||||
|     } else { |     } else { | ||||||
|       throw new RangeError(`Unknown RLEEncoder datatype: ${this.type}`) |       throw new RangeError(`Unknown RLEEncoder datatype: ${this.type}`) | ||||||
|  | @ -854,9 +776,9 @@ class RLEEncoder extends Encoder { | ||||||
|    * the buffer constructed by this Encoder. |    * the buffer constructed by this Encoder. | ||||||
|    */ |    */ | ||||||
|   finish() { |   finish() { | ||||||
|     if (this.state === "literal") this.literal.push(this.lastValue) |     if (this.state === 'literal') this.literal.push(this.lastValue) | ||||||
|     // Don't write anything if the only values we have seen are nulls
 |     // Don't write anything if the only values we have seen are nulls
 | ||||||
|     if (this.state !== "nulls" || this.offset > 0) this.flush() |     if (this.state !== 'nulls' || this.offset > 0) this.flush() | ||||||
|   } |   } | ||||||
| } | } | ||||||
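To summarise the run-length format produced by flush(): each run begins with a signed LEB128 count, where a positive count introduces a repeated value, a negative count introduces a literal run of that many distinct values, and a zero count introduces a null run whose length follows as an unsigned integer. An illustrative sketch of how a short sequence is appended:

    const encoder = new RLEEncoder('int')
    for (const value of [1, 1, 1, 2, null, null, 3]) encoder.appendValue(value)
    encoder.finish()
    // The column now holds four runs:
    //    3, 1   -> the value 1 repeated three times
    //   -1, 2   -> a literal run of length 1 containing 2
    //    0, 2   -> a run of two nulls
    //   -1, 3   -> a literal run of length 1 containing 3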
| 
 | 
 | ||||||
|  | @ -878,7 +800,7 @@ class RLEDecoder extends Decoder { | ||||||
|    * position, and true if we are at the end of the buffer. |    * position, and true if we are at the end of the buffer. | ||||||
|    */ |    */ | ||||||
|   get done() { |   get done() { | ||||||
|     return this.count === 0 && this.offset === this.buf.byteLength |     return (this.count === 0) && (this.offset === this.buf.byteLength) | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -899,10 +821,9 @@ class RLEDecoder extends Decoder { | ||||||
|     if (this.done) return null |     if (this.done) return null | ||||||
|     if (this.count === 0) this.readRecord() |     if (this.count === 0) this.readRecord() | ||||||
|     this.count -= 1 |     this.count -= 1 | ||||||
|     if (this.state === "literal") { |     if (this.state === 'literal') { | ||||||
|       const value = this.readRawValue() |       const value = this.readRawValue() | ||||||
|       if (value === this.lastValue) |       if (value === this.lastValue) throw new RangeError('Repetition of values is not allowed in literal') | ||||||
|         throw new RangeError("Repetition of values is not allowed in literal") |  | ||||||
|       this.lastValue = value |       this.lastValue = value | ||||||
|       return value |       return value | ||||||
|     } else { |     } else { | ||||||
|  | @ -918,22 +839,20 @@ class RLEDecoder extends Decoder { | ||||||
|       if (this.count === 0) { |       if (this.count === 0) { | ||||||
|         this.count = this.readInt53() |         this.count = this.readInt53() | ||||||
|         if (this.count > 0) { |         if (this.count > 0) { | ||||||
|           this.lastValue = |           this.lastValue = (this.count <= numSkip) ? this.skipRawValues(1) : this.readRawValue() | ||||||
|             this.count <= numSkip ? this.skipRawValues(1) : this.readRawValue() |           this.state = 'repetition' | ||||||
|           this.state = "repetition" |  | ||||||
|         } else if (this.count < 0) { |         } else if (this.count < 0) { | ||||||
|           this.count = -this.count |           this.count = -this.count | ||||||
|           this.state = "literal" |           this.state = 'literal' | ||||||
|         } else { |         } else { // this.count == 0
 | ||||||
|           // this.count == 0
 |  | ||||||
|           this.count = this.readUint53() |           this.count = this.readUint53() | ||||||
|           this.lastValue = null |           this.lastValue = null | ||||||
|           this.state = "nulls" |           this.state = 'nulls' | ||||||
|         } |         } | ||||||
|       } |       } | ||||||
| 
 | 
 | ||||||
|       const consume = Math.min(numSkip, this.count) |       const consume = Math.min(numSkip, this.count) | ||||||
|       if (this.state === "literal") this.skipRawValues(consume) |       if (this.state === 'literal') this.skipRawValues(consume) | ||||||
|       numSkip -= consume |       numSkip -= consume | ||||||
|       this.count -= consume |       this.count -= consume | ||||||
|     } |     } | ||||||
|  | @ -947,34 +866,23 @@ class RLEDecoder extends Decoder { | ||||||
|     this.count = this.readInt53() |     this.count = this.readInt53() | ||||||
|     if (this.count > 1) { |     if (this.count > 1) { | ||||||
|       const value = this.readRawValue() |       const value = this.readRawValue() | ||||||
|       if ( |       if ((this.state === 'repetition' || this.state === 'literal') && this.lastValue === value) { | ||||||
|         (this.state === "repetition" || this.state === "literal") && |         throw new RangeError('Successive repetitions with the same value are not allowed') | ||||||
|         this.lastValue === value |  | ||||||
|       ) { |  | ||||||
|         throw new RangeError( |  | ||||||
|           "Successive repetitions with the same value are not allowed" |  | ||||||
|         ) |  | ||||||
|       } |       } | ||||||
|       this.state = "repetition" |       this.state = 'repetition' | ||||||
|       this.lastValue = value |       this.lastValue = value | ||||||
|     } else if (this.count === 1) { |     } else if (this.count === 1) { | ||||||
|       throw new RangeError( |       throw new RangeError('Repetition count of 1 is not allowed, use a literal instead') | ||||||
|         "Repetition count of 1 is not allowed, use a literal instead" |  | ||||||
|       ) |  | ||||||
|     } else if (this.count < 0) { |     } else if (this.count < 0) { | ||||||
|       this.count = -this.count |       this.count = -this.count | ||||||
|       if (this.state === "literal") |       if (this.state === 'literal') throw new RangeError('Successive literals are not allowed') | ||||||
|         throw new RangeError("Successive literals are not allowed") |       this.state = 'literal' | ||||||
|       this.state = "literal" |     } else { // this.count == 0
 | ||||||
|     } else { |       if (this.state === 'nulls') throw new RangeError('Successive null runs are not allowed') | ||||||
|       // this.count == 0
 |  | ||||||
|       if (this.state === "nulls") |  | ||||||
|         throw new RangeError("Successive null runs are not allowed") |  | ||||||
|       this.count = this.readUint53() |       this.count = this.readUint53() | ||||||
|       if (this.count === 0) |       if (this.count === 0) throw new RangeError('Zero-length null runs are not allowed') | ||||||
|         throw new RangeError("Zero-length null runs are not allowed") |  | ||||||
|       this.lastValue = null |       this.lastValue = null | ||||||
|       this.state = "nulls" |       this.state = 'nulls' | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -983,11 +891,11 @@ class RLEDecoder extends Decoder { | ||||||
|    * Reads one value of the datatype configured on construction. |    * Reads one value of the datatype configured on construction. | ||||||
|    */ |    */ | ||||||
|   readRawValue() { |   readRawValue() { | ||||||
|     if (this.type === "int") { |     if (this.type === 'int') { | ||||||
|       return this.readInt53() |       return this.readInt53() | ||||||
|     } else if (this.type === "uint") { |     } else if (this.type === 'uint') { | ||||||
|       return this.readUint53() |       return this.readUint53() | ||||||
|     } else if (this.type === "utf8") { |     } else if (this.type === 'utf8') { | ||||||
|       return this.readPrefixedString() |       return this.readPrefixedString() | ||||||
|     } else { |     } else { | ||||||
|       throw new RangeError(`Unknown RLEDecoder datatype: ${this.type}`) |       throw new RangeError(`Unknown RLEDecoder datatype: ${this.type}`) | ||||||
|  | @ -999,14 +907,14 @@ class RLEDecoder extends Decoder { | ||||||
|    * Skips over `num` values of the datatype configured on construction. |    * Skips over `num` values of the datatype configured on construction. | ||||||
|    */ |    */ | ||||||
|   skipRawValues(num) { |   skipRawValues(num) { | ||||||
|     if (this.type === "utf8") { |     if (this.type === 'utf8') { | ||||||
|       for (let i = 0; i < num; i++) this.skip(this.readUint53()) |       for (let i = 0; i < num; i++) this.skip(this.readUint53()) | ||||||
|     } else { |     } else { | ||||||
|       while (num > 0 && this.offset < this.buf.byteLength) { |       while (num > 0 && this.offset < this.buf.byteLength) { | ||||||
|         if ((this.buf[this.offset] & 0x80) === 0) num-- |         if ((this.buf[this.offset] & 0x80) === 0) num-- | ||||||
|         this.offset++ |         this.offset++ | ||||||
|       } |       } | ||||||
|       if (num > 0) throw new RangeError("cannot skip beyond end of buffer") |       if (num > 0) throw new RangeError('cannot skip beyond end of buffer') | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| } | } | ||||||
|  | @ -1023,7 +931,7 @@ class RLEDecoder extends Decoder { | ||||||
|  */ |  */ | ||||||
| class DeltaEncoder extends RLEEncoder { | class DeltaEncoder extends RLEEncoder { | ||||||
|   constructor() { |   constructor() { | ||||||
|     super("int") |     super('int') | ||||||
|     this.absoluteValue = 0 |     this.absoluteValue = 0 | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -1033,7 +941,7 @@ class DeltaEncoder extends RLEEncoder { | ||||||
|    */ |    */ | ||||||
|   appendValue(value, repetitions = 1) { |   appendValue(value, repetitions = 1) { | ||||||
|     if (repetitions <= 0) return |     if (repetitions <= 0) return | ||||||
|     if (typeof value === "number") { |     if (typeof value === 'number') { | ||||||
|       super.appendValue(value - this.absoluteValue, 1) |       super.appendValue(value - this.absoluteValue, 1) | ||||||
|       this.absoluteValue = value |       this.absoluteValue = value | ||||||
|       if (repetitions > 1) super.appendValue(0, repetitions - 1) |       if (repetitions > 1) super.appendValue(0, repetitions - 1) | ||||||
|  | @ -1049,29 +957,26 @@ class DeltaEncoder extends RLEEncoder { | ||||||
|    */ |    */ | ||||||
|   copyFrom(decoder, options = {}) { |   copyFrom(decoder, options = {}) { | ||||||
|     if (options.sumValues) { |     if (options.sumValues) { | ||||||
|       throw new RangeError("unsupported options for DeltaEncoder.copyFrom()") |       throw new RangeError('unsupported options for DeltaEncoder.copyFrom()') | ||||||
|     } |     } | ||||||
|     if (!(decoder instanceof DeltaDecoder)) { |     if (!(decoder instanceof DeltaDecoder)) { | ||||||
|       throw new TypeError("incompatible type of decoder") |       throw new TypeError('incompatible type of decoder') | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     let remaining = options.count |     let remaining = options.count | ||||||
|     if (remaining > 0 && decoder.done) |     if (remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${remaining} values`) | ||||||
|       throw new RangeError(`cannot copy ${remaining} values`) |  | ||||||
|     if (remaining === 0 || decoder.done) return |     if (remaining === 0 || decoder.done) return | ||||||
| 
 | 
 | ||||||
|     // Copy any null values, and the first non-null value, so that appendValue() computes the
 |     // Copy any null values, and the first non-null value, so that appendValue() computes the
 | ||||||
|     // difference between the encoder's last value and the decoder's first (absolute) value.
 |     // difference between the encoder's last value and the decoder's first (absolute) value.
 | ||||||
|     let value = decoder.readValue(), |     let value = decoder.readValue(), nulls = 0 | ||||||
|       nulls = 0 |  | ||||||
|     this.appendValue(value) |     this.appendValue(value) | ||||||
|     if (value === null) { |     if (value === null) { | ||||||
|       nulls = decoder.count + 1 |       nulls = decoder.count + 1 | ||||||
|       if (remaining !== undefined && remaining < nulls) nulls = remaining |       if (remaining !== undefined && remaining < nulls) nulls = remaining | ||||||
|       decoder.count -= nulls - 1 |       decoder.count -= nulls - 1 | ||||||
|       this.count += nulls - 1 |       this.count += nulls - 1 | ||||||
|       if (remaining > nulls && decoder.done) |       if (remaining > nulls && decoder.done) throw new RangeError(`cannot copy ${remaining} values`) | ||||||
|         throw new RangeError(`cannot copy ${remaining} values`) |  | ||||||
|       if (remaining === nulls || decoder.done) return |       if (remaining === nulls || decoder.done) return | ||||||
| 
 | 
 | ||||||
|       // The next value read is certain to be non-null because we're not at the end of the decoder,
 |       // The next value read is certain to be non-null because we're not at the end of the decoder,
 | ||||||
|  | @ -1084,10 +989,7 @@ class DeltaEncoder extends RLEEncoder { | ||||||
|     // value, while subsequent values are relative. Thus, the sum of all of the (non-null) copied
 |     // value, while subsequent values are relative. Thus, the sum of all of the (non-null) copied
 | ||||||
|     // values must equal the absolute value of the final element copied.
 |     // values must equal the absolute value of the final element copied.
 | ||||||
|     if (remaining !== undefined) remaining -= nulls + 1 |     if (remaining !== undefined) remaining -= nulls + 1 | ||||||
|     const { nonNullValues, sum } = super.copyFrom(decoder, { |     const { nonNullValues, sum } = super.copyFrom(decoder, {count: remaining, sumValues: true}) | ||||||
|       count: remaining, |  | ||||||
|       sumValues: true, |  | ||||||
|     }) |  | ||||||
|     if (nonNullValues > 0) { |     if (nonNullValues > 0) { | ||||||
|       this.absoluteValue = sum |       this.absoluteValue = sum | ||||||
|       decoder.absoluteValue = sum |       decoder.absoluteValue = sum | ||||||
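Put differently, the delta layer only ever hands differences down to the RLE layer, which is why mostly-ascending sequences compress well. An illustrative sketch of what the underlying RLE column sees:

    const encoder = new DeltaEncoder()
    for (const value of [100, 101, 103, 103]) encoder.appendValue(value)
    encoder.finish()
    // The underlying RLE column receives the deltas 100, 1, 2, 0 rather than the absolute
    // values; DeltaDecoder accumulates them back into absolute values as it reads.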
|  | @ -1101,7 +1003,7 @@ class DeltaEncoder extends RLEEncoder { | ||||||
|  */ |  */ | ||||||
| class DeltaDecoder extends RLEDecoder { | class DeltaDecoder extends RLEDecoder { | ||||||
|   constructor(buffer) { |   constructor(buffer) { | ||||||
|     super("int", buffer) |     super('int', buffer) | ||||||
|     this.absoluteValue = 0 |     this.absoluteValue = 0 | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -1134,12 +1036,12 @@ class DeltaDecoder extends RLEDecoder { | ||||||
|     while (numSkip > 0 && !this.done) { |     while (numSkip > 0 && !this.done) { | ||||||
|       if (this.count === 0) this.readRecord() |       if (this.count === 0) this.readRecord() | ||||||
|       const consume = Math.min(numSkip, this.count) |       const consume = Math.min(numSkip, this.count) | ||||||
|       if (this.state === "literal") { |       if (this.state === 'literal') { | ||||||
|         for (let i = 0; i < consume; i++) { |         for (let i = 0; i < consume; i++) { | ||||||
|           this.lastValue = this.readRawValue() |           this.lastValue = this.readRawValue() | ||||||
|           this.absoluteValue += this.lastValue |           this.absoluteValue += this.lastValue | ||||||
|         } |         } | ||||||
|       } else if (this.state === "repetition") { |       } else if (this.state === 'repetition') { | ||||||
|         this.absoluteValue += consume * this.lastValue |         this.absoluteValue += consume * this.lastValue | ||||||
|       } |       } | ||||||
|       numSkip -= consume |       numSkip -= consume | ||||||
|  | @ -1188,13 +1090,12 @@ class BooleanEncoder extends Encoder { | ||||||
|    */ |    */ | ||||||
|   copyFrom(decoder, options = {}) { |   copyFrom(decoder, options = {}) { | ||||||
|     if (!(decoder instanceof BooleanDecoder)) { |     if (!(decoder instanceof BooleanDecoder)) { | ||||||
|       throw new TypeError("incompatible type of decoder") |       throw new TypeError('incompatible type of decoder') | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     const { count } = options |     const { count } = options | ||||||
|     let remaining = typeof count === "number" ? count : Number.MAX_SAFE_INTEGER |     let remaining = (typeof count === 'number' ? count : Number.MAX_SAFE_INTEGER) | ||||||
|     if (count && remaining > 0 && decoder.done) |     if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|       throw new RangeError(`cannot copy ${count} values`) |  | ||||||
|     if (remaining === 0 || decoder.done) return |     if (remaining === 0 || decoder.done) return | ||||||
| 
 | 
 | ||||||
|     // Copy one value to bring decoder and encoder state into sync, then finish that value's repetitions
 |     // Copy one value to bring decoder and encoder state into sync, then finish that value's repetitions
 | ||||||
|  | @ -1207,8 +1108,7 @@ class BooleanEncoder extends Encoder { | ||||||
| 
 | 
 | ||||||
|     while (remaining > 0 && !decoder.done) { |     while (remaining > 0 && !decoder.done) { | ||||||
|       decoder.count = decoder.readUint53() |       decoder.count = decoder.readUint53() | ||||||
|       if (decoder.count === 0) |       if (decoder.count === 0) throw new RangeError('Zero-length runs are not allowed') | ||||||
|         throw new RangeError("Zero-length runs are not allowed") |  | ||||||
|       decoder.lastValue = !decoder.lastValue |       decoder.lastValue = !decoder.lastValue | ||||||
|       this.appendUint53(this.count) |       this.appendUint53(this.count) | ||||||
| 
 | 
 | ||||||
|  | @ -1219,8 +1119,7 @@ class BooleanEncoder extends Encoder { | ||||||
|       remaining -= numCopied |       remaining -= numCopied | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (count && remaining > 0 && decoder.done) |     if (count && remaining > 0 && decoder.done) throw new RangeError(`cannot copy ${count} values`) | ||||||
|       throw new RangeError(`cannot copy ${count} values`) |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -1252,7 +1151,7 @@ class BooleanDecoder extends Decoder { | ||||||
|    * position, and true if we are at the end of the buffer. |    * position, and true if we are at the end of the buffer. | ||||||
|    */ |    */ | ||||||
|   get done() { |   get done() { | ||||||
|     return this.count === 0 && this.offset === this.buf.byteLength |     return (this.count === 0) && (this.offset === this.buf.byteLength) | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   /** |   /** | ||||||
|  | @ -1275,7 +1174,7 @@ class BooleanDecoder extends Decoder { | ||||||
|       this.count = this.readUint53() |       this.count = this.readUint53() | ||||||
|       this.lastValue = !this.lastValue |       this.lastValue = !this.lastValue | ||||||
|       if (this.count === 0 && !this.firstRun) { |       if (this.count === 0 && !this.firstRun) { | ||||||
|         throw new RangeError("Zero-length runs are not allowed") |         throw new RangeError('Zero-length runs are not allowed') | ||||||
|       } |       } | ||||||
|       this.firstRun = false |       this.firstRun = false | ||||||
|     } |     } | ||||||
|  | @ -1291,8 +1190,7 @@ class BooleanDecoder extends Decoder { | ||||||
|       if (this.count === 0) { |       if (this.count === 0) { | ||||||
|         this.count = this.readUint53() |         this.count = this.readUint53() | ||||||
|         this.lastValue = !this.lastValue |         this.lastValue = !this.lastValue | ||||||
|         if (this.count === 0) |         if (this.count === 0) throw new RangeError('Zero-length runs are not allowed') | ||||||
|           throw new RangeError("Zero-length runs are not allowed") |  | ||||||
|       } |       } | ||||||
|       if (this.count < numSkip) { |       if (this.count < numSkip) { | ||||||
|         numSkip -= this.count |         numSkip -= this.count | ||||||
|  | @ -1306,16 +1204,6 @@ class BooleanDecoder extends Decoder { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| module.exports = { | module.exports = { | ||||||
|   stringToUtf8, |   stringToUtf8, utf8ToString, hexStringToBytes, bytesToHexString, | ||||||
|   utf8ToString, |   Encoder, Decoder, RLEEncoder, RLEDecoder, DeltaEncoder, DeltaDecoder, BooleanEncoder, BooleanDecoder | ||||||
|   hexStringToBytes, |  | ||||||
|   bytesToHexString, |  | ||||||
|   Encoder, |  | ||||||
|   Decoder, |  | ||||||
|   RLEEncoder, |  | ||||||
|   RLEDecoder, |  | ||||||
|   DeltaEncoder, |  | ||||||
|   DeltaDecoder, |  | ||||||
|   BooleanEncoder, |  | ||||||
|   BooleanDecoder, |  | ||||||
| } | } | ||||||
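For orientation (not part of the diff): a minimal sketch of how the boolean run-length coding above is exercised. It assumes `BooleanEncoder#appendValue`, `Encoder#buffer` and `BooleanDecoder#readValue`, which this module defines in hunks not shown here.

```js
// Illustrative sketch only; appendValue/buffer/readValue are assumed from the rest of encoding.js.
const { BooleanEncoder, BooleanDecoder } = require('./encoding')

const enc = new BooleanEncoder()
for (const bit of [false, false, true, true, true]) enc.appendValue(bit)

const dec = new BooleanDecoder(enc.buffer)
const bits = []
while (!dec.done) bits.push(dec.readValue()) // [false, false, true, true, true]
// The encoded form is just the alternating run lengths (2, 3); copyFrom() above streams
// those runs from a decoder into an encoder instead of re-reading each boolean one by one.
```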
|  |  | ||||||
|  | @ -17,14 +17,9 @@ | ||||||
|  */ |  */ | ||||||
| 
 | 
 | ||||||
| const Backend = null //require('./backend')
 | const Backend = null //require('./backend')
 | ||||||
| const { | const { hexStringToBytes, bytesToHexString, Encoder, Decoder } = require('./encoding') | ||||||
|   hexStringToBytes, | const { decodeChangeMeta } = require('./columnar') | ||||||
|   bytesToHexString, | const { copyObject } = require('./common') | ||||||
|   Encoder, |  | ||||||
|   Decoder, |  | ||||||
| } = require("./encoding") |  | ||||||
| const { decodeChangeMeta } = require("./columnar") |  | ||||||
| const { copyObject } = require("./common") |  | ||||||
| 
 | 
 | ||||||
| const HASH_SIZE = 32 // 256 bits = 32 bytes
 | const HASH_SIZE = 32 // 256 bits = 32 bytes
 | ||||||
| const MESSAGE_TYPE_SYNC = 0x42 // first byte of a sync message, for identification
 | const MESSAGE_TYPE_SYNC = 0x42 // first byte of a sync message, for identification
 | ||||||
|  | @ -33,8 +28,7 @@ const PEER_STATE_TYPE = 0x43 // first byte of an encoded peer state, for identif | ||||||
| // These constants correspond to a 1% false positive rate. The values can be changed without | // These constants correspond to a 1% false positive rate. The values can be changed without | ||||||
| // breaking compatibility of the network protocol, since the parameters used for a particular | // breaking compatibility of the network protocol, since the parameters used for a particular | ||||||
| // Bloom filter are encoded in the wire format. | // Bloom filter are encoded in the wire format. | ||||||
| const BITS_PER_ENTRY = 10, | const BITS_PER_ENTRY = 10, NUM_PROBES = 7 | ||||||
|   NUM_PROBES = 7 |  | ||||||
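A quick sanity check of the "1% false positive rate" claim above, using the standard Bloom filter approximation (editorial illustration only, not part of the diff):

```js
// false positive rate ≈ (1 - e^(-k / bitsPerEntry))^k, for k probes and bitsPerEntry bits per entry
const k = 7, bitsPerEntry = 10 // the NUM_PROBES and BITS_PER_ENTRY values defined above
const fpr = Math.pow(1 - Math.exp(-k / bitsPerEntry), k)
console.log(fpr.toFixed(4)) // ≈ 0.0082, i.e. roughly 1%
```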
| 
 | 
 | ||||||
| /** | /** | ||||||
|  * A Bloom filter implementation that can be serialised to a byte array for transmission |  * A Bloom filter implementation that can be serialised to a byte array for transmission | ||||||
|  | @ -48,9 +42,7 @@ class BloomFilter { | ||||||
|       this.numEntries = arg.length |       this.numEntries = arg.length | ||||||
|       this.numBitsPerEntry = BITS_PER_ENTRY |       this.numBitsPerEntry = BITS_PER_ENTRY | ||||||
|       this.numProbes = NUM_PROBES |       this.numProbes = NUM_PROBES | ||||||
|       this.bits = new Uint8Array( |       this.bits = new Uint8Array(Math.ceil(this.numEntries * this.numBitsPerEntry / 8)) | ||||||
|         Math.ceil((this.numEntries * this.numBitsPerEntry) / 8) |  | ||||||
|       ) |  | ||||||
|       for (let hash of arg) this.addHash(hash) |       for (let hash of arg) this.addHash(hash) | ||||||
|     } else if (arg instanceof Uint8Array) { |     } else if (arg instanceof Uint8Array) { | ||||||
|       if (arg.byteLength === 0) { |       if (arg.byteLength === 0) { | ||||||
|  | @ -63,12 +55,10 @@ class BloomFilter { | ||||||
|         this.numEntries = decoder.readUint32() |         this.numEntries = decoder.readUint32() | ||||||
|         this.numBitsPerEntry = decoder.readUint32() |         this.numBitsPerEntry = decoder.readUint32() | ||||||
|         this.numProbes = decoder.readUint32() |         this.numProbes = decoder.readUint32() | ||||||
|         this.bits = decoder.readRawBytes( |         this.bits = decoder.readRawBytes(Math.ceil(this.numEntries * this.numBitsPerEntry / 8)) | ||||||
|           Math.ceil((this.numEntries * this.numBitsPerEntry) / 8) |  | ||||||
|         ) |  | ||||||
|       } |       } | ||||||
|     } else { |     } else { | ||||||
|       throw new TypeError("invalid argument") |       throw new TypeError('invalid argument') | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  | @ -96,32 +86,12 @@ class BloomFilter { | ||||||
|    * http://www.ccis.northeastern.edu/home/pete/pub/bloom-filters-verification.pdf |    * http://www.ccis.northeastern.edu/home/pete/pub/bloom-filters-verification.pdf | ||||||
|    */ |    */ | ||||||
|   getProbes(hash) { |   getProbes(hash) { | ||||||
|     const hashBytes = hexStringToBytes(hash), |     const hashBytes = hexStringToBytes(hash), modulo = 8 * this.bits.byteLength | ||||||
|       modulo = 8 * this.bits.byteLength |     if (hashBytes.byteLength !== 32) throw new RangeError(`Not a 256-bit hash: ${hash}`) | ||||||
|     if (hashBytes.byteLength !== 32) |  | ||||||
|       throw new RangeError(`Not a 256-bit hash: ${hash}`) |  | ||||||
|     // on the next three lines, the right shift means interpret value as unsigned |     // on the next three lines, the right shift means interpret value as unsigned | ||||||
|     let x = |     let x = ((hashBytes[0] | hashBytes[1] << 8 | hashBytes[2]  << 16 | hashBytes[3]  << 24) >>> 0) % modulo | ||||||
|       ((hashBytes[0] | |     let y = ((hashBytes[4] | hashBytes[5] << 8 | hashBytes[6]  << 16 | hashBytes[7]  << 24) >>> 0) % modulo | ||||||
|         (hashBytes[1] << 8) | |     let z = ((hashBytes[8] | hashBytes[9] << 8 | hashBytes[10] << 16 | hashBytes[11] << 24) >>> 0) % modulo | ||||||
|         (hashBytes[2] << 16) | |  | ||||||
|         (hashBytes[3] << 24)) >>> |  | ||||||
|         0) % |  | ||||||
|       modulo |  | ||||||
|     let y = |  | ||||||
|       ((hashBytes[4] | |  | ||||||
|         (hashBytes[5] << 8) | |  | ||||||
|         (hashBytes[6] << 16) | |  | ||||||
|         (hashBytes[7] << 24)) >>> |  | ||||||
|         0) % |  | ||||||
|       modulo |  | ||||||
|     let z = |  | ||||||
|       ((hashBytes[8] | |  | ||||||
|         (hashBytes[9] << 8) | |  | ||||||
|         (hashBytes[10] << 16) | |  | ||||||
|         (hashBytes[11] << 24)) >>> |  | ||||||
|         0) % |  | ||||||
|       modulo |  | ||||||
|     const probes = [x] |     const probes = [x] | ||||||
|     for (let i = 1; i < this.numProbes; i++) { |     for (let i = 1; i < this.numProbes; i++) { | ||||||
|       x = (x + y) % modulo |       x = (x + y) % modulo | ||||||
|  | @ -158,14 +128,12 @@ class BloomFilter { | ||||||
|  * Encodes a sorted array of SHA-256 hashes (as hexadecimal strings) into a byte array. |  * Encodes a sorted array of SHA-256 hashes (as hexadecimal strings) into a byte array. | ||||||
|  */ |  */ | ||||||
| function encodeHashes(encoder, hashes) { | function encodeHashes(encoder, hashes) { | ||||||
|   if (!Array.isArray(hashes)) throw new TypeError("hashes must be an array") |   if (!Array.isArray(hashes)) throw new TypeError('hashes must be an array') | ||||||
|   encoder.appendUint32(hashes.length) |   encoder.appendUint32(hashes.length) | ||||||
|   for (let i = 0; i < hashes.length; i++) { |   for (let i = 0; i < hashes.length; i++) { | ||||||
|     if (i > 0 && hashes[i - 1] >= hashes[i]) |     if (i > 0 && hashes[i - 1] >= hashes[i]) throw new RangeError('hashes must be sorted') | ||||||
|       throw new RangeError("hashes must be sorted") |  | ||||||
|     const bytes = hexStringToBytes(hashes[i]) |     const bytes = hexStringToBytes(hashes[i]) | ||||||
|     if (bytes.byteLength !== HASH_SIZE) |     if (bytes.byteLength !== HASH_SIZE) throw new TypeError('heads hashes must be 256 bits') | ||||||
|       throw new TypeError("heads hashes must be 256 bits") |  | ||||||
|     encoder.appendRawBytes(bytes) |     encoder.appendRawBytes(bytes) | ||||||
|   } |   } | ||||||
| } | } | ||||||
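For orientation (not part of the diff): `encodeHashes` and `decodeHashes` (just below) are inverses. A minimal sketch, assuming `Encoder#buffer` exposes the bytes written so far, as elsewhere in encoding.js:

```js
const { Encoder, Decoder } = require('./encoding')

// Two hypothetical, already-sorted 256-bit hashes as hex strings (64 hex chars = 32 bytes each)
const hashes = ['11'.repeat(32), 'ff'.repeat(32)]

const encoder = new Encoder()
encodeHashes(encoder, hashes) // writes a uint32 count followed by the raw hash bytes
const roundTripped = decodeHashes(new Decoder(encoder.buffer))
// roundTripped deep-equals hashes
```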
|  | @ -175,8 +143,7 @@ function encodeHashes(encoder, hashes) { | ||||||
|  * array of hex strings. |  * array of hex strings. | ||||||
|  */ |  */ | ||||||
| function decodeHashes(decoder) { | function decodeHashes(decoder) { | ||||||
|   let length = decoder.readUint32(), |   let length = decoder.readUint32(), hashes = [] | ||||||
|     hashes = [] |  | ||||||
|   for (let i = 0; i < length; i++) { |   for (let i = 0; i < length; i++) { | ||||||
|     hashes.push(bytesToHexString(decoder.readRawBytes(HASH_SIZE))) |     hashes.push(bytesToHexString(decoder.readRawBytes(HASH_SIZE))) | ||||||
|   } |   } | ||||||
|  | @ -278,26 +245,20 @@ function makeBloomFilter(backend, lastSync) { | ||||||
|  */ |  */ | ||||||
| function getChangesToSend(backend, have, need) { | function getChangesToSend(backend, have, need) { | ||||||
|   if (have.length === 0) { |   if (have.length === 0) { | ||||||
|     return need |     return need.map(hash => Backend.getChangeByHash(backend, hash)).filter(change => change !== undefined) | ||||||
|       .map(hash => Backend.getChangeByHash(backend, hash)) |  | ||||||
|       .filter(change => change !== undefined) |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   let lastSyncHashes = {}, |   let lastSyncHashes = {}, bloomFilters = [] | ||||||
|     bloomFilters = [] |  | ||||||
|   for (let h of have) { |   for (let h of have) { | ||||||
|     for (let hash of h.lastSync) lastSyncHashes[hash] = true |     for (let hash of h.lastSync) lastSyncHashes[hash] = true | ||||||
|     bloomFilters.push(new BloomFilter(h.bloom)) |     bloomFilters.push(new BloomFilter(h.bloom)) | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   // Get all changes that were added since the last sync
 |   // Get all changes that were added since the last sync
 | ||||||
|   const changes = Backend.getChanges(backend, Object.keys(lastSyncHashes)).map( |   const changes = Backend.getChanges(backend, Object.keys(lastSyncHashes)) | ||||||
|     change => decodeChangeMeta(change, true) |     .map(change => decodeChangeMeta(change, true)) | ||||||
|   ) |  | ||||||
| 
 | 
 | ||||||
|   let changeHashes = {}, |   let changeHashes = {}, dependents = {}, hashesToSend = {} | ||||||
|     dependents = {}, |  | ||||||
|     hashesToSend = {} |  | ||||||
|   for (let change of changes) { |   for (let change of changes) { | ||||||
|     changeHashes[change.hash] = true |     changeHashes[change.hash] = true | ||||||
| 
 | 
 | ||||||
|  | @ -331,8 +292,7 @@ function getChangesToSend(backend, have, need) { | ||||||
|   let changesToSend = [] |   let changesToSend = [] | ||||||
|   for (let hash of need) { |   for (let hash of need) { | ||||||
|     hashesToSend[hash] = true |     hashesToSend[hash] = true | ||||||
|     if (!changeHashes[hash]) { |     if (!changeHashes[hash]) { // Change is not among those returned by getMissingChanges()? | ||||||
|       // Change is not among those returned by getMissingChanges()? |  | ||||||
|       const change = Backend.getChangeByHash(backend, hash) |       const change = Backend.getChangeByHash(backend, hash) | ||||||
|       if (change) changesToSend.push(change) |       if (change) changesToSend.push(change) | ||||||
|     } |     } | ||||||
|  | @ -357,7 +317,7 @@ function initSyncState() { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| function compareArrays(a, b) { | function compareArrays(a, b) { | ||||||
|   return a.length === b.length && a.every((v, i) => v === b[i]) |     return (a.length === b.length) && a.every((v, i) => v === b[i]) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  | @ -369,19 +329,10 @@ function generateSyncMessage(backend, syncState) { | ||||||
|     throw new Error("generateSyncMessage called with no Automerge document") |     throw new Error("generateSyncMessage called with no Automerge document") | ||||||
|   } |   } | ||||||
|   if (!syncState) { |   if (!syncState) { | ||||||
|     throw new Error( |     throw new Error("generateSyncMessage requires a syncState, which can be created with initSyncState()") | ||||||
|       "generateSyncMessage requires a syncState, which can be created with initSyncState()" |  | ||||||
|     ) |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   let { |   let { sharedHeads, lastSentHeads, theirHeads, theirNeed, theirHave, sentHashes } = syncState | ||||||
|     sharedHeads, |  | ||||||
|     lastSentHeads, |  | ||||||
|     theirHeads, |  | ||||||
|     theirNeed, |  | ||||||
|     theirHave, |  | ||||||
|     sentHashes, |  | ||||||
|   } = syncState |  | ||||||
|   const ourHeads = Backend.getHeads(backend) |   const ourHeads = Backend.getHeads(backend) | ||||||
| 
 | 
 | ||||||
|   // Hashes to explicitly request from the remote peer: any missing dependencies of unapplied
 |   // Hashes to explicitly request from the remote peer: any missing dependencies of unapplied
 | ||||||
|  | @ -405,28 +356,18 @@ function generateSyncMessage(backend, syncState) { | ||||||
|     const lastSync = theirHave[0].lastSync |     const lastSync = theirHave[0].lastSync | ||||||
|     if (!lastSync.every(hash => Backend.getChangeByHash(backend, hash))) { |     if (!lastSync.every(hash => Backend.getChangeByHash(backend, hash))) { | ||||||
|       // we need them to send us a fresh sync message; the one they sent is unintelligible, so we don't know what they need |       // we need them to send us a fresh sync message; the one they sent is unintelligible, so we don't know what they need | ||||||
|       const resetMsg = { |       const resetMsg = {heads: ourHeads, need: [], have: [{ lastSync: [], bloom: new Uint8Array(0) }], changes: []} | ||||||
|         heads: ourHeads, |  | ||||||
|         need: [], |  | ||||||
|         have: [{ lastSync: [], bloom: new Uint8Array(0) }], |  | ||||||
|         changes: [], |  | ||||||
|       } |  | ||||||
|       return [syncState, encodeSyncMessage(resetMsg)] |       return [syncState, encodeSyncMessage(resetMsg)] | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   // XXX: we should limit ourselves to only sending a subset of all the messages, probably limited by a total message size
 |   // XXX: we should limit ourselves to only sending a subset of all the messages, probably limited by a total message size
 | ||||||
|   //      these changes should ideally be RLE encoded but we haven't implemented that yet.
 |   //      these changes should ideally be RLE encoded but we haven't implemented that yet.
 | ||||||
|   let changesToSend = |   let changesToSend = Array.isArray(theirHave) && Array.isArray(theirNeed) ? getChangesToSend(backend, theirHave, theirNeed) : [] | ||||||
|     Array.isArray(theirHave) && Array.isArray(theirNeed) |  | ||||||
|       ? getChangesToSend(backend, theirHave, theirNeed) |  | ||||||
|       : [] |  | ||||||
| 
 | 
 | ||||||
|   // If the heads are equal, we're in sync and don't need to do anything further
 |   // If the heads are equal, we're in sync and don't need to do anything further
 | ||||||
|   const headsUnchanged = |   const headsUnchanged = Array.isArray(lastSentHeads) && compareArrays(ourHeads, lastSentHeads) | ||||||
|     Array.isArray(lastSentHeads) && compareArrays(ourHeads, lastSentHeads) |   const headsEqual = Array.isArray(theirHeads) && compareArrays(ourHeads, theirHeads) | ||||||
|   const headsEqual = |  | ||||||
|     Array.isArray(theirHeads) && compareArrays(ourHeads, theirHeads) |  | ||||||
|   if (headsUnchanged && headsEqual && changesToSend.length === 0) { |   if (headsUnchanged && headsEqual && changesToSend.length === 0) { | ||||||
|     // no need to send a sync message if we know we're synced! |     // no need to send a sync message if we know we're synced! | ||||||
|     return [syncState, null] |     return [syncState, null] | ||||||
|  | @ -434,19 +375,12 @@ function generateSyncMessage(backend, syncState) { | ||||||
| 
 | 
 | ||||||
|   // TODO: this recomputes the SHA-256 hash of each change; we should restructure this to avoid the
 |   // TODO: this recomputes the SHA-256 hash of each change; we should restructure this to avoid the
 | ||||||
|   // unnecessary recomputation
 |   // unnecessary recomputation
 | ||||||
|   changesToSend = changesToSend.filter( |   changesToSend = changesToSend.filter(change => !sentHashes[decodeChangeMeta(change, true).hash]) | ||||||
|     change => !sentHashes[decodeChangeMeta(change, true).hash] |  | ||||||
|   ) |  | ||||||
| 
 | 
 | ||||||
|   // Regular response to a sync message: send any changes that the other node
 |   // Regular response to a sync message: send any changes that the other node
 | ||||||
|   // doesn't have. We leave the "have" field empty because the previous message
 |   // doesn't have. We leave the "have" field empty because the previous message
 | ||||||
|   // generated by `syncStart` already indicated what changes we have.
 |   // generated by `syncStart` already indicated what changes we have.
 | ||||||
|   const syncMessage = { |   const syncMessage = {heads: ourHeads, have: ourHave, need: ourNeed, changes: changesToSend} | ||||||
|     heads: ourHeads, |  | ||||||
|     have: ourHave, |  | ||||||
|     need: ourNeed, |  | ||||||
|     changes: changesToSend, |  | ||||||
|   } |  | ||||||
|   if (changesToSend.length > 0) { |   if (changesToSend.length > 0) { | ||||||
|     sentHashes = copyObject(sentHashes) |     sentHashes = copyObject(sentHashes) | ||||||
|     for (const change of changesToSend) { |     for (const change of changesToSend) { | ||||||
|  | @ -454,10 +388,7 @@ function generateSyncMessage(backend, syncState) { | ||||||
|     } |     } | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   syncState = Object.assign({}, syncState, { |   syncState = Object.assign({}, syncState, {lastSentHeads: ourHeads, sentHashes}) | ||||||
|     lastSentHeads: ourHeads, |  | ||||||
|     sentHashes, |  | ||||||
|   }) |  | ||||||
|   return [syncState, encodeSyncMessage(syncMessage)] |   return [syncState, encodeSyncMessage(syncMessage)] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -475,14 +406,13 @@ function generateSyncMessage(backend, syncState) { | ||||||
|  * another peer, that means that peer had those changes, and therefore we now both know about them. |  * another peer, that means that peer had those changes, and therefore we now both know about them. | ||||||
|  */ |  */ | ||||||
| function advanceHeads(myOldHeads, myNewHeads, ourOldSharedHeads) { | function advanceHeads(myOldHeads, myNewHeads, ourOldSharedHeads) { | ||||||
|   const newHeads = myNewHeads.filter(head => !myOldHeads.includes(head)) |   const newHeads = myNewHeads.filter((head) => !myOldHeads.includes(head)) | ||||||
|   const commonHeads = ourOldSharedHeads.filter(head => |   const commonHeads = ourOldSharedHeads.filter((head) => myNewHeads.includes(head)) | ||||||
|     myNewHeads.includes(head) |  | ||||||
|   ) |  | ||||||
|   const advancedHeads = [...new Set([...newHeads, ...commonHeads])].sort() |   const advancedHeads = [...new Set([...newHeads, ...commonHeads])].sort() | ||||||
|   return advancedHeads |   return advancedHeads | ||||||
| } | } | ||||||
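A tiny worked example of `advanceHeads` (hypothetical single-letter "hashes", for illustration only):

```js
// Our heads moved from [a, b] to [b, c]; the previously shared heads were [a, b].
// newHeads    = ['c']       (present in myNewHeads but not in myOldHeads)
// commonHeads = ['b']       (old shared heads that are still heads)
// result      = ['b', 'c']  (deduplicated union, sorted)
advanceHeads(['a', 'b'], ['b', 'c'], ['a', 'b']) // => ['b', 'c']
```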
| 
 | 
 | ||||||
|  | 
 | ||||||
| /** | /** | ||||||
|  * Given a backend, a message and the state of our peer, apply any changes, update what |  * Given a backend, a message and the state of our peer, apply any changes, update what | ||||||
|  * we believe about the peer, and (if there were applied changes) produce a patch for the frontend |  * we believe about the peer, and (if there were applied changes) produce a patch for the frontend | ||||||
|  | @ -492,13 +422,10 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { | ||||||
|     throw new Error("generateSyncMessage called with no Automerge document") |     throw new Error("generateSyncMessage called with no Automerge document") | ||||||
|   } |   } | ||||||
|   if (!oldSyncState) { |   if (!oldSyncState) { | ||||||
|     throw new Error( |     throw new Error("receiveSyncMessage requires a syncState, which can be created with initSyncState()") | ||||||
|       "receiveSyncMessage requires a syncState, which can be created with initSyncState()" |  | ||||||
|     ) |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   let { sharedHeads, lastSentHeads, sentHashes } = oldSyncState, |   let { sharedHeads, lastSentHeads, sentHashes } = oldSyncState, patch = null | ||||||
|     patch = null |  | ||||||
|   const message = decodeSyncMessage(binaryMessage) |   const message = decodeSyncMessage(binaryMessage) | ||||||
|   const beforeHeads = Backend.getHeads(backend) |   const beforeHeads = Backend.getHeads(backend) | ||||||
| 
 | 
 | ||||||
|  | @ -507,27 +434,18 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { | ||||||
|   // changes without applying them. The set of changes may also be incomplete if the sender decided |   // changes without applying them. The set of changes may also be incomplete if the sender decided | ||||||
|   // to break a large set of changes into chunks. |   // to break a large set of changes into chunks. | ||||||
|   if (message.changes.length > 0) { |   if (message.changes.length > 0) { | ||||||
|     ;[backend, patch] = Backend.applyChanges(backend, message.changes) |     [backend, patch] = Backend.applyChanges(backend, message.changes) | ||||||
|     sharedHeads = advanceHeads( |     sharedHeads = advanceHeads(beforeHeads, Backend.getHeads(backend), sharedHeads) | ||||||
|       beforeHeads, |  | ||||||
|       Backend.getHeads(backend), |  | ||||||
|       sharedHeads |  | ||||||
|     ) |  | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   // If heads are equal, indicate we don't need to send a response message
 |   // If heads are equal, indicate we don't need to send a response message
 | ||||||
|   if ( |   if (message.changes.length === 0 && compareArrays(message.heads, beforeHeads)) { | ||||||
|     message.changes.length === 0 && |  | ||||||
|     compareArrays(message.heads, beforeHeads) |  | ||||||
|   ) { |  | ||||||
|     lastSentHeads = message.heads |     lastSentHeads = message.heads | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   // If all of the remote heads are known to us, that means either our heads are equal, or we are
 |   // If all of the remote heads are known to us, that means either our heads are equal, or we are
 | ||||||
|   // ahead of the remote peer. In this case, take the remote heads to be our shared heads.
 |   // ahead of the remote peer. In this case, take the remote heads to be our shared heads.
 | ||||||
|   const knownHeads = message.heads.filter(head => |   const knownHeads = message.heads.filter(head => Backend.getChangeByHash(backend, head)) | ||||||
|     Backend.getChangeByHash(backend, head) |  | ||||||
|   ) |  | ||||||
|   if (knownHeads.length === message.heads.length) { |   if (knownHeads.length === message.heads.length) { | ||||||
|     sharedHeads = message.heads |     sharedHeads = message.heads | ||||||
|     // If the remote peer has lost all its data, reset our state to perform a full resync |     // If the remote peer has lost all its data, reset our state to perform a full resync | ||||||
|  | @ -549,18 +467,14 @@ function receiveSyncMessage(backend, oldSyncState, binaryMessage) { | ||||||
|     theirHave: message.have, // the information we need to calculate the changes they need |     theirHave: message.have, // the information we need to calculate the changes they need | ||||||
|     theirHeads: message.heads, |     theirHeads: message.heads, | ||||||
|     theirNeed: message.need, |     theirNeed: message.need, | ||||||
|     sentHashes, |     sentHashes | ||||||
|   } |   } | ||||||
|   return [backend, syncState, patch] |   return [backend, syncState, patch] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| module.exports = { | module.exports = { | ||||||
|   receiveSyncMessage, |   receiveSyncMessage, generateSyncMessage, | ||||||
|   generateSyncMessage, |   encodeSyncMessage, decodeSyncMessage, | ||||||
|   encodeSyncMessage, |   initSyncState, encodeSyncState, decodeSyncState, | ||||||
|   decodeSyncMessage, |   BloomFilter // BloomFilter is a private API, exported only for testing purposes | ||||||
|   initSyncState, |  | ||||||
|   encodeSyncState, |  | ||||||
|   decodeSyncState, |  | ||||||
|   BloomFilter, // BloomFilter is a private API, exported only for testing purposes |  | ||||||
| } | } | ||||||
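For orientation (not part of the diff): the intended call pattern for the exports above, sketched for two peers. `backend1` and `backend2` are hypothetical backend instances the caller already has; the tuple shapes match the function bodies shown above.

```js
// Hypothetical two-peer exchange (illustration only)
let state1 = initSyncState(), state2 = initSyncState()
let msg, patch

// Peer 1 describes what it has and needs...
;[state1, msg] = generateSyncMessage(backend1, state1)
if (msg !== null) {
  // ...peer 2 applies the message, possibly producing a patch for its frontend...
  ;[backend2, state2, patch] = receiveSyncMessage(backend2, state2, msg)
}
// ...and the exchange repeats in both directions until generateSyncMessage returns a null message.
```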
|  |  | ||||||
File diff suppressed because it is too large
|  | @ -1,99 +0,0 @@ | ||||||
| import * as assert from "assert" |  | ||||||
| import * as stable from "../src" |  | ||||||
| import { unstable } from "../src" |  | ||||||
| 
 |  | ||||||
| describe("stable/unstable interop", () => { |  | ||||||
|   it("should allow reading Text from stable as strings in unstable", () => { |  | ||||||
|     let stableDoc = stable.from({ |  | ||||||
|       text: new stable.Text("abc"), |  | ||||||
|     }) |  | ||||||
|     let unstableDoc = unstable.init<any>() |  | ||||||
|     unstableDoc = unstable.merge(unstableDoc, stableDoc) |  | ||||||
|     assert.deepStrictEqual(unstableDoc.text, "abc") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow string from stable as Text in unstable", () => { |  | ||||||
|     let unstableDoc = unstable.from({ |  | ||||||
|       text: "abc", |  | ||||||
|     }) |  | ||||||
|     let stableDoc = stable.init<any>() |  | ||||||
|     stableDoc = unstable.merge(stableDoc, unstableDoc) |  | ||||||
|     assert.deepStrictEqual(stableDoc.text, new stable.Text("abc")) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow reading strings from stable as RawString in unstable", () => { |  | ||||||
|     let stableDoc = stable.from({ |  | ||||||
|       text: "abc", |  | ||||||
|     }) |  | ||||||
|     let unstableDoc = unstable.init<any>() |  | ||||||
|     unstableDoc = unstable.merge(unstableDoc, stableDoc) |  | ||||||
|     assert.deepStrictEqual(unstableDoc.text, new unstable.RawString("abc")) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow reading RawString from unstable as string in stable", () => { |  | ||||||
|     let unstableDoc = unstable.from({ |  | ||||||
|       text: new unstable.RawString("abc"), |  | ||||||
|     }) |  | ||||||
|     let stableDoc = stable.init<any>() |  | ||||||
|     stableDoc = unstable.merge(stableDoc, unstableDoc) |  | ||||||
|     assert.deepStrictEqual(stableDoc.text, "abc") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should show conflicts on text objects", () => { |  | ||||||
|     let doc1 = stable.from({ text: new stable.Text("abc") }, "bb") |  | ||||||
|     let doc2 = stable.from({ text: new stable.Text("def") }, "aa") |  | ||||||
|     doc1 = stable.merge(doc1, doc2) |  | ||||||
|     let conflicts = stable.getConflicts(doc1, "text")! |  | ||||||
|     assert.equal(conflicts["1@bb"]!.toString(), "abc") |  | ||||||
|     assert.equal(conflicts["1@aa"]!.toString(), "def") |  | ||||||
| 
 |  | ||||||
|     let unstableDoc = unstable.init<any>() |  | ||||||
|     unstableDoc = unstable.merge(unstableDoc, doc1) |  | ||||||
|     let conflicts2 = unstable.getConflicts(unstableDoc, "text")! |  | ||||||
|     assert.equal(conflicts2["1@bb"]!.toString(), "abc") |  | ||||||
|     assert.equal(conflicts2["1@aa"]!.toString(), "def") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow filling a list with text in stable", () => { |  | ||||||
|     let doc = stable.from<{ list: Array<stable.Text | null> }>({ |  | ||||||
|       list: [null, null, null], |  | ||||||
|     }) |  | ||||||
|     doc = stable.change(doc, doc => { |  | ||||||
|       doc.list.fill(new stable.Text("abc"), 0, 3) |  | ||||||
|     }) |  | ||||||
|     assert.deepStrictEqual(doc.list, [ |  | ||||||
|       new stable.Text("abc"), |  | ||||||
|       new stable.Text("abc"), |  | ||||||
|       new stable.Text("abc"), |  | ||||||
|     ]) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow filling a list with text in unstable", () => { |  | ||||||
|     let doc = unstable.from<{ list: Array<string | null> }>({ |  | ||||||
|       list: [null, null, null], |  | ||||||
|     }) |  | ||||||
|     doc = stable.change(doc, doc => { |  | ||||||
|       doc.list.fill("abc", 0, 3) |  | ||||||
|     }) |  | ||||||
|     assert.deepStrictEqual(doc.list, ["abc", "abc", "abc"]) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow splicing text into a list on stable", () => { |  | ||||||
|     let doc = stable.from<{ list: Array<stable.Text> }>({ list: [] }) |  | ||||||
|     doc = stable.change(doc, doc => { |  | ||||||
|       doc.list.splice(0, 0, new stable.Text("abc"), new stable.Text("def")) |  | ||||||
|     }) |  | ||||||
|     assert.deepStrictEqual(doc.list, [ |  | ||||||
|       new stable.Text("abc"), |  | ||||||
|       new stable.Text("def"), |  | ||||||
|     ]) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow splicing text into a list on unstable", () => { |  | ||||||
|     let doc = unstable.from<{ list: Array<string> }>({ list: [] }) |  | ||||||
|     doc = unstable.change(doc, doc => { |  | ||||||
|       doc.list.splice(0, 0, "abc", "def") |  | ||||||
|     }) |  | ||||||
|     assert.deepStrictEqual(doc.list, ["abc", "def"]) |  | ||||||
|   }) |  | ||||||
| }) |  | ||||||
File diff suppressed because it is too large
|  | @ -1,34 +1,221 @@ | ||||||
| import * as assert from "assert" | import * as assert from 'assert' | ||||||
| import { unstable as Automerge } from "../src" | import * as Automerge from '../src' | ||||||
| import { assertEqualsOneOf } from "./helpers" | import { assertEqualsOneOf } from './helpers' | ||||||
| 
 | 
 | ||||||
| type DocType = { | function attributeStateToAttributes(accumulatedAttributes) { | ||||||
|   text: string |   const attributes = {} | ||||||
|   [key: string]: any |   Object.entries(accumulatedAttributes).forEach(([key, values]) => { | ||||||
|  |     if (values.length && values[0] !== null) { | ||||||
|  |       attributes[key] = values[0] | ||||||
|  |     } | ||||||
|  |   }) | ||||||
|  |   return attributes | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| describe("Automerge.Text", () => { | function isEquivalent(a, b) { | ||||||
|   let s1: Automerge.Doc<DocType>, s2: Automerge.Doc<DocType> |   const aProps = Object.getOwnPropertyNames(a) | ||||||
|   beforeEach(() => { |   const bProps = Object.getOwnPropertyNames(b) | ||||||
|     s1 = Automerge.change(Automerge.init<DocType>(), doc => (doc.text = "")) | 
 | ||||||
|     s2 = Automerge.merge(Automerge.init<DocType>(), s1) |   if (aProps.length != bProps.length) { | ||||||
|  |       return false | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   for (let i = 0; i < aProps.length; i++) { | ||||||
|  |     const propName = aProps[i] | ||||||
|  |       if (a[propName] !== b[propName]) { | ||||||
|  |           return false | ||||||
|  |       } | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function isControlMarker(pseudoCharacter) { | ||||||
|  |   return typeof pseudoCharacter === 'object' && pseudoCharacter.attributes | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function opFrom(text, attributes) { | ||||||
|  |   let op = { insert: text } | ||||||
|  |   if (Object.keys(attributes).length > 0) { | ||||||
|  |       op.attributes = attributes | ||||||
|  |   } | ||||||
|  |   return op | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function accumulateAttributes(span, accumulatedAttributes) { | ||||||
|  |   Object.entries(span).forEach(([key, value]) => { | ||||||
|  |     if (!accumulatedAttributes[key]) { | ||||||
|  |       accumulatedAttributes[key] = [] | ||||||
|  |     } | ||||||
|  |     if (value === null) { | ||||||
|  |       if (accumulatedAttributes[key].length === 0 || accumulatedAttributes[key] === null) { | ||||||
|  |         accumulatedAttributes[key].unshift(null) | ||||||
|  |       } else { | ||||||
|  |         accumulatedAttributes[key].shift() | ||||||
|  |       } | ||||||
|  |     } else { | ||||||
|  |       if (accumulatedAttributes[key][0] === null) { | ||||||
|  |         accumulatedAttributes[key].shift() | ||||||
|  |       } else { | ||||||
|  |         accumulatedAttributes[key].unshift(value) | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |   }) | ||||||
|  |   return accumulatedAttributes | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function automergeTextToDeltaDoc(text) { | ||||||
|  |   let ops = [] | ||||||
|  |   let controlState = {} | ||||||
|  |   let currentString = "" | ||||||
|  |   let attributes = {} | ||||||
|  |   text.toSpans().forEach((span) => { | ||||||
|  |     if (isControlMarker(span)) { | ||||||
|  |       controlState = accumulateAttributes(span.attributes, controlState) | ||||||
|  |     } else { | ||||||
|  |       let next = attributeStateToAttributes(controlState) | ||||||
|  | 
 | ||||||
|  |       // if the next span has the same calculated attributes as the current span
 | ||||||
|  |       // don't bother outputting it as a separate span, just let it ride
 | ||||||
|  |       if (typeof span === 'string' && isEquivalent(next, attributes)) { | ||||||
|  |           currentString = currentString + span | ||||||
|  |           return | ||||||
|  |       } | ||||||
|  | 
 | ||||||
|  |       if (currentString) { | ||||||
|  |         ops.push(opFrom(currentString, attributes)) | ||||||
|  |       } | ||||||
|  | 
 | ||||||
|  |       // If we've got a string, we might be able to concatenate it to another
 | ||||||
|  |       // same-attributed-string, so remember it and go to the next iteration.
 | ||||||
|  |       if (typeof span === 'string') { | ||||||
|  |         currentString = span | ||||||
|  |         attributes = next | ||||||
|  |       } else { | ||||||
|  |         // otherwise we have an embed "character" and should output it immediately.
 | ||||||
|  |         // embeds are always one-"character" in length.
 | ||||||
|  |         ops.push(opFrom(span, next)) | ||||||
|  |         currentString = '' | ||||||
|  |         attributes = {} | ||||||
|  |       } | ||||||
|  |     } | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should support insertion", () => { |   // at the end, flush any accumulated string out
 | ||||||
|  |   if (currentString) { | ||||||
|  |     ops.push(opFrom(currentString, attributes)) | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   return ops | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function inverseAttributes(attributes) { | ||||||
|  |   let invertedAttributes = {} | ||||||
|  |   Object.keys(attributes).forEach((key) => { | ||||||
|  |     invertedAttributes[key] = null | ||||||
|  |   }) | ||||||
|  |   return invertedAttributes | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function applyDeleteOp(text, offset, op) { | ||||||
|  |   let length = op.delete | ||||||
|  |   while (length > 0) { | ||||||
|  |     if (isControlMarker(text.get(offset))) { | ||||||
|  |       offset += 1 | ||||||
|  |     } else { | ||||||
|  |       // we need to not delete control characters, but we do delete embed characters
 | ||||||
|  |       text.deleteAt(offset, 1) | ||||||
|  |       length -= 1 | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   return [text, offset] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | function applyRetainOp(text, offset, op) { | ||||||
|  |   let length = op.retain | ||||||
|  | 
 | ||||||
|  |   if (op.attributes) { | ||||||
|  |     text.insertAt(offset, { attributes: op.attributes }) | ||||||
|  |     offset += 1 | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   while (length > 0) { | ||||||
|  |     const char = text.get(offset) | ||||||
|  |     offset += 1 | ||||||
|  |     if (!isControlMarker(char)) { | ||||||
|  |       length -= 1 | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   if (op.attributes) { | ||||||
|  |     text.insertAt(offset, { attributes: inverseAttributes(op.attributes) }) | ||||||
|  |     offset += 1 | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   return [text, offset] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | function applyInsertOp(text, offset, op) { | ||||||
|  |   let originalOffset = offset | ||||||
|  | 
 | ||||||
|  |   if (typeof op.insert === 'string') { | ||||||
|  |     text.insertAt(offset, ...op.insert.split('')) | ||||||
|  |     offset += op.insert.length | ||||||
|  |   } else { | ||||||
|  |     // we have an embed or something similar
 | ||||||
|  |     text.insertAt(offset, op.insert) | ||||||
|  |     offset += 1 | ||||||
|  |   } | ||||||
|  | 
 | ||||||
|  |   if (op.attributes) { | ||||||
|  |     text.insertAt(originalOffset, { attributes: op.attributes }) | ||||||
|  |     offset += 1 | ||||||
|  |   } | ||||||
|  |   if (op.attributes) { | ||||||
|  |     text.insertAt(offset, { attributes: inverseAttributes(op.attributes) }) | ||||||
|  |     offset += 1 | ||||||
|  |   } | ||||||
|  |   return [text, offset] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // XXX: uhhhhh, why can't I pass in text?
 | ||||||
|  | function applyDeltaDocToAutomergeText(delta, doc) { | ||||||
|  |   let offset = 0 | ||||||
|  | 
 | ||||||
|  |   delta.forEach(op => { | ||||||
|  |     if (op.retain) { | ||||||
|  |       [, offset] = applyRetainOp(doc.text, offset, op) | ||||||
|  |     } else if (op.delete) { | ||||||
|  |       [, offset] = applyDeleteOp(doc.text, offset, op) | ||||||
|  |     } else if (op.insert) { | ||||||
|  |       [, offset] = applyInsertOp(doc.text, offset, op) | ||||||
|  |     } | ||||||
|  |   }) | ||||||
|  | } | ||||||
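For orientation (not part of the diff): a minimal sketch of how the helpers above fit together, using the older `Automerge.Text` API that appears elsewhere in this diff. The delta literal and document below are hypothetical.

```js
// Illustration only: apply a Quill-style delta to an Automerge.Text document.
let doc = Automerge.change(Automerge.init(), d => { d.text = new Automerge.Text('Hello world') })

// Keep 6 characters, insert "reader", delete the 5 characters of "world".
const delta = [{ retain: 6 }, { insert: 'reader' }, { delete: 5 }]
doc = Automerge.change(doc, d => applyDeltaDocToAutomergeText(delta, d))
// doc.text.join('') === 'Hello reader'

// Going the other way, automergeTextToDeltaDoc(doc.text) turns the spans (including any
// formatting control markers) back into an array of delta ops, e.g. [{ insert: 'Hello reader' }].
```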
|  | 
 | ||||||
|  | describe('Automerge.Text', () => { | ||||||
|  |   let s1, s2 | ||||||
|  |   beforeEach(() => { | ||||||
|  |     s1 = Automerge.change(Automerge.init(), doc => doc.text = "") | ||||||
|  |     s2 = Automerge.merge(Automerge.init(), s1) | ||||||
|  |   }) | ||||||
|  | 
 | ||||||
|  |   it('should support insertion', () => { | ||||||
|     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "a")) |     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "a")) | ||||||
|     assert.strictEqual(s1.text.length, 1) |     assert.strictEqual(s1.text.length, 1) | ||||||
|     assert.strictEqual(s1.text[0], "a") |     assert.strictEqual(s1.text[0], 'a') | ||||||
|     assert.strictEqual(s1.text, "a") |     assert.strictEqual(s1.text, 'a') | ||||||
|     //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`)
 |     //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`)
 | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should support deletion", () => { |   it('should support deletion', () => { | ||||||
|     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) |     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) | ||||||
|     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 1, 1)) |     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 1, 1)) | ||||||
|     assert.strictEqual(s1.text.length, 2) |     assert.strictEqual(s1.text.length, 2) | ||||||
|     assert.strictEqual(s1.text[0], "a") |     assert.strictEqual(s1.text[0], 'a') | ||||||
|     assert.strictEqual(s1.text[1], "c") |     assert.strictEqual(s1.text[1], 'c') | ||||||
|     assert.strictEqual(s1.text, "ac") |     assert.strictEqual(s1.text, 'ac') | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should support implicit and explicit deletion", () => { |   it("should support implicit and explicit deletion", () => { | ||||||
|  | @ -41,71 +228,70 @@ describe("Automerge.Text", () => { | ||||||
|     assert.strictEqual(s1.text, "ac") |     assert.strictEqual(s1.text, "ac") | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should handle concurrent insertion", () => { |   it('should handle concurrent insertion', () => { | ||||||
|     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) |     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, "abc")) | ||||||
|     s2 = Automerge.change(s2, doc => Automerge.splice(doc, "text", 0, 0, "xyz")) |     s2 = Automerge.change(s2, doc => Automerge.splice(doc, "text", 0, 0, "xyz")) | ||||||
|     s1 = Automerge.merge(s1, s2) |     s1 = Automerge.merge(s1, s2) | ||||||
|     assert.strictEqual(s1.text.length, 6) |     assert.strictEqual(s1.text.length, 6) | ||||||
|     assertEqualsOneOf(s1.text, "abcxyz", "xyzabc") |     assertEqualsOneOf(s1.text, 'abcxyz', 'xyzabc') | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should handle text and other ops in the same change", () => { |   it('should handle text and other ops in the same change', () => { | ||||||
|     s1 = Automerge.change(s1, doc => { |     s1 = Automerge.change(s1, doc => { | ||||||
|       doc.foo = "bar" |       doc.foo = 'bar' | ||||||
|       Automerge.splice(doc, "text", 0, 0, "a") |       Automerge.splice(doc, "text", 0, 0, 'a') | ||||||
|     }) |     }) | ||||||
|     assert.strictEqual(s1.foo, "bar") |     assert.strictEqual(s1.foo, 'bar') | ||||||
|     assert.strictEqual(s1.text, "a") |     assert.strictEqual(s1.text, 'a') | ||||||
|     assert.strictEqual(s1.text, "a") |     assert.strictEqual(s1.text, 'a') | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should serialize to JSON as a simple string", () => { |   it('should serialize to JSON as a simple string', () => { | ||||||
|     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, 'a"b')) |     s1 = Automerge.change(s1, doc => Automerge.splice(doc, "text", 0, 0, 'a"b')) | ||||||
|     assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}') |     assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}') | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should allow modification after an object is assigned to a document", () => { |   it('should allow modification after an object is assigned to a document', () => { | ||||||
|     s1 = Automerge.change(Automerge.init(), doc => { |     s1 = Automerge.change(Automerge.init(), doc => { | ||||||
|       doc.text = "" |       doc.text = "" | ||||||
|       Automerge.splice(doc, "text", 0, 0, "abcd") |       Automerge.splice(doc ,"text", 0, 0, 'abcd') | ||||||
|       Automerge.splice(doc ,"text", 2, 1) |       Automerge.splice(doc ,"text", 2, 1) | ||||||
|       assert.strictEqual(doc.text, "abd") |       assert.strictEqual(doc.text, 'abd') | ||||||
|     }) |     }) | ||||||
|     assert.strictEqual(s1.text, "abd") |     assert.strictEqual(s1.text, 'abd') | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   it("should not allow modification outside of a change callback", () => { |   it('should not allow modification outside of a change callback', () => { | ||||||
|     assert.throws( |     assert.throws(() => Automerge.splice(s1 ,"text", 0, 0, 'a'), /object cannot be modified outside of a change block/) | ||||||
|       () => Automerge.splice(s1, "text", 0, 0, "a"), |  | ||||||
|       /object cannot be modified outside of a change block/ |  | ||||||
|     ) |  | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   describe("with initial value", () => { |   describe('with initial value', () => { | ||||||
|     it("should initialize text in Automerge.from()", () => { | 
 | ||||||
|       let s1 = Automerge.from({ text: "init" }) |     it('should initialize text in Automerge.from()', () => { | ||||||
|  |       let s1 = Automerge.from({text: 'init'}) | ||||||
|       assert.strictEqual(s1.text.length, 4) |       assert.strictEqual(s1.text.length, 4) | ||||||
|       assert.strictEqual(s1.text[0], "i") |       assert.strictEqual(s1.text[0], 'i') | ||||||
|       assert.strictEqual(s1.text[1], "n") |       assert.strictEqual(s1.text[1], 'n') | ||||||
|       assert.strictEqual(s1.text[2], "i") |       assert.strictEqual(s1.text[2], 'i') | ||||||
|       assert.strictEqual(s1.text[3], "t") |       assert.strictEqual(s1.text[3], 't') | ||||||
|       assert.strictEqual(s1.text, "init") |       assert.strictEqual(s1.text, 'init') | ||||||
|     }) |     }) | ||||||
| 
 | 
 | ||||||
|     it("should encode the initial value as a change", () => { |     it('should encode the initial value as a change', () => { | ||||||
|       const s1 = Automerge.from({ text: "init" }) |       const s1 = Automerge.from({text: 'init'}) | ||||||
|       const changes = Automerge.getAllChanges(s1) |       const changes = Automerge.getAllChanges(s1) | ||||||
|       assert.strictEqual(changes.length, 1) |       assert.strictEqual(changes.length, 1) | ||||||
|       const [s2] = Automerge.applyChanges(Automerge.init<DocType>(), changes) |       const [s2] = Automerge.applyChanges(Automerge.init(), changes) | ||||||
|       assert.strictEqual(s2.text, "init") |       assert.strictEqual(s2.text, 'init') | ||||||
|       assert.strictEqual(s2.text, "init") |       assert.strictEqual(s2.text, 'init') | ||||||
|     }) |  | ||||||
|     }) |     }) | ||||||
| 
 | 
 | ||||||
|   it("should support unicode when creating text", () => { |   }) | ||||||
|  | 
 | ||||||
|  |   it('should support unicode when creating text', () => { | ||||||
|     s1 = Automerge.from({ |     s1 = Automerge.from({ | ||||||
|       text: "🐦", |       text: '🐦' | ||||||
|     }) |     }) | ||||||
|     assert.strictEqual(s1.text, "🐦") |     assert.strictEqual(s1.text, '🐦') | ||||||
|   }) |   }) | ||||||
| }) | }) | ||||||
|  |  | ||||||
|  | @ -1,281 +0,0 @@ | ||||||
| import * as assert from "assert" |  | ||||||
| import * as Automerge from "../src" |  | ||||||
| import { assertEqualsOneOf } from "./helpers" |  | ||||||
| 
 |  | ||||||
| type DocType = { text: Automerge.Text; [key: string]: any } |  | ||||||
| 
 |  | ||||||
| describe("Automerge.Text", () => { |  | ||||||
|   let s1: Automerge.Doc<DocType>, s2: Automerge.Doc<DocType> |  | ||||||
|   beforeEach(() => { |  | ||||||
|     s1 = Automerge.change( |  | ||||||
|       Automerge.init<DocType>(), |  | ||||||
|       doc => (doc.text = new Automerge.Text()) |  | ||||||
|     ) |  | ||||||
|     s2 = Automerge.merge(Automerge.init(), s1) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should support insertion", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a")) |  | ||||||
|     assert.strictEqual(s1.text.length, 1) |  | ||||||
|     assert.strictEqual(s1.text.get(0), "a") |  | ||||||
|     assert.strictEqual(s1.text.toString(), "a") |  | ||||||
|     //assert.strictEqual(s1.text.getElemId(0), `2@${Automerge.getActorId(s1)}`)
 |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should support deletion", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 1)) |  | ||||||
|     assert.strictEqual(s1.text.length, 2) |  | ||||||
|     assert.strictEqual(s1.text.get(0), "a") |  | ||||||
|     assert.strictEqual(s1.text.get(1), "c") |  | ||||||
|     assert.strictEqual(s1.text.toString(), "ac") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should support implicit and explicit deletion", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.deleteAt(1)) |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.deleteAt(1, 0)) |  | ||||||
|     assert.strictEqual(s1.text.length, 2) |  | ||||||
|     assert.strictEqual(s1.text.get(0), "a") |  | ||||||
|     assert.strictEqual(s1.text.get(1), "c") |  | ||||||
|     assert.strictEqual(s1.text.toString(), "ac") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should handle concurrent insertion", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", "b", "c")) |  | ||||||
|     s2 = Automerge.change(s2, doc => doc.text.insertAt(0, "x", "y", "z")) |  | ||||||
|     s1 = Automerge.merge(s1, s2) |  | ||||||
|     assert.strictEqual(s1.text.length, 6) |  | ||||||
|     assertEqualsOneOf(s1.text.toString(), "abcxyz", "xyzabc") |  | ||||||
|     assertEqualsOneOf(s1.text.join(""), "abcxyz", "xyzabc") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should handle text and other ops in the same change", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => { |  | ||||||
|       doc.foo = "bar" |  | ||||||
|       doc.text.insertAt(0, "a") |  | ||||||
|     }) |  | ||||||
|     assert.strictEqual(s1.foo, "bar") |  | ||||||
|     assert.strictEqual(s1.text.toString(), "a") |  | ||||||
|     assert.strictEqual(s1.text.join(""), "a") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should serialize to JSON as a simple string", () => { |  | ||||||
|     s1 = Automerge.change(s1, doc => doc.text.insertAt(0, "a", '"', "b")) |  | ||||||
|     assert.strictEqual(JSON.stringify(s1), '{"text":"a\\"b"}') |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow modification before an object is assigned to a document", () => { |  | ||||||
|     s1 = Automerge.change(Automerge.init(), doc => { |  | ||||||
|       const text = new Automerge.Text() |  | ||||||
|       text.insertAt(0, "a", "b", "c", "d") |  | ||||||
|       text.deleteAt(2) |  | ||||||
|       doc.text = text |  | ||||||
|       assert.strictEqual(doc.text.toString(), "abd") |  | ||||||
|       assert.strictEqual(doc.text.join(""), "abd") |  | ||||||
|     }) |  | ||||||
|     assert.strictEqual(s1.text.toString(), "abd") |  | ||||||
|     assert.strictEqual(s1.text.join(""), "abd") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should allow modification after an object is assigned to a document", () => { |  | ||||||
|     s1 = Automerge.change(Automerge.init(), doc => { |  | ||||||
|       const text = new Automerge.Text() |  | ||||||
|       doc.text = text |  | ||||||
|       doc.text.insertAt(0, "a", "b", "c", "d") |  | ||||||
|       doc.text.deleteAt(2) |  | ||||||
|       assert.strictEqual(doc.text.toString(), "abd") |  | ||||||
|       assert.strictEqual(doc.text.join(""), "abd") |  | ||||||
|     }) |  | ||||||
|     assert.strictEqual(s1.text.join(""), "abd") |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should not allow modification outside of a change callback", () => { |  | ||||||
|     assert.throws( |  | ||||||
|       () => s1.text.insertAt(0, "a"), |  | ||||||
|       /object cannot be modified outside of a change block/ |  | ||||||
|     ) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   describe("with initial value", () => { |  | ||||||
|     it("should accept a string as initial value", () => { |  | ||||||
|       let s1 = Automerge.change( |  | ||||||
|         Automerge.init<DocType>(), |  | ||||||
|         doc => (doc.text = new Automerge.Text("init")) |  | ||||||
|       ) |  | ||||||
|       assert.strictEqual(s1.text.length, 4) |  | ||||||
|       assert.strictEqual(s1.text.get(0), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(1), "n") |  | ||||||
|       assert.strictEqual(s1.text.get(2), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(3), "t") |  | ||||||
|       assert.strictEqual(s1.text.toString(), "init") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should accept an array as initial value", () => { |  | ||||||
|       let s1 = Automerge.change( |  | ||||||
|         Automerge.init<DocType>(), |  | ||||||
|         doc => (doc.text = new Automerge.Text(["i", "n", "i", "t"])) |  | ||||||
|       ) |  | ||||||
|       assert.strictEqual(s1.text.length, 4) |  | ||||||
|       assert.strictEqual(s1.text.get(0), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(1), "n") |  | ||||||
|       assert.strictEqual(s1.text.get(2), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(3), "t") |  | ||||||
|       assert.strictEqual(s1.text.toString(), "init") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should initialize text in Automerge.from()", () => { |  | ||||||
|       let s1 = Automerge.from({ text: new Automerge.Text("init") }) |  | ||||||
|       assert.strictEqual(s1.text.length, 4) |  | ||||||
|       assert.strictEqual(s1.text.get(0), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(1), "n") |  | ||||||
|       assert.strictEqual(s1.text.get(2), "i") |  | ||||||
|       assert.strictEqual(s1.text.get(3), "t") |  | ||||||
|       assert.strictEqual(s1.text.toString(), "init") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should encode the initial value as a change", () => { |  | ||||||
|       const s1 = Automerge.from({ text: new Automerge.Text("init") }) |  | ||||||
|       const changes = Automerge.getAllChanges(s1) |  | ||||||
|       assert.strictEqual(changes.length, 1) |  | ||||||
|       const [s2] = Automerge.applyChanges(Automerge.init<DocType>(), changes) |  | ||||||
|       assert.strictEqual(s2.text instanceof Automerge.Text, true) |  | ||||||
|       assert.strictEqual(s2.text.toString(), "init") |  | ||||||
|       assert.strictEqual(s2.text.join(""), "init") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should allow immediate access to the value", () => { |  | ||||||
|       Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|         const text = new Automerge.Text("init") |  | ||||||
|         assert.strictEqual(text.length, 4) |  | ||||||
|         assert.strictEqual(text.get(0), "i") |  | ||||||
|         assert.strictEqual(text.toString(), "init") |  | ||||||
|         doc.text = text |  | ||||||
|         assert.strictEqual(doc.text.length, 4) |  | ||||||
|         assert.strictEqual(doc.text.get(0), "i") |  | ||||||
|         assert.strictEqual(doc.text.toString(), "init") |  | ||||||
|       }) |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should allow pre-assignment modification of the initial value", () => { |  | ||||||
|       let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|         const text = new Automerge.Text("init") |  | ||||||
|         text.deleteAt(3) |  | ||||||
|         assert.strictEqual(text.join(""), "ini") |  | ||||||
|         doc.text = text |  | ||||||
|         assert.strictEqual(doc.text.join(""), "ini") |  | ||||||
|         assert.strictEqual(doc.text.toString(), "ini") |  | ||||||
|       }) |  | ||||||
|       assert.strictEqual(s1.text.toString(), "ini") |  | ||||||
|       assert.strictEqual(s1.text.join(""), "ini") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should allow post-assignment modification of the initial value", () => { |  | ||||||
|       let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|         const text = new Automerge.Text("init") |  | ||||||
|         doc.text = text |  | ||||||
|         doc.text.deleteAt(0) |  | ||||||
|         doc.text.insertAt(0, "I") |  | ||||||
|         assert.strictEqual(doc.text.join(""), "Init") |  | ||||||
|         assert.strictEqual(doc.text.toString(), "Init") |  | ||||||
|       }) |  | ||||||
|       assert.strictEqual(s1.text.join(""), "Init") |  | ||||||
|       assert.strictEqual(s1.text.toString(), "Init") |  | ||||||
|     }) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   describe("non-textual control characters", () => { |  | ||||||
|     let s1: Automerge.Doc<DocType> |  | ||||||
|     beforeEach(() => { |  | ||||||
|       s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|         doc.text = new Automerge.Text() |  | ||||||
|         doc.text.insertAt(0, "a") |  | ||||||
|         doc.text.insertAt(1, { attribute: "bold" }) |  | ||||||
|       }) |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should allow fetching non-textual characters", () => { |  | ||||||
|       assert.deepEqual(s1.text.get(1), { attribute: "bold" }) |  | ||||||
|       //assert.strictEqual(s1.text.getElemId(1), `3@${Automerge.getActorId(s1)}`)
 |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should include control characters in string length", () => { |  | ||||||
|       assert.strictEqual(s1.text.length, 2) |  | ||||||
|       assert.strictEqual(s1.text.get(0), "a") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should replace control characters from toString()", () => { |  | ||||||
|       assert.strictEqual(s1.text.toString(), "a\uFFFC") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     it("should allow control characters to be updated", () => { |  | ||||||
|       const s2 = Automerge.change( |  | ||||||
|         s1, |  | ||||||
|         doc => (doc.text.get(1)!.attribute = "italic") |  | ||||||
|       ) |  | ||||||
|       const s3 = Automerge.load<DocType>(Automerge.save(s2)) |  | ||||||
|       assert.strictEqual(s1.text.get(1).attribute, "bold") |  | ||||||
|       assert.strictEqual(s2.text.get(1).attribute, "italic") |  | ||||||
|       assert.strictEqual(s3.text.get(1).attribute, "italic") |  | ||||||
|     }) |  | ||||||
| 
 |  | ||||||
|     describe("spans interface to Text", () => { |  | ||||||
|       it("should return a simple string as a single span", () => { |  | ||||||
|         let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|           doc.text = new Automerge.Text("hello world") |  | ||||||
|         }) |  | ||||||
|         assert.deepEqual(s1.text.toSpans(), ["hello world"]) |  | ||||||
|       }) |  | ||||||
|       it("should return an empty string as an empty array", () => { |  | ||||||
|         let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|           doc.text = new Automerge.Text() |  | ||||||
|         }) |  | ||||||
|         assert.deepEqual(s1.text.toSpans(), []) |  | ||||||
|       }) |  | ||||||
|       it("should split a span at a control character", () => { |  | ||||||
|         let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|           doc.text = new Automerge.Text("hello world") |  | ||||||
|           doc.text.insertAt(5, { attributes: { bold: true } }) |  | ||||||
|         }) |  | ||||||
|         assert.deepEqual(s1.text.toSpans(), [ |  | ||||||
|           "hello", |  | ||||||
|           { attributes: { bold: true } }, |  | ||||||
|           " world", |  | ||||||
|         ]) |  | ||||||
|       }) |  | ||||||
|       it("should allow consecutive control characters", () => { |  | ||||||
|         let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|           doc.text = new Automerge.Text("hello world") |  | ||||||
|           doc.text.insertAt(5, { attributes: { bold: true } }) |  | ||||||
|           doc.text.insertAt(6, { attributes: { italic: true } }) |  | ||||||
|         }) |  | ||||||
|         assert.deepEqual(s1.text.toSpans(), [ |  | ||||||
|           "hello", |  | ||||||
|           { attributes: { bold: true } }, |  | ||||||
|           { attributes: { italic: true } }, |  | ||||||
|           " world", |  | ||||||
|         ]) |  | ||||||
|       }) |  | ||||||
|       it("should allow non-consecutive control characters", () => { |  | ||||||
|         let s1 = Automerge.change(Automerge.init<DocType>(), doc => { |  | ||||||
|           doc.text = new Automerge.Text("hello world") |  | ||||||
|           doc.text.insertAt(5, { attributes: { bold: true } }) |  | ||||||
|           doc.text.insertAt(12, { attributes: { italic: true } }) |  | ||||||
|         }) |  | ||||||
|         assert.deepEqual(s1.text.toSpans(), [ |  | ||||||
|           "hello", |  | ||||||
|           { attributes: { bold: true } }, |  | ||||||
|           " world", |  | ||||||
|           { attributes: { italic: true } }, |  | ||||||
|         ]) |  | ||||||
|       }) |  | ||||||
|     }) |  | ||||||
|   }) |  | ||||||
| 
 |  | ||||||
|   it("should support unicode when creating text", () => { |  | ||||||
|     s1 = Automerge.from({ |  | ||||||
|       text: new Automerge.Text("🐦"), |  | ||||||
|     }) |  | ||||||
|     assert.strictEqual(s1.text.get(0), "🐦") |  | ||||||
|   }) |  | ||||||
| }) |  | ||||||
|  | @ -1,20 +1,20 @@ | ||||||
| import * as assert from "assert" | import * as assert from 'assert' | ||||||
| import * as Automerge from "../src" | import * as Automerge from '../src' | ||||||
| 
 | 
 | ||||||
| const uuid = Automerge.uuid | const uuid = Automerge.uuid | ||||||
| 
 | 
 | ||||||
| describe("uuid", () => { | describe('uuid', () => { | ||||||
|   afterEach(() => { |   afterEach(() => { | ||||||
|     uuid.reset() |     uuid.reset() | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   describe("default implementation", () => { |   describe('default implementation', () => { | ||||||
|     it("generates unique values", () => { |     it('generates unique values', () => { | ||||||
|       assert.notEqual(uuid(), uuid()) |       assert.notEqual(uuid(), uuid()) | ||||||
|     }) |     }) | ||||||
|   }) |   }) | ||||||
| 
 | 
 | ||||||
|   describe("custom implementation", () => { |   describe('custom implementation', () => { | ||||||
|     let counter |     let counter | ||||||
| 
 | 
 | ||||||
|     function customUuid() { |     function customUuid() { | ||||||
|  | @ -22,11 +22,11 @@ describe("uuid", () => { | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     before(() => uuid.setFactory(customUuid)) |     before(() => uuid.setFactory(customUuid)) | ||||||
|     beforeEach(() => (counter = 0)) |     beforeEach(() => counter = 0) | ||||||
| 
 | 
 | ||||||
|     it("invokes the custom factory", () => { |     it('invokes the custom factory', () => { | ||||||
|       assert.equal(uuid(), "custom-uuid-0") |       assert.equal(uuid(), 'custom-uuid-0') | ||||||
|       assert.equal(uuid(), "custom-uuid-1") |       assert.equal(uuid(), 'custom-uuid-1') | ||||||
|     }) |     }) | ||||||
|   }) |   }) | ||||||
| }) | }) | ||||||
|  |  | ||||||
|  | @ -14,6 +14,9 @@ | ||||||
|         "skipLibCheck": true, |         "skipLibCheck": true, | ||||||
|         "outDir": "./dist" |         "outDir": "./dist" | ||||||
|     }, |     }, | ||||||
|   "include": ["src/**/*", "test/**/*"], |     "include": [ "src/**/*" ], | ||||||
|   "exclude": ["./dist/**/*", "./node_modules", "./src/**/*.deno.ts"] |     "exclude": [ | ||||||
|  |         "./dist/**/*", | ||||||
|  |         "./node_modules" | ||||||
|  |     ] | ||||||
| } | } | ||||||
|  |  | ||||||
							
								
								
									
220 javascript/typedoc-readme.md Normal file
|  | @ -0,0 +1,220 @@ | ||||||
|  | # Automerge | ||||||
|  | 
 | ||||||
|  | This library provides the core automerge data structure and sync algorithms. | ||||||
|  | Other libraries can be built on top of this one which provide IO and | ||||||
|  | persistence. | ||||||
|  | 
 | ||||||
|  | An automerge document can be thought of as an immutable POJO (plain old javascript | ||||||
|  | object) which `automerge` tracks the history of, allowing it to be merged with | ||||||
|  | any other automerge document. | ||||||
|  | 
 | ||||||
|  | ## Creating and modifying a document | ||||||
|  | 
 | ||||||
|  | You can create a document with {@link init} or {@link from} and then make | ||||||
|  | changes to it with {@link change}. You can merge two documents with {@link | ||||||
|  | merge}. | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | import * as automerge from "@automerge/automerge" | ||||||
|  | 
 | ||||||
|  | type DocType = {ideas: Array<automerge.Text>} | ||||||
|  | 
 | ||||||
|  | let doc1 = automerge.init<DocType>() | ||||||
|  | doc1 = automerge.change(doc1, d => { | ||||||
|  |     d.ideas = [new automerge.Text("an immutable document")] | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | let doc2 = automerge.init<DocType>() | ||||||
|  | doc2 = automerge.merge(doc2, automerge.clone(doc1)) | ||||||
|  | doc2 = automerge.change<DocType>(doc2, d => { | ||||||
|  |     d.ideas.push(new automerge.Text("which records its history")) | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | // Note the `automerge.clone` call, see the "cloning" section of this readme for | ||||||
|  | // more detail | ||||||
|  | doc1 = automerge.merge(doc1, automerge.clone(doc2)) | ||||||
|  | doc1 = automerge.change(doc1, d => { | ||||||
|  |     d.ideas[0].deleteAt(13, 8) | ||||||
|  |     d.ideas[0].insertAt(13, "object") | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | let doc3 = automerge.merge(doc1, doc2) | ||||||
|  | // doc3 is now {ideas: ["an immutable object", "which records its history"]} | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
|  | ## Applying changes from another document | ||||||
|  | 
 | ||||||
|  | You can get a representation of the result of the last {@link change} you made | ||||||
|  | to a document with {@link getLastLocalChange} and you can apply that change to | ||||||
|  | another document using {@link applyChanges}. | ||||||
|  | 
 | ||||||
|  | If you need to get just the changes which are in one document but not in another | ||||||
|  | you can use {@link getHeads} to get the heads of the document without the | ||||||
|  | changes and then {@link getMissingDeps}, passing the result of {@link getHeads} | ||||||
|  | on the document with the changes. | ||||||
|  | 
 | ||||||
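|  | As a rough sketch of the first approach, reusing the `automerge` import from the | ||||||
|  | examples above (the `note` property is just illustrative): | ||||||
|  | 
|  | ```javascript | ||||||
|  | let doc1 = automerge.init<any>() | ||||||
|  | doc1 = automerge.change<any>(doc1, d => { d.note = "hello" }) | ||||||
|  | 
|  | // The change produced by the most recent call to change() | ||||||
|  | const lastChange = automerge.getLastLocalChange(doc1) | ||||||
|  | 
|  | let doc2 = automerge.init<any>() | ||||||
|  | if (lastChange != null) { | ||||||
|  |     doc2 = automerge.applyChanges<any>(doc2, [lastChange])[0] | ||||||
|  | } | ||||||
|  | // doc2 now also contains { note: "hello" } | ||||||
|  | ``` | ||||||
|  | 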
|  | ## Saving and loading documents | ||||||
|  | 
 | ||||||
|  | You can {@link save} a document to generate a compressed binary representation of | ||||||
|  | the document, which can be loaded with {@link load}. If you have a document which | ||||||
|  | you have recently made changes to you can generate just the recent changes with {@link | ||||||
|  | saveIncremental}. This will generate all the changes since you last called | ||||||
|  | `saveIncremental`; the changes generated can be applied to another document with | ||||||
|  | {@link loadIncremental}. | ||||||
|  | 
 | ||||||
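|  | For example, as a minimal sketch (the `title` property is just illustrative, and | ||||||
|  | the `automerge` import from the examples above is assumed): | ||||||
|  | 
|  | ```javascript | ||||||
|  | let doc = automerge.from<any>({ title: "readme" }) | ||||||
|  | 
|  | // Full snapshot: save() produces a compressed byte array, load() restores it | ||||||
|  | const bytes = automerge.save(doc) | ||||||
|  | const copy = automerge.load<any>(bytes) | ||||||
|  | 
|  | // Incremental: saveIncremental() returns only what has changed since it was last called | ||||||
|  | doc = automerge.change<any>(doc, d => { d.title = "README" }) | ||||||
|  | const recent = automerge.saveIncremental(doc) | ||||||
|  | 
|  | // loadIncremental() applies those changes to the copy | ||||||
|  | const updated = automerge.loadIncremental<any>(copy, recent) | ||||||
|  | ``` | ||||||
|  | 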
|  | ## Viewing different versions of a document | ||||||
|  | 
 | ||||||
|  | Occasionally you may wish to explicitly step to a different point in a document | ||||||
|  | history. One common reason to do this is if you need to obtain a set of changes | ||||||
|  | which take the document from one state to another in order to send those changes | ||||||
|  | to another peer (or to save them somewhere). You can use {@link view} to do this. | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | import * as automerge from "@automerge/automerge" | ||||||
|  | import * as assert from "assert" | ||||||
|  | 
 | ||||||
|  | let doc = automerge.from({ | ||||||
|  |     "key1": "value1" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | // Make a clone of the document at this point, maybe this is actually on another | ||||||
|  | // peer. | ||||||
|  | let doc2 = automerge.clone<any>(doc) | ||||||
|  | 
 | ||||||
|  | let heads = automerge.getHeads(doc) | ||||||
|  | 
 | ||||||
|  | doc = automerge.change<any>(doc, d => { | ||||||
|  |     d.key2 = "value2" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | doc = automerge.change<any>(doc, d => { | ||||||
|  |     d.key3 = "value3" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | // At this point we've generated two separate changes, now we want to send  | ||||||
|  | // just those changes to someone else | ||||||
|  | 
 | ||||||
|  | // view is a cheap reference based copy of a document at a given set of heads | ||||||
|  | let before = automerge.view(doc, heads) | ||||||
|  | 
 | ||||||
|  | // This view doesn't show the last two changes in the document state | ||||||
|  | assert.deepEqual(before, { | ||||||
|  |     key1: "value1" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | // Get the changes to send to doc2 | ||||||
|  | let changes = automerge.getChanges(before, doc) | ||||||
|  | 
 | ||||||
|  | // Apply the changes at doc2 | ||||||
|  | doc2 = automerge.applyChanges<any>(doc2, changes)[0] | ||||||
|  | assert.deepEqual(doc2, { | ||||||
|  |     key1: "value1", | ||||||
|  |     key2: "value2", | ||||||
|  |     key3: "value3" | ||||||
|  | }) | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
|  | If you have a {@link view} of a document which you want to make changes to you | ||||||
|  | can {@link clone} the viewed document. | ||||||
|  | 
 | ||||||
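|  | Continuing the example above, a rough sketch of that might be (the `key4` property | ||||||
|  | is just illustrative): | ||||||
|  | 
|  | ```javascript | ||||||
|  | // The view is read-only; clone it to get an independent, writable document | ||||||
|  | let editable = automerge.clone<any>(automerge.view(doc, heads)) | ||||||
|  | editable = automerge.change<any>(editable, d => { d.key4 = "value4" }) | ||||||
|  | ``` | ||||||
|  | 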
|  | ## Syncing | ||||||
|  | 
 | ||||||
|  | The sync protocol is stateful. This means that we start by creating a {@link | ||||||
|  | SyncState} for each peer we are communicating with using {@link initSyncState}. | ||||||
|  | Then we generate a message to send to the peer by calling {@link | ||||||
|  | generateSyncMessage}. When we receive a message from the peer we call {@link | ||||||
|  | receiveSyncMessage}. Here's a simple example of a loop which just keeps two | ||||||
|  | peers in sync. | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | let sync1 = automerge.initSyncState() | ||||||
|  | let msg: Uint8Array | null = null | ||||||
|  | ;[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) | ||||||
|  | 
 | ||||||
|  | while (true) { | ||||||
|  |     if (msg != null) { | ||||||
|  |         network.send(msg) | ||||||
|  |     } | ||||||
|  |     let resp: Uint8Array = network.receive() | ||||||
|  |     ;[doc1, sync1] = automerge.receiveSyncMessage(doc1, sync1, resp) | ||||||
|  |     ;[sync1, msg] = automerge.generateSyncMessage(doc1, sync1) | ||||||
|  | } | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ## Conflicts | ||||||
|  | 
 | ||||||
|  | The only time conflicts occur in automerge documents is in concurrent | ||||||
|  | assignments to the same key in an object. In this case automerge | ||||||
|  | deterministically chooses an arbitrary value to present to the application but | ||||||
|  | you can examine the conflicts using {@link getConflicts}. | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | import * as automerge from "@automerge/automerge" | ||||||
|  | 
 | ||||||
|  | type Profile = { | ||||||
|  |     pets: Array<{name: string, type: string}> | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | let doc1 = automerge.init<Profile>("aaaa") | ||||||
|  | doc1 = automerge.change(doc1, d => { | ||||||
|  |     d.pets = [{name: "Lassie", type: "dog"}] | ||||||
|  | }) | ||||||
|  | let doc2 = automerge.init<Profile>("bbbb") | ||||||
|  | doc2 = automerge.merge(doc2, automerge.clone(doc1)) | ||||||
|  | 
 | ||||||
|  | doc2 = automerge.change(doc2, d => { | ||||||
|  |     d.pets[0].name = "Beethoven" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | doc1 = automerge.change(doc1, d => { | ||||||
|  |     d.pets[0].name = "Babe" | ||||||
|  | }) | ||||||
|  | 
 | ||||||
|  | const doc3 = automerge.merge(doc1, doc2) | ||||||
|  | 
 | ||||||
|  | // Note that here we pass `doc3.pets`, not `doc3` | ||||||
|  | let conflicts = automerge.getConflicts(doc3.pets[0], "name") | ||||||
|  | 
 | ||||||
|  | // The two conflicting values are the values of the conflicts object | ||||||
|  | assert.deepEqual(Object.values(conflicts), ["Babe", "Beethoven"]) | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
|  | ## Actor IDs | ||||||
|  | 
 | ||||||
|  | By default automerge will generate a random actor ID for you, but most methods | ||||||
|  | for creating a document allow you to set the actor ID. You can get the actor ID | ||||||
|  | associated with the document by calling {@link getActorId}. Actor IDs must not | ||||||
|  | be used in concurrent threads of execution - all changes by a given actor ID | ||||||
|  | are expected to be sequential.  | ||||||
|  | 
 | ||||||
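|  | A short sketch (the hex string "aaaa" is just an illustrative actor ID): | ||||||
|  | 
|  | ```javascript | ||||||
|  | let doc1 = automerge.init<any>("aaaa")   // explicit actor ID | ||||||
|  | console.log(automerge.getActorId(doc1))  // "aaaa" | ||||||
|  | 
|  | let doc2 = automerge.init<any>()         // a random actor ID is generated | ||||||
|  | console.log(automerge.getActorId(doc2))  // some randomly generated hex string | ||||||
|  | ``` | ||||||
|  | 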
|  | 
 | ||||||
|  | ## Listening to patches | ||||||
|  | 
 | ||||||
|  | Sometimes you want to respond to changes made to an automerge document. In this | ||||||
|  | case you can use the {@link PatchCallback} type to receive notifications when | ||||||
|  | changes have been made. | ||||||
|  | 
 | ||||||
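|  | As a rough sketch (assuming a `patchCallback` entry in the options passed to | ||||||
|  | {@link change}; check the {@link PatchCallback} type for the exact callback | ||||||
|  | signature, and note the `status` property is just illustrative): | ||||||
|  | 
|  | ```javascript | ||||||
|  | let doc = automerge.init<any>() | ||||||
|  | doc = automerge.change<any>( | ||||||
|  |     doc, | ||||||
|  |     { patchCallback: (...args) => console.log("patches:", args) }, | ||||||
|  |     d => { d.status = "done" } | ||||||
|  | ) | ||||||
|  | ``` | ||||||
|  | 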
|  | ## Cloning | ||||||
|  | 
 | ||||||
|  | Currently you cannot make mutating changes (i.e. call {@link change}) to a | ||||||
|  | document which you have two pointers to. For example, in this code: | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | let doc1 = automerge.init() | ||||||
|  | let doc2 = automerge.change(doc1, d => d.key = "value") | ||||||
|  | ``` | ||||||
|  | 
 | ||||||
|  | `doc1` and `doc2` are both pointers to the same state. Any attempt to call | ||||||
|  | mutating methods on `doc1` will now result in an error like | ||||||
|  | 
 | ||||||
|  |     Attempting to change an out of date document | ||||||
|  |      | ||||||
|  | If you encounter this you need to clone the original document; the above sample | ||||||
|  | would work as: | ||||||
|  | 
 | ||||||
|  | ```javascript | ||||||
|  | let doc1 = automerge.init() | ||||||
|  | let doc2 = automerge.change(automerge.clone(doc1), d => d.key = "value") | ||||||
|  | ``` | ||||||
|  | @ -6,12 +6,18 @@ members = [ | ||||||
|     "automerge-test", |     "automerge-test", | ||||||
|     "automerge-wasm", |     "automerge-wasm", | ||||||
|     "edit-trace", |     "edit-trace", | ||||||
|  |     "badmessage", | ||||||
| ] | ] | ||||||
| resolver = "2" | resolver = "2" | ||||||
| 
 | 
 | ||||||
| [profile.release] | [profile.release] | ||||||
|  | debug = true | ||||||
| lto = true | lto = true | ||||||
| codegen-units = 1 | opt-level = 3 | ||||||
| 
 | 
 | ||||||
| [profile.bench] | [profile.bench] | ||||||
| debug = true | debug = true | ||||||
|  | 
 | ||||||
|  | [profile.release.package.automerge-wasm] | ||||||
|  | debug = false | ||||||
|  | opt-level = 3 | ||||||
|  |  | ||||||
|  | @ -1,250 +0,0 @@ | ||||||
| --- |  | ||||||
| Language:        Cpp |  | ||||||
| # BasedOnStyle:  Chromium |  | ||||||
| AccessModifierOffset: -1 |  | ||||||
| AlignAfterOpenBracket: Align |  | ||||||
| AlignArrayOfStructures: None |  | ||||||
| AlignConsecutiveAssignments: |  | ||||||
|   Enabled:         false |  | ||||||
|   AcrossEmptyLines: false |  | ||||||
|   AcrossComments:  false |  | ||||||
|   AlignCompound:   false |  | ||||||
|   PadOperators:    true |  | ||||||
| AlignConsecutiveBitFields: |  | ||||||
|   Enabled:         false |  | ||||||
|   AcrossEmptyLines: false |  | ||||||
|   AcrossComments:  false |  | ||||||
|   AlignCompound:   false |  | ||||||
|   PadOperators:    false |  | ||||||
| AlignConsecutiveDeclarations: |  | ||||||
|   Enabled:         false |  | ||||||
|   AcrossEmptyLines: false |  | ||||||
|   AcrossComments:  false |  | ||||||
|   AlignCompound:   false |  | ||||||
|   PadOperators:    false |  | ||||||
| AlignConsecutiveMacros: |  | ||||||
|   Enabled:         false |  | ||||||
|   AcrossEmptyLines: false |  | ||||||
|   AcrossComments:  false |  | ||||||
|   AlignCompound:   false |  | ||||||
|   PadOperators:    false |  | ||||||
| AlignEscapedNewlines: Left |  | ||||||
| AlignOperands:   Align |  | ||||||
| AlignTrailingComments: true |  | ||||||
| AllowAllArgumentsOnNextLine: true |  | ||||||
| AllowAllParametersOfDeclarationOnNextLine: false |  | ||||||
| AllowShortEnumsOnASingleLine: true |  | ||||||
| AllowShortBlocksOnASingleLine: Never |  | ||||||
| AllowShortCaseLabelsOnASingleLine: false |  | ||||||
| AllowShortFunctionsOnASingleLine: Inline |  | ||||||
| AllowShortLambdasOnASingleLine: All |  | ||||||
| AllowShortIfStatementsOnASingleLine: Never |  | ||||||
| AllowShortLoopsOnASingleLine: false |  | ||||||
| AlwaysBreakAfterDefinitionReturnType: None |  | ||||||
| AlwaysBreakAfterReturnType: None |  | ||||||
| AlwaysBreakBeforeMultilineStrings: true |  | ||||||
| AlwaysBreakTemplateDeclarations: Yes |  | ||||||
| AttributeMacros: |  | ||||||
|   - __capability |  | ||||||
| BinPackArguments: true |  | ||||||
| BinPackParameters: false |  | ||||||
| BraceWrapping: |  | ||||||
|   AfterCaseLabel:  false |  | ||||||
|   AfterClass:      false |  | ||||||
|   AfterControlStatement: Never |  | ||||||
|   AfterEnum:       false |  | ||||||
|   AfterFunction:   false |  | ||||||
|   AfterNamespace:  false |  | ||||||
|   AfterObjCDeclaration: false |  | ||||||
|   AfterStruct:     false |  | ||||||
|   AfterUnion:      false |  | ||||||
|   AfterExternBlock: false |  | ||||||
|   BeforeCatch:     false |  | ||||||
|   BeforeElse:      false |  | ||||||
|   BeforeLambdaBody: false |  | ||||||
|   BeforeWhile:     false |  | ||||||
|   IndentBraces:    false |  | ||||||
|   SplitEmptyFunction: true |  | ||||||
|   SplitEmptyRecord: true |  | ||||||
|   SplitEmptyNamespace: true |  | ||||||
| BreakBeforeBinaryOperators: None |  | ||||||
| BreakBeforeConceptDeclarations: Always |  | ||||||
| BreakBeforeBraces: Attach |  | ||||||
| BreakBeforeInheritanceComma: false |  | ||||||
| BreakInheritanceList: BeforeColon |  | ||||||
| BreakBeforeTernaryOperators: true |  | ||||||
| BreakConstructorInitializersBeforeComma: false |  | ||||||
| BreakConstructorInitializers: BeforeColon |  | ||||||
| BreakAfterJavaFieldAnnotations: false |  | ||||||
| BreakStringLiterals: true |  | ||||||
| ColumnLimit:     120 |  | ||||||
| CommentPragmas:  '^ IWYU pragma:' |  | ||||||
| QualifierAlignment: Leave |  | ||||||
| CompactNamespaces: false |  | ||||||
| ConstructorInitializerIndentWidth: 4 |  | ||||||
| ContinuationIndentWidth: 4 |  | ||||||
| Cpp11BracedListStyle: true |  | ||||||
| DeriveLineEnding: true |  | ||||||
| DerivePointerAlignment: false |  | ||||||
| DisableFormat:   false |  | ||||||
| EmptyLineAfterAccessModifier: Never |  | ||||||
| EmptyLineBeforeAccessModifier: LogicalBlock |  | ||||||
| ExperimentalAutoDetectBinPacking: false |  | ||||||
| PackConstructorInitializers: NextLine |  | ||||||
| BasedOnStyle:    '' |  | ||||||
| ConstructorInitializerAllOnOneLineOrOnePerLine: false |  | ||||||
| AllowAllConstructorInitializersOnNextLine: true |  | ||||||
| FixNamespaceComments: true |  | ||||||
| ForEachMacros: |  | ||||||
|   - foreach |  | ||||||
|   - Q_FOREACH |  | ||||||
|   - BOOST_FOREACH |  | ||||||
| IfMacros: |  | ||||||
|   - KJ_IF_MAYBE |  | ||||||
| IncludeBlocks:   Preserve |  | ||||||
| IncludeCategories: |  | ||||||
|   - Regex:           '^<ext/.*\.h>' |  | ||||||
|     Priority:        2 |  | ||||||
|     SortPriority:    0 |  | ||||||
|     CaseSensitive:   false |  | ||||||
|   - Regex:           '^<.*\.h>' |  | ||||||
|     Priority:        1 |  | ||||||
|     SortPriority:    0 |  | ||||||
|     CaseSensitive:   false |  | ||||||
|   - Regex:           '^<.*' |  | ||||||
|     Priority:        2 |  | ||||||
|     SortPriority:    0 |  | ||||||
|     CaseSensitive:   false |  | ||||||
|   - Regex:           '.*' |  | ||||||
|     Priority:        3 |  | ||||||
|     SortPriority:    0 |  | ||||||
|     CaseSensitive:   false |  | ||||||
| IncludeIsMainRegex: '([-_](test|unittest))?$' |  | ||||||
| IncludeIsMainSourceRegex: '' |  | ||||||
| IndentAccessModifiers: false |  | ||||||
| IndentCaseLabels: true |  | ||||||
| IndentCaseBlocks: false |  | ||||||
| IndentGotoLabels: true |  | ||||||
| IndentPPDirectives: None |  | ||||||
| IndentExternBlock: AfterExternBlock |  | ||||||
| IndentRequiresClause: true |  | ||||||
| IndentWidth:     4 |  | ||||||
| IndentWrappedFunctionNames: false |  | ||||||
| InsertBraces:    false |  | ||||||
| InsertTrailingCommas: None |  | ||||||
| JavaScriptQuotes: Leave |  | ||||||
| JavaScriptWrapImports: true |  | ||||||
| KeepEmptyLinesAtTheStartOfBlocks: false |  | ||||||
| LambdaBodyIndentation: Signature |  | ||||||
| MacroBlockBegin: '' |  | ||||||
| MacroBlockEnd:   '' |  | ||||||
| MaxEmptyLinesToKeep: 1 |  | ||||||
| NamespaceIndentation: None |  | ||||||
| ObjCBinPackProtocolList: Never |  | ||||||
| ObjCBlockIndentWidth: 2 |  | ||||||
| ObjCBreakBeforeNestedBlockParam: true |  | ||||||
| ObjCSpaceAfterProperty: false |  | ||||||
| ObjCSpaceBeforeProtocolList: true |  | ||||||
| PenaltyBreakAssignment: 2 |  | ||||||
| PenaltyBreakBeforeFirstCallParameter: 1 |  | ||||||
| PenaltyBreakComment: 300 |  | ||||||
| PenaltyBreakFirstLessLess: 120 |  | ||||||
| PenaltyBreakOpenParenthesis: 0 |  | ||||||
| PenaltyBreakString: 1000 |  | ||||||
| PenaltyBreakTemplateDeclaration: 10 |  | ||||||
| PenaltyExcessCharacter: 1000000 |  | ||||||
| PenaltyReturnTypeOnItsOwnLine: 200 |  | ||||||
| PenaltyIndentedWhitespace: 0 |  | ||||||
| PointerAlignment: Left |  | ||||||
| PPIndentWidth:   -1 |  | ||||||
| RawStringFormats: |  | ||||||
|   - Language:        Cpp |  | ||||||
|     Delimiters: |  | ||||||
|       - cc |  | ||||||
|       - CC |  | ||||||
|       - cpp |  | ||||||
|       - Cpp |  | ||||||
|       - CPP |  | ||||||
|       - 'c++' |  | ||||||
|       - 'C++' |  | ||||||
|     CanonicalDelimiter: '' |  | ||||||
|     BasedOnStyle:    google |  | ||||||
|   - Language:        TextProto |  | ||||||
|     Delimiters: |  | ||||||
|       - pb |  | ||||||
|       - PB |  | ||||||
|       - proto |  | ||||||
|       - PROTO |  | ||||||
|     EnclosingFunctions: |  | ||||||
|       - EqualsProto |  | ||||||
|       - EquivToProto |  | ||||||
|       - PARSE_PARTIAL_TEXT_PROTO |  | ||||||
|       - PARSE_TEST_PROTO |  | ||||||
|       - PARSE_TEXT_PROTO |  | ||||||
|       - ParseTextOrDie |  | ||||||
|       - ParseTextProtoOrDie |  | ||||||
|       - ParseTestProto |  | ||||||
|       - ParsePartialTestProto |  | ||||||
|     CanonicalDelimiter: pb |  | ||||||
|     BasedOnStyle:    google |  | ||||||
| ReferenceAlignment: Pointer |  | ||||||
| ReflowComments:  true |  | ||||||
| RemoveBracesLLVM: false |  | ||||||
| RequiresClausePosition: OwnLine |  | ||||||
| SeparateDefinitionBlocks: Leave |  | ||||||
| ShortNamespaceLines: 1 |  | ||||||
| SortIncludes:    CaseSensitive |  | ||||||
| SortJavaStaticImport: Before |  | ||||||
| SortUsingDeclarations: true |  | ||||||
| SpaceAfterCStyleCast: false |  | ||||||
| SpaceAfterLogicalNot: false |  | ||||||
| SpaceAfterTemplateKeyword: true |  | ||||||
| SpaceBeforeAssignmentOperators: true |  | ||||||
| SpaceBeforeCaseColon: false |  | ||||||
| SpaceBeforeCpp11BracedList: false |  | ||||||
| SpaceBeforeCtorInitializerColon: true |  | ||||||
| SpaceBeforeInheritanceColon: true |  | ||||||
| SpaceBeforeParens: ControlStatements |  | ||||||
| SpaceBeforeParensOptions: |  | ||||||
|   AfterControlStatements: true |  | ||||||
|   AfterForeachMacros: true |  | ||||||
|   AfterFunctionDefinitionName: false |  | ||||||
|   AfterFunctionDeclarationName: false |  | ||||||
|   AfterIfMacros:   true |  | ||||||
|   AfterOverloadedOperator: false |  | ||||||
|   AfterRequiresInClause: false |  | ||||||
|   AfterRequiresInExpression: false |  | ||||||
|   BeforeNonEmptyParentheses: false |  | ||||||
| SpaceAroundPointerQualifiers: Default |  | ||||||
| SpaceBeforeRangeBasedForLoopColon: true |  | ||||||
| SpaceInEmptyBlock: false |  | ||||||
| SpaceInEmptyParentheses: false |  | ||||||
| SpacesBeforeTrailingComments: 2 |  | ||||||
| SpacesInAngles:  Never |  | ||||||
| SpacesInConditionalStatement: false |  | ||||||
| SpacesInContainerLiterals: true |  | ||||||
| SpacesInCStyleCastParentheses: false |  | ||||||
| SpacesInLineCommentPrefix: |  | ||||||
|   Minimum:         1 |  | ||||||
|   Maximum:         -1 |  | ||||||
| SpacesInParentheses: false |  | ||||||
| SpacesInSquareBrackets: false |  | ||||||
| SpaceBeforeSquareBrackets: false |  | ||||||
| BitFieldColonSpacing: Both |  | ||||||
| Standard:        Auto |  | ||||||
| StatementAttributeLikeMacros: |  | ||||||
|   - Q_EMIT |  | ||||||
| StatementMacros: |  | ||||||
|   - Q_UNUSED |  | ||||||
|   - QT_REQUIRE_VERSION |  | ||||||
| TabWidth:        8 |  | ||||||
| UseCRLF:         false |  | ||||||
| UseTab:          Never |  | ||||||
| WhitespaceSensitiveMacros: |  | ||||||
|   - STRINGIZE |  | ||||||
|   - PP_STRINGIZE |  | ||||||
|   - BOOST_PP_STRINGIZE |  | ||||||
|   - NS_SWIFT_NAME |  | ||||||
|   - CF_SWIFT_NAME |  | ||||||
| ... |  | ||||||
| 
 |  | ||||||
							
								
								
									
8 rust/automerge-c/.gitignore vendored
|  | @ -1,10 +1,10 @@ | ||||||
| automerge | automerge | ||||||
| automerge.h | automerge.h | ||||||
| automerge.o | automerge.o | ||||||
| build/ | *.cmake | ||||||
| CMakeCache.txt |  | ||||||
| CMakeFiles | CMakeFiles | ||||||
| CMakePresets.json |  | ||||||
| Makefile | Makefile | ||||||
| DartConfiguration.tcl | DartConfiguration.tcl | ||||||
| out/ | config.h | ||||||
|  | CMakeCache.txt | ||||||
|  | Cargo | ||||||
|  |  | ||||||
|  | @ -1,297 +1,97 @@ | ||||||
| cmake_minimum_required(VERSION 3.23 FATAL_ERROR) | cmake_minimum_required(VERSION 3.18 FATAL_ERROR) | ||||||
| 
 | 
 | ||||||
| project(automerge-c VERSION 0.1.0 | set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") | ||||||
|                     LANGUAGES C |  | ||||||
|                     DESCRIPTION "C bindings for the Automerge Rust library.") |  | ||||||
| 
 | 
 | ||||||
| set(LIBRARY_NAME "automerge") | # Parse the library name, project name and project version out of Cargo's TOML file. | ||||||
|  | set(CARGO_LIB_SECTION OFF) | ||||||
| 
 | 
 | ||||||
| set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) | set(LIBRARY_NAME "") | ||||||
| 
 | 
 | ||||||
| option(BUILD_SHARED_LIBS "Enable the choice of a shared or static library.") | set(CARGO_PKG_SECTION OFF) | ||||||
|  | 
 | ||||||
|  | set(CARGO_PKG_NAME "") | ||||||
|  | 
 | ||||||
|  | set(CARGO_PKG_VERSION "") | ||||||
|  | 
 | ||||||
|  | file(READ Cargo.toml TOML_STRING) | ||||||
|  | 
 | ||||||
|  | string(REPLACE ";" "\\\\;" TOML_STRING "${TOML_STRING}") | ||||||
|  | 
 | ||||||
|  | string(REPLACE "\n" ";" TOML_LINES "${TOML_STRING}") | ||||||
|  | 
 | ||||||
|  | foreach(TOML_LINE IN ITEMS ${TOML_LINES}) | ||||||
|  |     string(REGEX MATCH "^\\[(lib|package)\\]$" _ ${TOML_LINE}) | ||||||
|  | 
 | ||||||
|  |     if(CMAKE_MATCH_1 STREQUAL "lib") | ||||||
|  |         set(CARGO_LIB_SECTION ON) | ||||||
|  | 
 | ||||||
|  |         set(CARGO_PKG_SECTION OFF) | ||||||
|  |     elseif(CMAKE_MATCH_1 STREQUAL "package") | ||||||
|  |         set(CARGO_LIB_SECTION OFF) | ||||||
|  | 
 | ||||||
|  |         set(CARGO_PKG_SECTION ON) | ||||||
|  |     endif() | ||||||
|  | 
 | ||||||
|  |     string(REGEX MATCH "^name += +\"([^\"]+)\"$" _ ${TOML_LINE}) | ||||||
|  | 
 | ||||||
|  |     if(CMAKE_MATCH_1 AND (CARGO_LIB_SECTION AND NOT CARGO_PKG_SECTION)) | ||||||
|  |         set(LIBRARY_NAME "${CMAKE_MATCH_1}") | ||||||
|  |     elseif(CMAKE_MATCH_1 AND (NOT CARGO_LIB_SECTION AND CARGO_PKG_SECTION)) | ||||||
|  |         set(CARGO_PKG_NAME "${CMAKE_MATCH_1}") | ||||||
|  |     endif() | ||||||
|  | 
 | ||||||
|  |     string(REGEX MATCH "^version += +\"([^\"]+)\"$" _ ${TOML_LINE}) | ||||||
|  | 
 | ||||||
|  |     if(CMAKE_MATCH_1 AND CARGO_PKG_SECTION) | ||||||
|  |         set(CARGO_PKG_VERSION "${CMAKE_MATCH_1}") | ||||||
|  |     endif() | ||||||
|  | 
 | ||||||
|  |     if(LIBRARY_NAME AND (CARGO_PKG_NAME AND CARGO_PKG_VERSION)) | ||||||
|  |         break() | ||||||
|  |     endif() | ||||||
|  | endforeach() | ||||||
|  | 
 | ||||||
|  | project(${CARGO_PKG_NAME} VERSION 0.0.1 LANGUAGES C DESCRIPTION "C bindings for the Automerge Rust backend.") | ||||||
| 
 | 
 | ||||||
| include(CTest) | include(CTest) | ||||||
| 
 | 
 | ||||||
|  | option(BUILD_SHARED_LIBS "Enable the choice of a shared or static library.") | ||||||
|  | 
 | ||||||
| include(CMakePackageConfigHelpers) | include(CMakePackageConfigHelpers) | ||||||
| 
 | 
 | ||||||
| include(GNUInstallDirs) | include(GNUInstallDirs) | ||||||
| 
 | 
 | ||||||
| set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") |  | ||||||
| 
 |  | ||||||
| string(MAKE_C_IDENTIFIER ${PROJECT_NAME} SYMBOL_PREFIX) | string(MAKE_C_IDENTIFIER ${PROJECT_NAME} SYMBOL_PREFIX) | ||||||
| 
 | 
 | ||||||
| string(TOUPPER ${SYMBOL_PREFIX} SYMBOL_PREFIX) | string(TOUPPER ${SYMBOL_PREFIX} SYMBOL_PREFIX) | ||||||
| 
 | 
 | ||||||
| set(CARGO_TARGET_DIR "${CMAKE_BINARY_DIR}/Cargo/target") | set(CARGO_TARGET_DIR "${CMAKE_CURRENT_BINARY_DIR}/Cargo/target") | ||||||
| 
 | 
 | ||||||
| set(CBINDGEN_INCLUDEDIR "${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_INCLUDEDIR}") | set(CBINDGEN_INCLUDEDIR "${CARGO_TARGET_DIR}/${CMAKE_INSTALL_INCLUDEDIR}") | ||||||
| 
 | 
 | ||||||
| set(CBINDGEN_TARGET_DIR "${CBINDGEN_INCLUDEDIR}/${PROJECT_NAME}") | set(CBINDGEN_TARGET_DIR "${CBINDGEN_INCLUDEDIR}/${PROJECT_NAME}") | ||||||
| 
 | 
 | ||||||
| find_program ( | add_subdirectory(src) | ||||||
|     CARGO_CMD |  | ||||||
|     "cargo" |  | ||||||
|     PATHS "$ENV{CARGO_HOME}/bin" |  | ||||||
|     DOC "The Cargo command" |  | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| if(NOT CARGO_CMD) | # Generate and install the configuration header. | ||||||
|     message(FATAL_ERROR "Cargo (Rust package manager) not found! " |  | ||||||
|                         "Please install it and/or set the CARGO_HOME " |  | ||||||
|                         "environment variable to its path.") |  | ||||||
| endif() |  | ||||||
| 
 |  | ||||||
| string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) |  | ||||||
| 
 |  | ||||||
| # In order to build with -Z build-std, we need to pass target explicitly. |  | ||||||
| # https://doc.rust-lang.org/cargo/reference/unstable.html#build-std |  | ||||||
| execute_process ( |  | ||||||
|     COMMAND rustc -vV |  | ||||||
|     OUTPUT_VARIABLE RUSTC_VERSION |  | ||||||
|     OUTPUT_STRIP_TRAILING_WHITESPACE |  | ||||||
| ) |  | ||||||
| string(REGEX REPLACE ".*host: ([^ \n]*).*" "\\1" |  | ||||||
|     CARGO_TARGET |  | ||||||
|     ${RUSTC_VERSION} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| if(BUILD_TYPE_LOWER STREQUAL debug) |  | ||||||
|     set(CARGO_BUILD_TYPE "debug") |  | ||||||
| 
 |  | ||||||
|     set(CARGO_FLAG --target=${CARGO_TARGET}) |  | ||||||
| else() |  | ||||||
|     set(CARGO_BUILD_TYPE "release") |  | ||||||
| 
 |  | ||||||
|     if (NOT RUSTC_VERSION MATCHES "nightly") |  | ||||||
|         set(RUSTUP_TOOLCHAIN nightly) |  | ||||||
|     endif() |  | ||||||
| 
 |  | ||||||
|     set(RUSTFLAGS -C\ panic=abort) |  | ||||||
| 
 |  | ||||||
|     set(CARGO_FLAG -Z build-std=std,panic_abort --release --target=${CARGO_TARGET}) |  | ||||||
| endif() |  | ||||||
| 
 |  | ||||||
| set(CARGO_FEATURES "") |  | ||||||
| 
 |  | ||||||
| set(CARGO_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_TARGET}/${CARGO_BUILD_TYPE}") |  | ||||||
| 
 |  | ||||||
| set(BINDINGS_NAME "${LIBRARY_NAME}_core") |  | ||||||
| 
 |  | ||||||
| configure_file( |  | ||||||
|     ${CMAKE_MODULE_PATH}/Cargo.toml.in |  | ||||||
|     ${CMAKE_SOURCE_DIR}/Cargo.toml |  | ||||||
|     @ONLY |  | ||||||
|     NEWLINE_STYLE LF |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| set(INCLUDE_GUARD_PREFIX "${SYMBOL_PREFIX}") |  | ||||||
| 
 |  | ||||||
| configure_file( |  | ||||||
|     ${CMAKE_MODULE_PATH}/cbindgen.toml.in |  | ||||||
|     ${CMAKE_SOURCE_DIR}/cbindgen.toml |  | ||||||
|     @ONLY |  | ||||||
|     NEWLINE_STYLE LF |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| set(CARGO_OUTPUT |  | ||||||
|     ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h |  | ||||||
|     ${CARGO_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${BINDINGS_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| # \note cbindgen's naming behavior isn't fully configurable and it ignores |  | ||||||
| #       `const fn` calls (https://github.com/eqrion/cbindgen/issues/252). |  | ||||||
| add_custom_command( |  | ||||||
|     OUTPUT |  | ||||||
|         ${CARGO_OUTPUT} |  | ||||||
|     COMMAND |  | ||||||
|         # \note cbindgen won't regenerate its output header file after it's been removed but it will after its |  | ||||||
|         #       configuration file has been updated. |  | ||||||
|         ${CMAKE_COMMAND} -DCONDITION=NOT_EXISTS -P ${CMAKE_SOURCE_DIR}/cmake/file-touch.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CMAKE_SOURCE_DIR}/cbindgen.toml |  | ||||||
|     COMMAND |  | ||||||
|         ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN} RUSTFLAGS=${RUSTFLAGS} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} |  | ||||||
|     COMMAND |  | ||||||
|         # Compensate for cbindgen's translation of consecutive uppercase letters to "ScreamingSnakeCase". |  | ||||||
|         ${CMAKE_COMMAND} -DMATCH_REGEX=A_M\([^_]+\)_ -DREPLACE_EXPR=AM_\\1_ -P ${CMAKE_SOURCE_DIR}/cmake/file-regex-replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h |  | ||||||
|     COMMAND |  | ||||||
|         # Compensate for cbindgen ignoring `std:mem::size_of<usize>()` calls. |  | ||||||
|         ${CMAKE_COMMAND} -DMATCH_REGEX=USIZE_ -DREPLACE_EXPR=\+${CMAKE_SIZEOF_VOID_P} -P ${CMAKE_SOURCE_DIR}/cmake/file-regex-replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h |  | ||||||
|     MAIN_DEPENDENCY |  | ||||||
|         src/lib.rs |  | ||||||
|     DEPENDS |  | ||||||
|         src/actor_id.rs |  | ||||||
|         src/byte_span.rs |  | ||||||
|         src/change.rs |  | ||||||
|         src/doc.rs |  | ||||||
|         src/doc/list.rs |  | ||||||
|         src/doc/map.rs |  | ||||||
|         src/doc/utils.rs |  | ||||||
|         src/index.rs |  | ||||||
|         src/item.rs |  | ||||||
|         src/items.rs |  | ||||||
|         src/obj.rs |  | ||||||
|         src/result.rs |  | ||||||
|         src/sync.rs |  | ||||||
|         src/sync/have.rs |  | ||||||
|         src/sync/message.rs |  | ||||||
|         src/sync/state.rs |  | ||||||
|         ${CMAKE_SOURCE_DIR}/build.rs |  | ||||||
|         ${CMAKE_MODULE_PATH}/Cargo.toml.in |  | ||||||
|         ${CMAKE_MODULE_PATH}/cbindgen.toml.in |  | ||||||
|     WORKING_DIRECTORY |  | ||||||
|         ${CMAKE_SOURCE_DIR} |  | ||||||
|     COMMENT |  | ||||||
|         "Producing the bindings' artifacts with Cargo..." |  | ||||||
|     VERBATIM |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| add_custom_target(${BINDINGS_NAME}_artifacts ALL |  | ||||||
|     DEPENDS ${CARGO_OUTPUT} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| add_library(${BINDINGS_NAME} STATIC IMPORTED GLOBAL) |  | ||||||
| 
 |  | ||||||
| target_include_directories(${BINDINGS_NAME} INTERFACE "${CBINDGEN_INCLUDEDIR}") |  | ||||||
| 
 |  | ||||||
| set_target_properties( |  | ||||||
|     ${BINDINGS_NAME} |  | ||||||
|     PROPERTIES |  | ||||||
|         # \note Cargo writes a debug build into a nested directory instead of |  | ||||||
|         #       decorating its name. |  | ||||||
|         DEBUG_POSTFIX "" |  | ||||||
|         DEFINE_SYMBOL "" |  | ||||||
|         IMPORTED_IMPLIB "" |  | ||||||
|         IMPORTED_LOCATION "${CARGO_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${BINDINGS_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" |  | ||||||
|         IMPORTED_NO_SONAME "TRUE" |  | ||||||
|         IMPORTED_SONAME "" |  | ||||||
|         LINKER_LANGUAGE C |  | ||||||
|         PUBLIC_HEADER "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" |  | ||||||
|         SOVERSION "${PROJECT_VERSION_MAJOR}" |  | ||||||
|         VERSION "${PROJECT_VERSION}" |  | ||||||
|         # \note Cargo exports all of the symbols automatically. |  | ||||||
|         WINDOWS_EXPORT_ALL_SYMBOLS "TRUE" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| target_compile_definitions(${BINDINGS_NAME} INTERFACE $<TARGET_PROPERTY:${BINDINGS_NAME},DEFINE_SYMBOL>) |  | ||||||
| 
 |  | ||||||
| set(UTILS_SUBDIR "utils") |  | ||||||
| 
 |  | ||||||
| add_custom_command( |  | ||||||
|     OUTPUT |  | ||||||
|         ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h |  | ||||||
|         ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c |  | ||||||
|     COMMAND |  | ||||||
|         ${CMAKE_COMMAND} -DPROJECT_NAME=${PROJECT_NAME} -DLIBRARY_NAME=${LIBRARY_NAME} -DSUBDIR=${UTILS_SUBDIR} -P ${CMAKE_SOURCE_DIR}/cmake/enum-string-functions-gen.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c |  | ||||||
|     MAIN_DEPENDENCY |  | ||||||
|         ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h |  | ||||||
|     DEPENDS |  | ||||||
|         ${CMAKE_SOURCE_DIR}/cmake/enum-string-functions-gen.cmake |  | ||||||
|     WORKING_DIRECTORY |  | ||||||
|         ${CMAKE_SOURCE_DIR} |  | ||||||
|     COMMENT |  | ||||||
|         "Generating the enum string functions with CMake..." |  | ||||||
|     VERBATIM |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| add_custom_target(${LIBRARY_NAME}_utilities |  | ||||||
|     DEPENDS ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h |  | ||||||
|             ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| add_library(${LIBRARY_NAME}) |  | ||||||
| 
 |  | ||||||
| target_compile_features(${LIBRARY_NAME} PRIVATE c_std_99) |  | ||||||
| 
 |  | ||||||
| set(CMAKE_THREAD_PREFER_PTHREAD TRUE) |  | ||||||
| 
 |  | ||||||
| set(THREADS_PREFER_PTHREAD_FLAG TRUE) |  | ||||||
| 
 |  | ||||||
| find_package(Threads REQUIRED) |  | ||||||
| 
 |  | ||||||
| set(LIBRARY_DEPENDENCIES Threads::Threads ${CMAKE_DL_LIBS}) |  | ||||||
| 
 |  | ||||||
| if(WIN32) |  | ||||||
|     list(APPEND LIBRARY_DEPENDENCIES Bcrypt userenv ws2_32) |  | ||||||
| else() |  | ||||||
|     list(APPEND LIBRARY_DEPENDENCIES m) |  | ||||||
| endif() |  | ||||||
| 
 |  | ||||||
| target_link_libraries(${LIBRARY_NAME} |  | ||||||
|     PUBLIC ${BINDINGS_NAME} |  | ||||||
|            ${LIBRARY_DEPENDENCIES} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| # \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't |  | ||||||
| #       contain a non-existent path so its build-time include directory |  | ||||||
| #       must be specified for all of its dependent targets instead. |  | ||||||
| target_include_directories(${LIBRARY_NAME} |  | ||||||
|     PUBLIC "$<BUILD_INTERFACE:${CBINDGEN_INCLUDEDIR};${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}>" |  | ||||||
|            "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| add_dependencies(${LIBRARY_NAME} ${BINDINGS_NAME}_artifacts) |  | ||||||
| 
 |  | ||||||
| # Generate the configuration header. |  | ||||||
| math(EXPR INTEGER_PROJECT_VERSION_MAJOR "${PROJECT_VERSION_MAJOR} * 100000") | math(EXPR INTEGER_PROJECT_VERSION_MAJOR "${PROJECT_VERSION_MAJOR} * 100000") | ||||||
| 
 | 
 | ||||||
| math(EXPR INTEGER_PROJECT_VERSION_MINOR "${PROJECT_VERSION_MINOR} * 100") | math(EXPR INTEGER_PROJECT_VERSION_MINOR "${PROJECT_VERSION_MINOR} * 100") | ||||||
| 
 | 
 | ||||||
| math(EXPR INTEGER_PROJECT_VERSION_PATCH "${PROJECT_VERSION_PATCH}") | math(EXPR INTEGER_PROJECT_VERSION_PATCH "${PROJECT_VERSION_PATCH}") | ||||||
| 
 | 
 | ||||||
| math(EXPR INTEGER_PROJECT_VERSION "${INTEGER_PROJECT_VERSION_MAJOR} + \ | math(EXPR INTEGER_PROJECT_VERSION "${INTEGER_PROJECT_VERSION_MAJOR} + ${INTEGER_PROJECT_VERSION_MINOR} + ${INTEGER_PROJECT_VERSION_PATCH}") | ||||||
|                                    ${INTEGER_PROJECT_VERSION_MINOR} + \ |  | ||||||
|                                    ${INTEGER_PROJECT_VERSION_PATCH}") |  | ||||||
| 
 | 
 | ||||||
| configure_file( | configure_file( | ||||||
|     ${CMAKE_MODULE_PATH}/config.h.in |     ${CMAKE_MODULE_PATH}/config.h.in | ||||||
|     ${CBINDGEN_TARGET_DIR}/config.h |     config.h | ||||||
|     @ONLY |     @ONLY | ||||||
|     NEWLINE_STYLE LF |     NEWLINE_STYLE LF | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| target_sources(${LIBRARY_NAME} |  | ||||||
|     PRIVATE |  | ||||||
|         src/${UTILS_SUBDIR}/result.c |  | ||||||
|         src/${UTILS_SUBDIR}/stack_callback_data.c |  | ||||||
|         src/${UTILS_SUBDIR}/stack.c |  | ||||||
|         src/${UTILS_SUBDIR}/string.c |  | ||||||
|         ${CMAKE_BINARY_DIR}/src/${UTILS_SUBDIR}/enum_string.c |  | ||||||
|     PUBLIC |  | ||||||
|         FILE_SET api TYPE HEADERS |  | ||||||
|             BASE_DIRS |  | ||||||
|                 ${CBINDGEN_INCLUDEDIR} |  | ||||||
|                 ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR} |  | ||||||
|             FILES |  | ||||||
|                 ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h |  | ||||||
|                 ${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h |  | ||||||
|                 ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/result.h |  | ||||||
|                 ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack_callback_data.h |  | ||||||
|                 ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack.h |  | ||||||
|                 ${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/string.h |  | ||||||
|     INTERFACE |  | ||||||
|         FILE_SET config TYPE HEADERS |  | ||||||
|             BASE_DIRS |  | ||||||
|                 ${CBINDGEN_INCLUDEDIR} |  | ||||||
|             FILES |  | ||||||
|                 ${CBINDGEN_TARGET_DIR}/config.h |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| install( | install( | ||||||
|     TARGETS ${LIBRARY_NAME} |     FILES ${CMAKE_BINARY_DIR}/config.h | ||||||
|     EXPORT ${PROJECT_NAME}-config |     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME} | ||||||
|     FILE_SET api |  | ||||||
|     FILE_SET config |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| # \note Install the Cargo-built core bindings to enable direct linkage. |  | ||||||
| install( |  | ||||||
|     FILES $<TARGET_PROPERTY:${BINDINGS_NAME},IMPORTED_LOCATION> |  | ||||||
|     DESTINATION ${CMAKE_INSTALL_LIBDIR} |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| install(EXPORT ${PROJECT_NAME}-config |  | ||||||
|         FILE ${PROJECT_NAME}-config.cmake |  | ||||||
|         NAMESPACE "${PROJECT_NAME}::" |  | ||||||
|         DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${LIB} |  | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| if(BUILD_TESTING) | if(BUILD_TESTING) | ||||||
|  | @ -300,6 +100,42 @@ if(BUILD_TESTING) | ||||||
|     enable_testing() |     enable_testing() | ||||||
| endif() | endif() | ||||||
| 
 | 
 | ||||||
| add_subdirectory(docs) |  | ||||||
| 
 |  | ||||||
| add_subdirectory(examples EXCLUDE_FROM_ALL) | add_subdirectory(examples EXCLUDE_FROM_ALL) | ||||||
|  | 
 | ||||||
|  | # Generate and install .cmake files | ||||||
|  | set(PROJECT_CONFIG_NAME "${PROJECT_NAME}-config") | ||||||
|  | 
 | ||||||
|  | set(PROJECT_CONFIG_VERSION_NAME "${PROJECT_CONFIG_NAME}-version") | ||||||
|  | 
 | ||||||
|  | write_basic_package_version_file( | ||||||
|  |     ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_VERSION_NAME}.cmake | ||||||
|  |     VERSION ${PROJECT_VERSION} | ||||||
|  |     COMPATIBILITY ExactVersion | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | # The namespace label starts with the title-cased library name. | ||||||
|  | string(SUBSTRING ${LIBRARY_NAME} 0 1 NS_FIRST) | ||||||
|  | 
 | ||||||
|  | string(SUBSTRING ${LIBRARY_NAME} 1 -1 NS_REST) | ||||||
|  | 
 | ||||||
|  | string(TOUPPER ${NS_FIRST} NS_FIRST) | ||||||
|  | 
 | ||||||
|  | string(TOLOWER ${NS_REST} NS_REST) | ||||||
|  | 
 | ||||||
|  | string(CONCAT NAMESPACE ${NS_FIRST} ${NS_REST} "::") | ||||||
|  | 
 | ||||||
|  | # \note CMake doesn't automate the exporting of an imported library's targets | ||||||
|  | #       so the package configuration script must do it. | ||||||
|  | configure_package_config_file( | ||||||
|  |     ${CMAKE_MODULE_PATH}/${PROJECT_CONFIG_NAME}.cmake.in | ||||||
|  |     ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_NAME}.cmake | ||||||
|  |     INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | install( | ||||||
|  |     FILES | ||||||
|  |         ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_NAME}.cmake | ||||||
|  |         ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_CONFIG_VERSION_NAME}.cmake | ||||||
|  |     DESTINATION | ||||||
|  |         ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | @ -7,8 +7,8 @@ license = "MIT" | ||||||
| rust-version = "1.57.0" | rust-version = "1.57.0" | ||||||
| 
 | 
 | ||||||
| [lib] | [lib] | ||||||
| name = "automerge_core" | name = "automerge" | ||||||
| crate-type = ["staticlib"] | crate-type = ["cdylib", "staticlib"] | ||||||
| bench = false | bench = false | ||||||
| doc = false | doc = false | ||||||
| 
 | 
 | ||||||
|  | @ -19,4 +19,4 @@ libc = "^0.2" | ||||||
| smol_str = "^0.1.21" | smol_str = "^0.1.21" | ||||||
| 
 | 
 | ||||||
| [build-dependencies] | [build-dependencies] | ||||||
| cbindgen = "^0.24" | cbindgen = "^0.20" | ||||||
|  |  | ||||||
|  | @ -1,29 +1,22 @@ | ||||||
| # Overview | automerge-c exposes an API to C that can either be used directly or as a basis | ||||||
|  | for other language bindings that have good support for calling into C functions. | ||||||
| 
 | 
 | ||||||
| automerge-c exposes a C API that can either be used directly or as the basis | # Building | ||||||
| for other language bindings that have good support for calling C functions. |  | ||||||
| 
 | 
 | ||||||
| # Installing | See the main README for instructions on getting your environment set up, then | ||||||
|  | you can use `./scripts/ci/cmake-build Release static` to build automerge-c. | ||||||
| 
 | 
 | ||||||
| See the main README for instructions on getting your environment set up and then | It will output two files: | ||||||
| you can build the automerge-c library and install its constituent files within |  | ||||||
| a root directory of your choosing (e.g. "/usr/local") like so: |  | ||||||
| ```shell |  | ||||||
| cmake -E make_directory automerge-c/build |  | ||||||
| cmake -S automerge-c -B automerge-c/build  |  | ||||||
| cmake --build automerge-c/build |  | ||||||
| cmake --install automerge-c/build --prefix "/usr/local" |  | ||||||
| ``` |  | ||||||
| Installation is important because the name, location and structure of CMake's |  | ||||||
| out-of-source build subdirectory is subject to change based on the platform and |  | ||||||
| the release version; generated headers like `automerge-c/config.h` and |  | ||||||
| `automerge-c/utils/enum_string.h` are only sure to be found within their |  | ||||||
| installed locations. |  | ||||||
| 
 | 
 | ||||||
| It's not obvious because they are versioned but the `Cargo.toml` and | - ./build/Cargo/target/include/automerge-c/automerge.h | ||||||
| `cbindgen.toml` configuration files are also generated in order to ensure that | - ./build/Cargo/target/release/libautomerge.a | ||||||
| the project name, project version and library name that they contain match those | 
 | ||||||
| specified within the top-level `CMakeLists.txt` file. | To use these in your application you must arrange for your C compiler to find | ||||||
|  | these files, either by moving them to the right location on your computer, or | ||||||
|  | by configuring the compiler to reference these directories. | ||||||
|  | 
 | ||||||
|  | - `export LDFLAGS=-L./build/Cargo/target/release -lautomerge` | ||||||
|  | - `export CFLAGS=-I./build/Cargo/target/include` | ||||||
| 
 | 
 | ||||||
| If you'd like to cross compile the library for different platforms you can do so | If you'd like to cross compile the library for different platforms you can do so | ||||||
| using [cross](https://github.com/cross-rs/cross). For example: | using [cross](https://github.com/cross-rs/cross). For example: | ||||||
|  | @ -32,176 +25,134 @@ using [cross](https://github.com/cross-rs/cross). For example: | ||||||
| 
 | 
 | ||||||
| This will output a shared library in the directory `rust/target/aarch64-unknown-linux-gnu/release/`. | This will output a shared library in the directory `rust/target/aarch64-unknown-linux-gnu/release/`. | ||||||
| 
 | 
 | ||||||
| You can replace `aarch64-unknown-linux-gnu` with any | You can replace `aarch64-unknown-linux-gnu` with any [cross supported targets](https://github.com/cross-rs/cross#supported-targets). The targets below are known to work, though other targets are expected to work too: | ||||||
| [cross supported targets](https://github.com/cross-rs/cross#supported-targets). |  | ||||||
| The targets below are known to work, though other targets are expected to work |  | ||||||
| too: |  | ||||||
| 
 | 
 | ||||||
| - `x86_64-apple-darwin` | - `x86_64-apple-darwin` | ||||||
| - `aarch64-apple-darwin` | - `aarch64-apple-darwin` | ||||||
| - `x86_64-unknown-linux-gnu` | - `x86_64-unknown-linux-gnu` | ||||||
| - `aarch64-unknown-linux-gnu` | - `aarch64-unknown-linux-gnu` | ||||||
| 
 | 
 | ||||||
| As a caveat, CMake generates the `automerge.h` header file in terms of the | As a caveat, the header file is currently 32/64-bit dependent. You can re-use it | ||||||
| processor architecture of the computer on which it was built so, for example, | for all 64-bit architectures, but you must generate a specific header for 32-bit | ||||||
| don't use a header generated for a 64-bit processor if your target is a 32-bit | targets. | ||||||
| processor. |  | ||||||
| 
 | 
 | ||||||
| # Usage | # Usage | ||||||
| 
 | 
 | ||||||
| You can build and view the C API's HTML reference documentation like so: | For full reference, read through `automerge.h`, or to get started quickly look | ||||||
| ```shell | at the | ||||||
| cmake -E make_directory automerge-c/build |  | ||||||
| cmake -S automerge-c -B automerge-c/build  |  | ||||||
| cmake --build automerge-c/build --target automerge_docs |  | ||||||
| firefox automerge-c/build/src/html/index.html |  | ||||||
| ``` |  | ||||||
| 
 |  | ||||||
| To get started quickly, look at the |  | ||||||
| [examples](https://github.com/automerge/automerge-rs/tree/main/rust/automerge-c/examples). | [examples](https://github.com/automerge/automerge-rs/tree/main/rust/automerge-c/examples). | ||||||
| 
 | 
 | ||||||
| Almost all operations in automerge-c act on an Automerge document | Almost all operations in automerge-c act on an AMdoc struct which you can get | ||||||
| (`AMdoc` struct) which is structurally similar to a JSON document. | from `AMcreate()` or `AMload()`. Operations on a given doc are not thread safe | ||||||
|  | so you must use a mutex or similar to avoid calling more than one function with | ||||||
|  | the same AMdoc pointer concurrently. | ||||||
| 
 | 
 | ||||||
| You can get a document by calling either `AMcreate()` or `AMload()`. Operations | As with all functions that either allocate memory, or could fail if given | ||||||
| on a given document are not thread-safe so you must use a mutex or similar to | invalid input, `AMcreate()` returns an `AMresult`. The `AMresult` contains the | ||||||
| avoid calling more than one function on the same one concurrently. | returned doc (or error message), and must be freed with `AMfree()` after you are | ||||||
|  | done to avoid leaking memory. | ||||||
| 
 | 
 | ||||||
| A C API function that could succeed or fail returns a result (`AMresult` struct) |  | ||||||
| containing a status code (`AMstatus` enum) and either a sequence of at least one |  | ||||||
| item (`AMitem` struct) or a read-only view onto a UTF-8 error message string |  | ||||||
| (`AMbyteSpan` struct). |  | ||||||
| An item contains up to three components: an index within its parent object |  | ||||||
| (`AMbyteSpan` struct or `size_t`), a unique identifier (`AMobjId` struct) and a |  | ||||||
| value. |  | ||||||
| The result of a successful function call that doesn't produce any values will |  | ||||||
| contain a single item that is void (`AM_VAL_TYPE_VOID`). |  | ||||||
| A returned result **must** be passed to `AMresultFree()` once the item(s) or |  | ||||||
| error message it contains is no longer needed in order to avoid a memory leak. |  | ||||||
| ``` | ``` | ||||||
| #include <stdio.h> |  | ||||||
| #include <stdlib.h> |  | ||||||
| #include <automerge-c/automerge.h> | #include <automerge-c/automerge.h> | ||||||
| #include <automerge-c/utils/string.h> | #include <stdio.h> | ||||||
| 
 | 
 | ||||||
| int main(int argc, char** argv) { | int main(int argc, char** argv) { | ||||||
|   AMresult *docResult = AMcreate(NULL); |   AMresult *docResult = AMcreate(NULL); | ||||||
| 
 | 
 | ||||||
|   if (AMresultStatus(docResult) != AM_STATUS_OK) { |   if (AMresultStatus(docResult) != AM_STATUS_OK) { | ||||||
|     char* const err_msg = AMstrdup(AMresultError(docResult), NULL); |     printf("failed to create doc: %s", AMerrorMessage(docResult).src); | ||||||
|     printf("failed to create doc: %s", err_msg); |  | ||||||
|     free(err_msg); |  | ||||||
|     goto cleanup; |     goto cleanup; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   AMdoc *doc; |   AMdoc *doc = AMresultValue(docResult).doc; | ||||||
|   AMitemToDoc(AMresultItem(docResult), &doc); |  | ||||||
| 
 | 
 | ||||||
|   // useful code goes here! |   // useful code goes here! | ||||||
| 
 | 
 | ||||||
| cleanup: | cleanup: | ||||||
|   AMresultFree(docResult); |   AMfree(docResult); | ||||||
| } | } | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
| If you are writing an application in C, the `AMstackItem()`, `AMstackItems()` | If you are writing code in C directly, you can use the `AMpush()` helper | ||||||
| and `AMstackResult()` functions enable the lifetimes of anonymous results to be | function to reduce the boilerplate of error handling and freeing for you (see | ||||||
| centrally managed and allow the same validation logic to be reused without | examples/quickstart.c). | ||||||
| relying upon the `goto` statement (see examples/quickstart.c). |  | ||||||
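A minimal sketch of that pattern, with the validation callback left out for
brevity (examples/quickstart.c supplies one named `abort_cb` together with the
`AMexpect()` macro):

```
#include <automerge-c/automerge.h>
#include <automerge-c/utils/stack.h>

int main(int argc, char** argv) {
  AMstack* stack = NULL;

  // Each result is pushed onto the stack instead of being freed by hand; a
  // real application would pass a validation callback and AMexpect(...) here
  // instead of the two NULLs (see examples/quickstart.c).
  AMdoc* doc;
  AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), NULL, NULL), &doc);

  // Passing a NULL stack tells AMstackItem() to free the result as soon as
  // its item has been examined.
  AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("key"), AMstr("value")), NULL, NULL);

  // One call releases every result that is still on the stack.
  AMstackFree(&stack);
}
```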
| 
 | 
 | ||||||
| If you are wrapping automerge-c in another language, particularly one that has a | If you are wrapping automerge-c in another language, particularly one that has a | ||||||
| garbage collector, you can call the `AMresultFree()` function within a finalizer | garbage collector, you can call `AMfree` within a finalizer to ensure that memory | ||||||
| to ensure that memory is reclaimed when it is no longer needed. | is reclaimed when it is no longer needed. | ||||||
| 
 | 
 | ||||||
| Automerge documents consist of a mutable root which is always a map from string | An AMdoc wraps an automerge document, which is very similar to a JSON document. | ||||||
| keys to values. A value can be one of the following types: | Automerge documents consist of a mutable root, which is always a map from string | ||||||
|  | keys to values. Values can have the following types: | ||||||
| 
 | 
 | ||||||
| - A number of type double / int64_t / uint64_t | - A number of type double / int64_t / uint64_t | ||||||
| - An explicit true / false / null | - An explicit true / false / null | ||||||
| - An immutable UTF-8 string (`AMbyteSpan`). | - An immutable utf-8 string (AMbyteSpan) | ||||||
| - An immutable array of arbitrary bytes (`AMbyteSpan`). | - An immutable array of arbitrary bytes (AMbyteSpan) | ||||||
| - A mutable map from string keys to values. | - A mutable map from string keys to values (AMmap) | ||||||
| - A mutable list of values. | - A mutable list of values (AMlist) | ||||||
| - A mutable UTF-8 string. | - A mutable string (AMtext) | ||||||
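A sketch of how those types are written (hedged: `AMmapPutInt()`, `AMmapPutF64()`
and `AMmapPutNull()` are assumed to follow the same calling convention as the
`AMmapPutStr()`, `AMmapPutBool()` and `AMmapPutObject()` calls shown elsewhere
in this README):

```
#include <automerge-c/automerge.h>
#include <automerge-c/utils/stack.h>

int main(int argc, char** argv) {
  AMstack* stack = NULL;
  AMdoc* doc;
  AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), NULL, NULL), &doc);

  // Passing a NULL stack frees each setter's result as soon as it's examined.
  AMstackItem(NULL, AMmapPutInt(doc, AM_ROOT, AMstr("count"), 1), NULL, NULL);      // int64_t (assumed setter)
  AMstackItem(NULL, AMmapPutF64(doc, AM_ROOT, AMstr("pi"), 3.14), NULL, NULL);      // double (assumed setter)
  AMstackItem(NULL, AMmapPutBool(doc, AM_ROOT, AMstr("done"), false), NULL, NULL);  // boolean
  AMstackItem(NULL, AMmapPutNull(doc, AM_ROOT, AMstr("nothing")), NULL, NULL);      // explicit null (assumed setter)
  AMstackItem(NULL, AMmapPutStr(doc, AM_ROOT, AMstr("title"), AMstr("hi")), NULL, NULL); // immutable string

  // Mutable nested objects (maps, lists, text) are created with
  // AMmapPutObject()/AMlistPutObject() and an AM_OBJ_TYPE_* tag.
  AMstackItem(&stack, AMmapPutObject(doc, AM_ROOT, AMstr("cards"), AM_OBJ_TYPE_LIST), NULL, NULL);

  AMstackFree(&stack);
}
```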
| 
 | 
 | ||||||
| If you read from a location in the document with no value, an item with type | If you read from a location in the document with no value a value with | ||||||
| `AM_VAL_TYPE_VOID` will be returned, but you cannot write such a value | `.tag == AM_VALUE_VOID` will be returned, but you cannot write such a value explicitly. | ||||||
| explicitly. |  | ||||||
| 
 | 
 | ||||||
| Under the hood, automerge references a mutable object by its object identifier | Under the hood, automerge references mutable objects by the internal object id, | ||||||
| where `AM_ROOT` signifies a document's root map object. | and `AM_ROOT` is always the object id of the root value. | ||||||
| 
 | 
 | ||||||
| There are functions to put each type of value into either a map or a list, and | There is a function to put each type of value into either a map or a list, and a | ||||||
| functions to read the current or a historical value from a map or a list. As (in general) collaborators | function to read the current value from a list. As (in general) collaborators | ||||||
| may edit the document at any time, you cannot guarantee that the type of the | may edit the document at any time, you cannot guarantee that the type of the | ||||||
| value at a given part of the document will stay the same. As a result, reading | value at a given part of the document will stay the same. As a result, reading | ||||||
| from the document will return an `AMitem` struct that you can inspect to | from the document will return an `AMvalue` union that you can inspect to | ||||||
| determine the type of value that it contains. | determine its type. | ||||||
| 
 | 
 | ||||||
| Strings in automerge-c are represented using an `AMbyteSpan` which contains a | Strings in automerge-c are represented using an `AMbyteSpan` which contains a | ||||||
| pointer and a length. Strings must be valid UTF-8 and may contain NUL (`0`) | pointer and a length. Strings must be valid utf-8 and may contain null bytes. | ||||||
| characters. | As a convenience you can use `AMstr()` to get the representation of a | ||||||
| For your convenience, you can call `AMstr()` to get the `AMbyteSpan` struct | null-terminated C string as an `AMbyteSpan`. | ||||||
| equivalent of a null-terminated byte string or `AMstrdup()` to get the |  | ||||||
| representation of an `AMbyteSpan` struct as a null-terminated byte string |  | ||||||
| wherein its NUL characters have been removed/replaced as you choose. |  | ||||||
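For instance, a short sketch of both helpers (as in the examples below, `NULL`
is passed as `AMstrdup()`'s replacement argument):

```
#include <stdio.h>
#include <stdlib.h>
#include <automerge-c/automerge.h>
#include <automerge-c/utils/string.h>

int main(int argc, char** argv) {
  // AMstr() wraps a null-terminated C string in an AMbyteSpan view; no copy is made.
  AMbyteSpan const name = AMstr("automerge");
  printf("%zu bytes\n", name.count);

  // AMstrdup() makes a heap-allocated, null-terminated copy of a view which
  // must be released with free().
  char* const c_str = AMstrdup(name, NULL);
  printf("%s\n", c_str);
  free(c_str);
}
```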
| 
 | 
 | ||||||
| Putting all of that together, to read and write from the root of the document | Putting all of that together, to read and write from the root of the document | ||||||
| you can do this: | you can do this: | ||||||
| 
 | 
 | ||||||
| ``` | ``` | ||||||
| #include <stdio.h> |  | ||||||
| #include <stdlib.h> |  | ||||||
| #include <automerge-c/automerge.h> | #include <automerge-c/automerge.h> | ||||||
| #include <automerge-c/utils/string.h> | #include <stdio.h> | ||||||
| 
 | 
 | ||||||
| int main(int argc, char** argv) { | int main(int argc, char** argv) { | ||||||
|   // ...previous example... |   // ...previous example... | ||||||
|   AMdoc *doc;  |   AMdoc *doc = AMresultValue(docResult).doc; | ||||||
|   AMitemToDoc(AMresultItem(docResult), &doc); |  | ||||||
| 
 | 
 | ||||||
|   AMresult *putResult = AMmapPutStr(doc, AM_ROOT, AMstr("key"), AMstr("value")); |   AMresult *putResult = AMmapPutStr(doc, AM_ROOT, AMstr("key"), AMstr("value")); | ||||||
|   if (AMresultStatus(putResult) != AM_STATUS_OK) { |   if (AMresultStatus(putResult) != AM_STATUS_OK) { | ||||||
|     char* const err_msg = AMstrdup(AMresultError(putResult), NULL); |     printf("failed to put: %s", AMerrorMessage(putResult).src); | ||||||
|     printf("failed to put: %s", err_msg); |  | ||||||
|     free(err_msg); |  | ||||||
|     goto cleanup; |     goto cleanup; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   AMresult *getResult = AMmapGet(doc, AM_ROOT, AMstr("key"), NULL); |   AMresult *getResult = AMmapGet(doc, AM_ROOT, AMstr("key"), NULL); | ||||||
|   if (AMresultStatus(getResult) != AM_STATUS_OK) { |   if (AMresultStatus(getResult) != AM_STATUS_OK) { | ||||||
|     char* const err_msg = AMstrdup(AMresultError(putResult), NULL); |     printf("failed to get: %s", AMerrorMessage(getResult).src); | ||||||
|     printf("failed to get: %s", err_msg); |  | ||||||
|     free(err_msg); |  | ||||||
|     goto cleanup; |     goto cleanup; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|   AMbyteSpan got; |   AMvalue got = AMresultValue(getResult); | ||||||
|   if (AMitemToStr(AMresultItem(getResult), &got)) { |   if (got.tag != AM_VALUE_STR) { | ||||||
|     char* const c_str = AMstrdup(got, NULL); |  | ||||||
|     printf("Got %zu-character string \"%s\"", got.count, c_str); |  | ||||||
|     free(c_str); |  | ||||||
|   } else { |  | ||||||
|     printf("expected to read a string!"); |     printf("expected to read a string!"); | ||||||
|     goto cleanup; |     goto cleanup; | ||||||
|   } |   } | ||||||
| 
 | 
 | ||||||
|  |   printf("Got %zu-character string `%s`", got.str.count, got.str.src); | ||||||
| 
 | 
 | ||||||
| cleanup: | cleanup: | ||||||
|   AMresultFree(getResult); |   AMfree(getResult); | ||||||
|   AMresultFree(putResult); |   AMfree(putResult); | ||||||
|   AMresultFree(docResult); |   AMfree(docResult); | ||||||
| } | } | ||||||
| ``` | ``` | ||||||
| 
 | 
 | ||||||
| Functions that do not return an `AMresult` (for example `AMitemKey()`) do | Functions that do not return an `AMresult` (for example `AMmapItemValue()`) do | ||||||
| not allocate memory but rather reference memory that was previously | not allocate memory, but continue to reference memory that was previously | ||||||
| allocated. It's therefore important to keep the original `AMresult` alive (in | allocated. It's thus important to keep the original `AMresult` alive (in this | ||||||
| this case the one returned by `AMmapRange()`) until after you are finished with | case the one returned by `AMmapRange()`) until after you are done with the return | ||||||
| the items that it contains. However, the memory for an individual `AMitem` can | values of these functions. | ||||||
| be shared with a new `AMresult` by calling `AMitemResult()` on it. In other |  | ||||||
| words, a select group of items can be filtered out of a collection and only each |  | ||||||
| one's corresponding `AMresult` must be kept alive from that point forward; the |  | ||||||
| originating collection's `AMresult` can be safely freed. |  | ||||||
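A sketch of that filtering pattern, assuming (as the wording above implies) that
`AMitemResult()` returns the new `AMresult` pointer; the collection here comes
from `AMgetChanges()` as in examples/quickstart.c:

```
#include <automerge-c/automerge.h>
#include <automerge-c/utils/stack.h>

int main(int argc, char** argv) {
  // ...previous example...
  AMstack* stack = NULL;
  AMdoc* doc;
  AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), NULL, NULL), &doc);

  // The collection of changes is owned by the result pushed onto the stack.
  AMitems changes = AMstackItems(&stack, AMgetChanges(doc, NULL), NULL, NULL);

  AMresult* kept = NULL;
  AMitem* item = AMitemsNext(&changes, 1);
  if (item) {
    // Share this item's memory with a new result so that it outlives the
    // originating collection.
    kept = AMitemResult(item);
  }

  // The collection's result can now be freed safely...
  AMstackFree(&stack);

  // ...while the item behind `kept` remains valid until its own result is freed.
  if (kept) {
    AMresultFree(kept);
  }
}
```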
| 
 | 
 | ||||||
| Beyond that, good luck! | Beyond that, good luck! | ||||||
|  |  | ||||||
|  | @ -10,7 +10,7 @@ fn main() { | ||||||
|     let config = cbindgen::Config::from_file("cbindgen.toml") |     let config = cbindgen::Config::from_file("cbindgen.toml") | ||||||
|         .expect("Unable to find cbindgen.toml configuration file"); |         .expect("Unable to find cbindgen.toml configuration file"); | ||||||
| 
 | 
 | ||||||
|     if let Ok(writer) = cbindgen::generate_with_config(crate_dir, config) { |     if let Ok(writer) = cbindgen::generate_with_config(&crate_dir, config) { | ||||||
|         // \note CMake sets this environment variable before invoking Cargo so
 |         // \note CMake sets this environment variable before invoking Cargo so
 | ||||||
|         //       that it can direct the generated header file into its
 |         //       that it can direct the generated header file into its
 | ||||||
|         //       out-of-source build directory for post-processing.
 |         //       out-of-source build directory for post-processing.
 | ||||||
|  |  | ||||||
|  | @ -1,7 +1,7 @@ | ||||||
| after_includes = """\n | after_includes = """\n | ||||||
| /** | /** | ||||||
|  * \\defgroup enumerations Public Enumerations |  * \\defgroup enumerations Public Enumerations | ||||||
|  *  Symbolic names for integer constants. |      Symbolic names for integer constants. | ||||||
|  */ |  */ | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  | @ -12,23 +12,21 @@ after_includes = """\n | ||||||
| #define AM_ROOT NULL | #define AM_ROOT NULL | ||||||
| 
 | 
 | ||||||
| /** | /** | ||||||
|  * \\memberof AMdoc |  * \\memberof AMchangeHash | ||||||
|  * \\def AM_CHANGE_HASH_SIZE |  * \\def AM_CHANGE_HASH_SIZE | ||||||
|  * \\brief The count of bytes in a change hash. |  * \\brief The count of bytes in a change hash. | ||||||
|  */ |  */ | ||||||
| #define AM_CHANGE_HASH_SIZE 32 | #define AM_CHANGE_HASH_SIZE 32 | ||||||
| """ | """ | ||||||
| autogen_warning = """ | autogen_warning = "/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. */" | ||||||
| /** |  | ||||||
|  * \\file |  | ||||||
|  * \\brief All constants, functions and types in the core Automerge C API. |  | ||||||
|  * |  | ||||||
|  * \\warning This file is auto-generated by cbindgen. |  | ||||||
|  */ |  | ||||||
| """ |  | ||||||
| documentation = true | documentation = true | ||||||
| documentation_style = "doxy" | documentation_style = "doxy" | ||||||
| include_guard = "AUTOMERGE_C_H" | header = """ | ||||||
|  | /** \\file | ||||||
|  |  * All constants, functions and types in the Automerge library's C API. | ||||||
|  |  */ | ||||||
|  |  """ | ||||||
|  | include_guard = "AUTOMERGE_H" | ||||||
| includes = [] | includes = [] | ||||||
| language = "C" | language = "C" | ||||||
| line_length = 140 | line_length = 140 | ||||||
|  |  | ||||||
|  | @ -1,22 +0,0 @@ | ||||||
| [package] |  | ||||||
| name = "@PROJECT_NAME@" |  | ||||||
| version = "@PROJECT_VERSION@" |  | ||||||
| authors = ["Orion Henry <orion.henry@gmail.com>", "Jason Kankiewicz <jason.kankiewicz@gmail.com>"] |  | ||||||
| edition = "2021" |  | ||||||
| license = "MIT" |  | ||||||
| rust-version = "1.57.0" |  | ||||||
| 
 |  | ||||||
| [lib] |  | ||||||
| name = "@BINDINGS_NAME@" |  | ||||||
| crate-type = ["staticlib"] |  | ||||||
| bench = false |  | ||||||
| doc = false |  | ||||||
| 
 |  | ||||||
| [dependencies] |  | ||||||
| @LIBRARY_NAME@ = { path = "../@LIBRARY_NAME@" } |  | ||||||
| hex = "^0.4.3" |  | ||||||
| libc = "^0.2" |  | ||||||
| smol_str = "^0.1.21" |  | ||||||
| 
 |  | ||||||
| [build-dependencies] |  | ||||||
| cbindgen = "^0.24" |  | ||||||
|  | @ -1,48 +0,0 @@ | ||||||
| after_includes = """\n |  | ||||||
| /** |  | ||||||
|  * \\defgroup enumerations Public Enumerations |  | ||||||
|  *  Symbolic names for integer constants. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * \\memberof AMdoc |  | ||||||
|  * \\def AM_ROOT |  | ||||||
|  * \\brief The root object of a document. |  | ||||||
|  */ |  | ||||||
| #define AM_ROOT NULL |  | ||||||
| 
 |  | ||||||
| /** |  | ||||||
|  * \\memberof AMdoc |  | ||||||
|  * \\def AM_CHANGE_HASH_SIZE |  | ||||||
|  * \\brief The count of bytes in a change hash. |  | ||||||
|  */ |  | ||||||
| #define AM_CHANGE_HASH_SIZE 32 |  | ||||||
| """ |  | ||||||
| autogen_warning = """ |  | ||||||
| /** |  | ||||||
|  * \\file |  | ||||||
|  * \\brief All constants, functions and types in the core Automerge C API. |  | ||||||
|  * |  | ||||||
|  * \\warning This file is auto-generated by cbindgen. |  | ||||||
|  */ |  | ||||||
| """ |  | ||||||
| documentation = true |  | ||||||
| documentation_style = "doxy" |  | ||||||
| include_guard = "@INCLUDE_GUARD_PREFIX@_H" |  | ||||||
| includes = [] |  | ||||||
| language = "C" |  | ||||||
| line_length = 140 |  | ||||||
| no_includes = true |  | ||||||
| style = "both" |  | ||||||
| sys_includes = ["stdbool.h", "stddef.h", "stdint.h", "time.h"] |  | ||||||
| usize_is_size_t = true |  | ||||||
| 
 |  | ||||||
| [enum] |  | ||||||
| derive_const_casts = true |  | ||||||
| enum_class = true |  | ||||||
| must_use = "MUST_USE_ENUM" |  | ||||||
| prefix_with_name = true |  | ||||||
| rename_variants = "ScreamingSnakeCase" |  | ||||||
| 
 |  | ||||||
| [export] |  | ||||||
| item_types = ["constants", "enums", "functions", "opaque", "structs", "typedefs"] |  | ||||||
|  | @ -1,35 +1,14 @@ | ||||||
| #ifndef @INCLUDE_GUARD_PREFIX@_CONFIG_H | #ifndef @SYMBOL_PREFIX@_CONFIG_H | ||||||
| #define @INCLUDE_GUARD_PREFIX@_CONFIG_H | #define @SYMBOL_PREFIX@_CONFIG_H | ||||||
| /**
 | 
 | ||||||
|  * \file | /* This header is auto-generated by CMake. */ | ||||||
|  * \brief Configuration pararameters defined by the build system. |  | ||||||
|  * |  | ||||||
|  * \warning This file is auto-generated by CMake. |  | ||||||
|  */ |  | ||||||
| 
 | 
 | ||||||
| /**
 |  | ||||||
|  * \def @SYMBOL_PREFIX@_VERSION |  | ||||||
|  * \brief Denotes a semantic version of the form {MAJOR}{MINOR}{PATCH} as three, |  | ||||||
|  *        two-digit decimal numbers without leading zeros (e.g. 100 is 0.1.0). |  | ||||||
|  */ |  | ||||||
| #define @SYMBOL_PREFIX@_VERSION @INTEGER_PROJECT_VERSION@ | #define @SYMBOL_PREFIX@_VERSION @INTEGER_PROJECT_VERSION@ | ||||||
| 
 | 
 | ||||||
| /**
 |  | ||||||
|  * \def @SYMBOL_PREFIX@_MAJOR_VERSION |  | ||||||
|  * \brief Denotes a semantic major version as a decimal number. |  | ||||||
|  */ |  | ||||||
| #define @SYMBOL_PREFIX@_MAJOR_VERSION (@SYMBOL_PREFIX@_VERSION / 100000) | #define @SYMBOL_PREFIX@_MAJOR_VERSION (@SYMBOL_PREFIX@_VERSION / 100000) | ||||||
| 
 | 
 | ||||||
| /**
 |  | ||||||
|  * \def @SYMBOL_PREFIX@_MINOR_VERSION |  | ||||||
|  * \brief Denotes a semantic minor version as a decimal number. |  | ||||||
|  */ |  | ||||||
| #define @SYMBOL_PREFIX@_MINOR_VERSION ((@SYMBOL_PREFIX@_VERSION / 100) % 1000) | #define @SYMBOL_PREFIX@_MINOR_VERSION ((@SYMBOL_PREFIX@_VERSION / 100) % 1000) | ||||||
| 
 | 
 | ||||||
| /**
 |  | ||||||
|  * \def @SYMBOL_PREFIX@_PATCH_VERSION |  | ||||||
|  * \brief Denotes a semantic patch version as a decimal number. |  | ||||||
|  */ |  | ||||||
| #define @SYMBOL_PREFIX@_PATCH_VERSION (@SYMBOL_PREFIX@_VERSION % 100) | #define @SYMBOL_PREFIX@_PATCH_VERSION (@SYMBOL_PREFIX@_VERSION % 100) | ||||||
| 
 | 
 | ||||||
| #endif /* @INCLUDE_GUARD_PREFIX@_CONFIG_H */ | #endif  /* @SYMBOL_PREFIX@_CONFIG_H */ | ||||||
|  |  | ||||||
|  | @ -1,183 +0,0 @@ | ||||||
| # This CMake script is used to generate a header and a source file for utility |  | ||||||
| # functions that convert the tags of generated enum types into strings and |  | ||||||
| # strings into the tags of generated enum types. |  | ||||||
| cmake_minimum_required(VERSION 3.23 FATAL_ERROR) |  | ||||||
| 
 |  | ||||||
| # Seeks the starting line of the source enum's declaration. |  | ||||||
| macro(seek_enum_mode) |  | ||||||
|     if (line MATCHES "^(typedef[ \t]+)?enum ") |  | ||||||
|         string(REGEX REPLACE "^enum ([0-9a-zA-Z_]+).*$" "\\1" enum_name "${line}") |  | ||||||
|         set(mode "read_tags") |  | ||||||
|     endif() |  | ||||||
| endmacro() |  | ||||||
| 
 |  | ||||||
| # Scans the input for the current enum's tags. |  | ||||||
| macro(read_tags_mode) |  | ||||||
|     if(line MATCHES "^}") |  | ||||||
|         set(mode "generate") |  | ||||||
|     elseif(line MATCHES "^[A-Z0-9_]+.*$") |  | ||||||
|         string(REGEX REPLACE "^([A-Za-z0-9_]+).*$" "\\1" tmp "${line}") |  | ||||||
|         list(APPEND enum_tags "${tmp}") |  | ||||||
|     endif() |  | ||||||
| endmacro() |  | ||||||
| 
 |  | ||||||
| macro(write_header_file) |  | ||||||
|     # Generate a to-string function declaration. |  | ||||||
|     list(APPEND header_body |  | ||||||
|         "/**\n" |  | ||||||
|         " * \\ingroup enumerations\n" |  | ||||||
|         " * \\brief Gets the string representation of an `${enum_name}` enum tag.\n" |  | ||||||
|         " *\n" |  | ||||||
|         " * \\param[in] tag An `${enum_name}` enum tag.\n" |  | ||||||
|         " * \\return A null-terminated byte string.\n" |  | ||||||
|         " */\n" |  | ||||||
|         "char const* ${enum_name}ToString(${enum_name} const tag)\;\n" |  | ||||||
|         "\n") |  | ||||||
|     # Generate a from-string function declaration. |  | ||||||
|     list(APPEND header_body |  | ||||||
|         "/**\n" |  | ||||||
|         " * \\ingroup enumerations\n" |  | ||||||
|         " * \\brief Gets an `${enum_name}` enum tag from its string representation.\n" |  | ||||||
|         " *\n" |  | ||||||
|         " * \\param[out] dest An `${enum_name}` enum tag pointer.\n" |  | ||||||
|         " * \\param[in] src A null-terminated byte string.\n" |  | ||||||
|         " * \\return `true` if \\p src matches the string representation of an\n" |  | ||||||
|         " *         `${enum_name}` enum tag, `false` otherwise.\n" |  | ||||||
|         " */\n" |  | ||||||
|         "bool ${enum_name}FromString(${enum_name}* dest, char const* const src)\;\n" |  | ||||||
|         "\n") |  | ||||||
| endmacro() |  | ||||||
| 
 |  | ||||||
| macro(write_source_file) |  | ||||||
|     # Generate a to-string function implementation. |  | ||||||
|     list(APPEND source_body |  | ||||||
|         "char const* ${enum_name}ToString(${enum_name} const tag) {\n" |  | ||||||
|         "    switch (tag) {\n" |  | ||||||
|         "        default:\n" |  | ||||||
|         "            return \"???\"\;\n") |  | ||||||
|     foreach(label IN LISTS enum_tags) |  | ||||||
|         list(APPEND source_body |  | ||||||
|             "        case ${label}:\n" |  | ||||||
|             "            return \"${label}\"\;\n") |  | ||||||
|     endforeach() |  | ||||||
|     list(APPEND source_body |  | ||||||
|         "    }\n" |  | ||||||
|         "}\n" |  | ||||||
|         "\n") |  | ||||||
|     # Generate a from-string function implementation. |  | ||||||
|     list(APPEND source_body |  | ||||||
|         "bool ${enum_name}FromString(${enum_name}* dest, char const* const src) {\n") |  | ||||||
|     foreach(label IN LISTS enum_tags) |  | ||||||
|         list(APPEND source_body |  | ||||||
|             "    if (!strcmp(src, \"${label}\")) {\n" |  | ||||||
|             "        *dest = ${label}\;\n" |  | ||||||
|             "        return true\;\n" |  | ||||||
|             "    }\n") |  | ||||||
|     endforeach() |  | ||||||
|     list(APPEND source_body |  | ||||||
|         "    return false\;\n" |  | ||||||
|         "}\n" |  | ||||||
|         "\n") |  | ||||||
| endmacro() |  | ||||||
| 
 |  | ||||||
| function(main) |  | ||||||
|     set(header_body "") |  | ||||||
|     # File header and includes. |  | ||||||
|     list(APPEND header_body |  | ||||||
|         "#ifndef ${include_guard}\n" |  | ||||||
|         "#define ${include_guard}\n" |  | ||||||
|         "/**\n" |  | ||||||
|         " * \\file\n" |  | ||||||
|         " * \\brief Utility functions for converting enum tags into null-terminated\n" |  | ||||||
|         " *        byte strings and vice versa.\n" |  | ||||||
|         " *\n" |  | ||||||
|         " * \\warning This file is auto-generated by CMake.\n" |  | ||||||
|         " */\n" |  | ||||||
|         "\n" |  | ||||||
|         "#include <stdbool.h>\n" |  | ||||||
|         "\n" |  | ||||||
|         "#include <${library_include}>\n" |  | ||||||
|         "\n") |  | ||||||
|     set(source_body "") |  | ||||||
|     # File includes. |  | ||||||
|     list(APPEND source_body |  | ||||||
|         "/** \\warning This file is auto-generated by CMake. */\n" |  | ||||||
|         "\n" |  | ||||||
|         "#include \"stdio.h\"\n" |  | ||||||
|         "#include \"string.h\"\n" |  | ||||||
|         "\n" |  | ||||||
|         "#include <${header_include}>\n" |  | ||||||
|         "\n") |  | ||||||
|     set(enum_name "") |  | ||||||
|     set(enum_tags "") |  | ||||||
|     set(mode "seek_enum") |  | ||||||
|     file(STRINGS "${input_path}" lines) |  | ||||||
|     foreach(line IN LISTS lines) |  | ||||||
|         string(REGEX REPLACE "^(.+)(//.*)?" "\\1" line "${line}") |  | ||||||
|         string(STRIP "${line}" line) |  | ||||||
|         if(mode STREQUAL "seek_enum") |  | ||||||
|             seek_enum_mode() |  | ||||||
|         elseif(mode STREQUAL "read_tags") |  | ||||||
|             read_tags_mode() |  | ||||||
|         else() |  | ||||||
|             # The end of the enum declaration was reached. |  | ||||||
|             if(NOT enum_name) |  | ||||||
|                 # The end of the file was reached. |  | ||||||
|                 return() |  | ||||||
|             endif() |  | ||||||
|             if(NOT enum_tags) |  | ||||||
|                 message(FATAL_ERROR "No tags found for `${enum_name}`.") |  | ||||||
|             endif() |  | ||||||
|             string(TOLOWER "${enum_name}" output_stem_prefix) |  | ||||||
|             string(CONCAT output_stem "${output_stem_prefix}" "_string") |  | ||||||
|             cmake_path(REPLACE_EXTENSION output_stem "h" OUTPUT_VARIABLE output_header_basename) |  | ||||||
|             write_header_file() |  | ||||||
|             write_source_file() |  | ||||||
|             set(enum_name "") |  | ||||||
|             set(enum_tags "") |  | ||||||
|             set(mode "seek_enum") |  | ||||||
|         endif() |  | ||||||
|     endforeach() |  | ||||||
|     # File footer. |  | ||||||
|     list(APPEND header_body |  | ||||||
|         "#endif /* ${include_guard} */\n") |  | ||||||
|     message(STATUS "Generating header file \"${output_header_path}\"...") |  | ||||||
|     file(WRITE "${output_header_path}" ${header_body}) |  | ||||||
|     message(STATUS "Generating source file \"${output_source_path}\"...") |  | ||||||
|     file(WRITE "${output_source_path}" ${source_body}) |  | ||||||
| endfunction() |  | ||||||
| 
 |  | ||||||
| if(NOT DEFINED PROJECT_NAME) |  | ||||||
|     message(FATAL_ERROR "Variable PROJECT_NAME is not defined.") |  | ||||||
| elseif(NOT DEFINED LIBRARY_NAME) |  | ||||||
|     message(FATAL_ERROR "Variable LIBRARY_NAME is not defined.") |  | ||||||
| elseif(NOT DEFINED SUBDIR) |  | ||||||
|     message(FATAL_ERROR "Variable SUBDIR is not defined.") |  | ||||||
| elseif(${CMAKE_ARGC} LESS 9) |  | ||||||
|     message(FATAL_ERROR "Too few arguments.") |  | ||||||
| elseif(${CMAKE_ARGC} GREATER 10) |  | ||||||
|     message(FATAL_ERROR "Too many arguments.") |  | ||||||
| elseif(NOT EXISTS ${CMAKE_ARGV5}) |  | ||||||
|     message(FATAL_ERROR "Input header \"${CMAKE_ARGV7}\" not found.") |  | ||||||
| endif() |  | ||||||
| cmake_path(CONVERT "${CMAKE_ARGV7}" TO_CMAKE_PATH_LIST input_path NORMALIZE) |  | ||||||
| cmake_path(CONVERT "${CMAKE_ARGV8}" TO_CMAKE_PATH_LIST output_header_path NORMALIZE) |  | ||||||
| cmake_path(CONVERT "${CMAKE_ARGV9}" TO_CMAKE_PATH_LIST output_source_path NORMALIZE) |  | ||||||
| string(TOLOWER "${PROJECT_NAME}" project_root) |  | ||||||
| cmake_path(CONVERT "${SUBDIR}" TO_CMAKE_PATH_LIST project_subdir NORMALIZE) |  | ||||||
| string(TOLOWER "${project_subdir}" project_subdir) |  | ||||||
| string(TOLOWER "${LIBRARY_NAME}" library_stem) |  | ||||||
| cmake_path(REPLACE_EXTENSION library_stem "h" OUTPUT_VARIABLE library_basename) |  | ||||||
| string(JOIN "/" library_include "${project_root}" "${library_basename}") |  | ||||||
| string(TOUPPER "${PROJECT_NAME}" project_name_upper) |  | ||||||
| string(TOUPPER "${project_subdir}" include_guard_infix) |  | ||||||
| string(REGEX REPLACE "/" "_" include_guard_infix "${include_guard_infix}") |  | ||||||
| string(REGEX REPLACE "-" "_" include_guard_prefix "${project_name_upper}") |  | ||||||
| string(JOIN "_" include_guard_prefix  "${include_guard_prefix}" "${include_guard_infix}") |  | ||||||
| string(JOIN "/" output_header_prefix "${project_root}" "${project_subdir}") |  | ||||||
| cmake_path(GET output_header_path STEM output_header_stem) |  | ||||||
| string(TOUPPER "${output_header_stem}" include_guard_stem) |  | ||||||
| string(JOIN "_" include_guard "${include_guard_prefix}" "${include_guard_stem}" "H") |  | ||||||
| cmake_path(GET output_header_path FILENAME output_header_basename) |  | ||||||
| string(JOIN "/" header_include "${output_header_prefix}" "${output_header_basename}") |  | ||||||
| main() |  | ||||||
|  | @ -1,6 +1,4 @@ | ||||||
| # This CMake script is used to perform string substitutions within a generated | cmake_minimum_required(VERSION 3.18 FATAL_ERROR) | ||||||
| # file. |  | ||||||
| cmake_minimum_required(VERSION 3.23 FATAL_ERROR) |  | ||||||
| 
 | 
 | ||||||
| if(NOT DEFINED MATCH_REGEX) | if(NOT DEFINED MATCH_REGEX) | ||||||
|     message(FATAL_ERROR "Variable \"MATCH_REGEX\" is not defined.") |     message(FATAL_ERROR "Variable \"MATCH_REGEX\" is not defined.") | ||||||
|  | @ -1,6 +1,4 @@ | ||||||
| # This CMake script is used to force Cargo to regenerate the header file for the | cmake_minimum_required(VERSION 3.18 FATAL_ERROR) | ||||||
| # core bindings after the out-of-source build directory has been cleaned. |  | ||||||
| cmake_minimum_required(VERSION 3.23 FATAL_ERROR) |  | ||||||
| 
 | 
 | ||||||
| if(NOT DEFINED CONDITION) | if(NOT DEFINED CONDITION) | ||||||
|     message(FATAL_ERROR "Variable \"CONDITION\" is not defined.") |     message(FATAL_ERROR "Variable \"CONDITION\" is not defined.") | ||||||
|  | @ -1,35 +0,0 @@ | ||||||
| find_package(Doxygen OPTIONAL_COMPONENTS dot) |  | ||||||
| 
 |  | ||||||
| if(DOXYGEN_FOUND) |  | ||||||
|     set(DOXYGEN_ALIASES "installed_headerfile=\\headerfile ${LIBRARY_NAME}.h <${PROJECT_NAME}/${LIBRARY_NAME}.h>") |  | ||||||
| 
 |  | ||||||
|     set(DOXYGEN_GENERATE_LATEX YES) |  | ||||||
| 
 |  | ||||||
|     set(DOXYGEN_PDF_HYPERLINKS YES) |  | ||||||
| 
 |  | ||||||
|     set(DOXYGEN_PROJECT_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/img/brandmark.png") |  | ||||||
| 
 |  | ||||||
|     set(DOXYGEN_SORT_BRIEF_DOCS YES) |  | ||||||
| 
 |  | ||||||
|     set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "${CMAKE_SOURCE_DIR}/README.md") |  | ||||||
| 
 |  | ||||||
|     doxygen_add_docs( |  | ||||||
|         ${LIBRARY_NAME}_docs |  | ||||||
|         "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" |  | ||||||
|         "${CBINDGEN_TARGET_DIR}/config.h" |  | ||||||
|         "${CBINDGEN_TARGET_DIR}/${UTILS_SUBDIR}/enum_string.h" |  | ||||||
|         "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/result.h" |  | ||||||
|         "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack_callback_data.h" |  | ||||||
|         "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/stack.h" |  | ||||||
|         "${CMAKE_SOURCE_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/${UTILS_SUBDIR}/string.h" |  | ||||||
|         "${CMAKE_SOURCE_DIR}/README.md" |  | ||||||
|         WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} |  | ||||||
|         COMMENT "Producing documentation with Doxygen..." |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     # \note A Doxygen input file isn't a file-level dependency so the Doxygen |  | ||||||
|     #       command must instead depend upon a target that either outputs the |  | ||||||
|     #       file or depends upon it also or it will just output an error message |  | ||||||
|     #       when it can't be found. |  | ||||||
|     add_dependencies(${LIBRARY_NAME}_docs ${BINDINGS_NAME}_artifacts ${LIBRARY_NAME}_utilities) |  | ||||||
| endif() |  | ||||||
|  | @ -1,39 +1,41 @@ | ||||||
|  | cmake_minimum_required(VERSION 3.18 FATAL_ERROR) | ||||||
|  | 
 | ||||||
| add_executable( | add_executable( | ||||||
|     ${LIBRARY_NAME}_quickstart |     example_quickstart | ||||||
|         quickstart.c |         quickstart.c | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| set_target_properties(${LIBRARY_NAME}_quickstart PROPERTIES LINKER_LANGUAGE C) | set_target_properties(example_quickstart PROPERTIES LINKER_LANGUAGE C) | ||||||
| 
 | 
 | ||||||
| # \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't | # \note An imported library's INTERFACE_INCLUDE_DIRECTORIES property can't | ||||||
| #       contain a non-existent path so its build-time include directory | #       contain a non-existent path so its build-time include directory | ||||||
| #       must be specified for all of its dependent targets instead. | #       must be specified for all of its dependent targets instead. | ||||||
| target_include_directories( | target_include_directories( | ||||||
|     ${LIBRARY_NAME}_quickstart |     example_quickstart | ||||||
|     PRIVATE "$<BUILD_INTERFACE:${CBINDGEN_INCLUDEDIR}>" |     PRIVATE "$<BUILD_INTERFACE:${CBINDGEN_INCLUDEDIR}>" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| target_link_libraries(${LIBRARY_NAME}_quickstart PRIVATE ${LIBRARY_NAME}) | target_link_libraries(example_quickstart PRIVATE ${LIBRARY_NAME}) | ||||||
| 
 | 
 | ||||||
| add_dependencies(${LIBRARY_NAME}_quickstart ${BINDINGS_NAME}_artifacts) | add_dependencies(example_quickstart ${LIBRARY_NAME}_artifacts) | ||||||
| 
 | 
 | ||||||
| if(BUILD_SHARED_LIBS AND WIN32) | if(BUILD_SHARED_LIBS AND WIN32) | ||||||
|     add_custom_command( |     add_custom_command( | ||||||
|         TARGET ${LIBRARY_NAME}_quickstart |         TARGET example_quickstart | ||||||
|         POST_BUILD |         POST_BUILD | ||||||
|         COMMAND ${CMAKE_COMMAND} -E copy_if_different |         COMMAND ${CMAKE_COMMAND} -E copy_if_different | ||||||
|                 ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX} |                 ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX} | ||||||
|                 ${CMAKE_BINARY_DIR} |                 ${CMAKE_CURRENT_BINARY_DIR} | ||||||
|         COMMENT "Copying the DLL built by Cargo into the examples directory..." |         COMMENT "Copying the DLL built by Cargo into the examples directory..." | ||||||
|         VERBATIM |         VERBATIM | ||||||
|     ) |     ) | ||||||
| endif() | endif() | ||||||
| 
 | 
 | ||||||
| add_custom_command( | add_custom_command( | ||||||
|     TARGET ${LIBRARY_NAME}_quickstart |     TARGET example_quickstart | ||||||
|     POST_BUILD |     POST_BUILD | ||||||
|     COMMAND |     COMMAND | ||||||
|         ${LIBRARY_NAME}_quickstart |         example_quickstart | ||||||
|     COMMENT |     COMMENT | ||||||
|         "Running the example quickstart..." |         "Running the example quickstart..." | ||||||
|     VERBATIM |     VERBATIM | ||||||
|  |  | ||||||
|  | @ -5,5 +5,5 @@ | ||||||
| ```shell | ```shell | ||||||
| cmake -E make_directory automerge-c/build | cmake -E make_directory automerge-c/build | ||||||
| cmake -S automerge-c -B automerge-c/build | cmake -S automerge-c -B automerge-c/build | ||||||
| cmake --build automerge-c/build --target automerge_quickstart | cmake --build automerge-c/build --target example_quickstart | ||||||
| ``` | ``` | ||||||
|  |  | ||||||
|  | @ -3,127 +3,152 @@ | ||||||
| #include <string.h> | #include <string.h> | ||||||
| 
 | 
 | ||||||
| #include <automerge-c/automerge.h> | #include <automerge-c/automerge.h> | ||||||
| #include <automerge-c/utils/enum_string.h> |  | ||||||
| #include <automerge-c/utils/stack.h> |  | ||||||
| #include <automerge-c/utils/stack_callback_data.h> |  | ||||||
| #include <automerge-c/utils/string.h> |  | ||||||
| 
 | 
 | ||||||
| static bool abort_cb(AMstack**, void*); | static void abort_cb(AMresultStack**, uint8_t); | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * \brief Based on https://automerge.github.io/docs/quickstart
 |  * \brief Based on https://automerge.github.io/docs/quickstart
 | ||||||
|  */ |  */ | ||||||
| int main(int argc, char** argv) { | int main(int argc, char** argv) { | ||||||
|     AMstack* stack = NULL; |     AMresultStack* stack = NULL; | ||||||
|     AMdoc* doc1; |     AMdoc* const doc1 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, abort_cb).doc; | ||||||
|     AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc1); |     AMobjId const* const cards = AMpush(&stack, | ||||||
|     AMobjId const* const cards = |                                         AMmapPutObject(doc1, AM_ROOT, AMstr("cards"), AM_OBJ_TYPE_LIST), | ||||||
|         AMitemObjId(AMstackItem(&stack, AMmapPutObject(doc1, AM_ROOT, AMstr("cards"), AM_OBJ_TYPE_LIST), abort_cb, |                                         AM_VALUE_OBJ_ID, | ||||||
|                                 AMexpect(AM_VAL_TYPE_OBJ_TYPE))); |                                         abort_cb).obj_id; | ||||||
|     AMobjId const* const card1 = |     AMobjId const* const card1 = AMpush(&stack, | ||||||
|         AMitemObjId(AMstackItem(&stack, AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), abort_cb, |                                         AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), | ||||||
|                                 AMexpect(AM_VAL_TYPE_OBJ_TYPE))); |                                         AM_VALUE_OBJ_ID, | ||||||
|     AMstackItem(NULL, AMmapPutStr(doc1, card1, AMstr("title"), AMstr("Rewrite everything in Clojure")), abort_cb, |                                         abort_cb).obj_id; | ||||||
|                 AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMmapPutStr(doc1, card1, AMstr("title"), AMstr("Rewrite everything in Clojure"))); | ||||||
|     AMstackItem(NULL, AMmapPutBool(doc1, card1, AMstr("done"), false), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMmapPutBool(doc1, card1, AMstr("done"), false)); | ||||||
|     AMobjId const* const card2 = |     AMobjId const* const card2 = AMpush(&stack, | ||||||
|         AMitemObjId(AMstackItem(&stack, AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), abort_cb, |                                         AMlistPutObject(doc1, cards, SIZE_MAX, true, AM_OBJ_TYPE_MAP), | ||||||
|                                 AMexpect(AM_VAL_TYPE_OBJ_TYPE))); |                                         AM_VALUE_OBJ_ID, | ||||||
|     AMstackItem(NULL, AMmapPutStr(doc1, card2, AMstr("title"), AMstr("Rewrite everything in Haskell")), abort_cb, |                                         abort_cb).obj_id; | ||||||
|                 AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMmapPutStr(doc1, card2, AMstr("title"), AMstr("Rewrite everything in Haskell"))); | ||||||
|     AMstackItem(NULL, AMmapPutBool(doc1, card2, AMstr("done"), false), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMmapPutBool(doc1, card2, AMstr("done"), false)); | ||||||
|     AMstackItem(NULL, AMcommit(doc1, AMstr("Add card"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |     AMfree(AMcommit(doc1, AMstr("Add card"), NULL)); | ||||||
| 
 | 
 | ||||||
|     AMdoc* doc2; |     AMdoc* doc2 = AMpush(&stack, AMcreate(NULL), AM_VALUE_DOC, abort_cb).doc; | ||||||
|     AMitemToDoc(AMstackItem(&stack, AMcreate(NULL), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2); |     AMfree(AMmerge(doc2, doc1)); | ||||||
|     AMstackItem(NULL, AMmerge(doc2, doc1), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |  | ||||||
| 
 | 
 | ||||||
|     AMbyteSpan binary; |     AMbyteSpan const binary = AMpush(&stack, AMsave(doc1), AM_VALUE_BYTES, abort_cb).bytes; | ||||||
|     AMitemToBytes(AMstackItem(&stack, AMsave(doc1), abort_cb, AMexpect(AM_VAL_TYPE_BYTES)), &binary); |     doc2 = AMpush(&stack, AMload(binary.src, binary.count), AM_VALUE_DOC, abort_cb).doc; | ||||||
|     AMitemToDoc(AMstackItem(&stack, AMload(binary.src, binary.count), abort_cb, AMexpect(AM_VAL_TYPE_DOC)), &doc2); |  | ||||||
| 
 | 
 | ||||||
|     AMstackItem(NULL, AMmapPutBool(doc1, card1, AMstr("done"), true), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMmapPutBool(doc1, card1, AMstr("done"), true)); | ||||||
|     AMstackItem(NULL, AMcommit(doc1, AMstr("Mark card as done"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |     AMfree(AMcommit(doc1, AMstr("Mark card as done"), NULL)); | ||||||
| 
 | 
 | ||||||
|     AMstackItem(NULL, AMlistDelete(doc2, cards, 0), abort_cb, AMexpect(AM_VAL_TYPE_VOID)); |     AMfree(AMlistDelete(doc2, cards, 0)); | ||||||
|     AMstackItem(NULL, AMcommit(doc2, AMstr("Delete card"), NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |     AMfree(AMcommit(doc2, AMstr("Delete card"), NULL)); | ||||||
| 
 | 
 | ||||||
|     AMstackItem(NULL, AMmerge(doc1, doc2), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |     AMfree(AMmerge(doc1, doc2)); | ||||||
| 
 | 
 | ||||||
|     AMitems changes = AMstackItems(&stack, AMgetChanges(doc1, NULL), abort_cb, AMexpect(AM_VAL_TYPE_CHANGE)); |     AMchanges changes = AMpush(&stack, AMgetChanges(doc1, NULL), AM_VALUE_CHANGES, abort_cb).changes; | ||||||
|     AMitem* item = NULL; |     AMchange const* change = NULL; | ||||||
|     while ((item = AMitemsNext(&changes, 1)) != NULL) { |     while ((change = AMchangesNext(&changes, 1)) != NULL) { | ||||||
|         AMchange const* change; |         AMbyteSpan const change_hash = AMchangeHash(change); | ||||||
|         AMitemToChange(item, &change); |         AMchangeHashes const heads = AMpush(&stack, | ||||||
|         AMitems const heads = AMstackItems(&stack, AMitemFromChangeHash(AMchangeHash(change)), abort_cb, |                                             AMchangeHashesInit(&change_hash, 1), | ||||||
|                                            AMexpect(AM_VAL_TYPE_CHANGE_HASH)); |                                             AM_VALUE_CHANGE_HASHES, | ||||||
|         char* const c_msg = AMstrdup(AMchangeMessage(change), NULL); |                                             abort_cb).change_hashes; | ||||||
|         printf("%s %zu\n", c_msg, AMobjSize(doc1, cards, &heads)); |         AMbyteSpan const msg = AMchangeMessage(change); | ||||||
|  |         char* const c_msg = calloc(1, msg.count + 1); | ||||||
|  |         strncpy(c_msg, msg.src, msg.count); | ||||||
|  |         printf("%s %ld\n", c_msg, AMobjSize(doc1, cards, &heads)); | ||||||
|         free(c_msg); |         free(c_msg); | ||||||
|     } |     } | ||||||
|     AMstackFree(&stack); |     AMfreeStack(&stack); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | static char const* discriminant_suffix(AMvalueVariant const); | ||||||
|  | 
 | ||||||
| /**
 | /**
 | ||||||
|  * \brief Examines the result at the top of the given stack and, if it's |  * \brief Prints an error message to `stderr`, deallocates all results in the | ||||||
|  *        invalid, prints an error message to `stderr`, deallocates all results |  *        given stack and exits. | ||||||
|  *        in the stack and exits. |  | ||||||
|  * |  * | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  * \param[in,out] stack A pointer to a pointer to an `AMresultStack` struct. | ||||||
|  * \param[in] data A pointer to an owned `AMstackCallbackData` struct or `NULL`. |  * \param[in] discriminant An `AMvalueVariant` enum tag. | ||||||
|  * \return `true` if the top `AMresult` in \p stack is valid, `false` otherwise. |  | ||||||
|  * \pre \p stack` != NULL`. |  * \pre \p stack` != NULL`. | ||||||
|  |  * \post `*stack == NULL`. | ||||||
|  */ |  */ | ||||||
| static bool abort_cb(AMstack** stack, void* data) { | static void abort_cb(AMresultStack** stack, uint8_t discriminant) { | ||||||
|     static char buffer[512] = {0}; |     static char buffer[512] = {0}; | ||||||
| 
 | 
 | ||||||
|     char const* suffix = NULL; |     char const* suffix = NULL; | ||||||
|     if (!stack) { |     if (!stack) { | ||||||
|         suffix = "Stack*"; |         suffix = "Stack*"; | ||||||
|     } else if (!*stack) { |     } | ||||||
|  |     else if (!*stack) { | ||||||
|         suffix = "Stack"; |         suffix = "Stack"; | ||||||
|     } else if (!(*stack)->result) { |     } | ||||||
|  |     else if (!(*stack)->result) { | ||||||
|         suffix = ""; |         suffix = ""; | ||||||
|     } |     } | ||||||
|     if (suffix) { |     if (suffix) { | ||||||
|         fprintf(stderr, "Null `AMresult%s*`.\n", suffix); |         fprintf(stderr, "Null `AMresult%s*`.", suffix); | ||||||
|         AMstackFree(stack); |         AMfreeStack(stack); | ||||||
|         exit(EXIT_FAILURE); |         exit(EXIT_FAILURE); | ||||||
|         return false; |         return; | ||||||
|     } |     } | ||||||
|     AMstatus const status = AMresultStatus((*stack)->result); |     AMstatus const status = AMresultStatus((*stack)->result); | ||||||
|     switch (status) { |     switch (status) { | ||||||
|         case AM_STATUS_ERROR: |         case AM_STATUS_ERROR:          strcpy(buffer, "Error");          break; | ||||||
|             strcpy(buffer, "Error"); |         case AM_STATUS_INVALID_RESULT: strcpy(buffer, "Invalid result"); break; | ||||||
|             break; |         case AM_STATUS_OK:                                               break; | ||||||
|         case AM_STATUS_INVALID_RESULT: |         default: sprintf(buffer, "Unknown `AMstatus` tag %d", status); | ||||||
|             strcpy(buffer, "Invalid result"); |  | ||||||
|             break; |  | ||||||
|         case AM_STATUS_OK: |  | ||||||
|             break; |  | ||||||
|         default: |  | ||||||
|             sprintf(buffer, "Unknown `AMstatus` tag %d", status); |  | ||||||
|     } |     } | ||||||
|     if (buffer[0]) { |     if (buffer[0]) { | ||||||
|         char* const c_msg = AMstrdup(AMresultError((*stack)->result), NULL); |         AMbyteSpan const msg = AMerrorMessage((*stack)->result); | ||||||
|         fprintf(stderr, "%s; %s.\n", buffer, c_msg); |         char* const c_msg = calloc(1, msg.count + 1); | ||||||
|  |         strncpy(c_msg, msg.src, msg.count); | ||||||
|  |         fprintf(stderr, "%s; %s.", buffer, c_msg); | ||||||
|         free(c_msg); |         free(c_msg); | ||||||
|         AMstackFree(stack); |         AMfreeStack(stack); | ||||||
|         exit(EXIT_FAILURE); |         exit(EXIT_FAILURE); | ||||||
|         return false; |         return; | ||||||
|     } |     } | ||||||
|     if (data) { |     AMvalue const value = AMresultValue((*stack)->result); | ||||||
|         AMstackCallbackData* sc_data = (AMstackCallbackData*)data; |     fprintf(stderr, "Unexpected tag `AM_VALUE_%s` (%d); expected `AM_VALUE_%s`.", | ||||||
|         AMvalType const tag = AMitemValType(AMresultItem((*stack)->result)); |         discriminant_suffix(value.tag), | ||||||
|         if (tag != sc_data->bitmask) { |         value.tag, | ||||||
|             fprintf(stderr, "Unexpected tag `%s` (%d) instead of `%s` at %s:%d.\n", AMvalTypeToString(tag), tag, |         discriminant_suffix(discriminant)); | ||||||
|                     AMvalTypeToString(sc_data->bitmask), sc_data->file, sc_data->line); |     AMfreeStack(stack); | ||||||
|             free(sc_data); |  | ||||||
|             AMstackFree(stack); |  | ||||||
|     exit(EXIT_FAILURE); |     exit(EXIT_FAILURE); | ||||||
|             return false; |  | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * \brief Gets the suffix for a discriminant's corresponding string | ||||||
|  |  *        representation. | ||||||
|  |  * | ||||||
|  |  * \param[in] discriminant An `AMvalueVariant` enum tag. | ||||||
|  |  * \return A UTF-8 string. | ||||||
|  |  */ | ||||||
|  | static char const* discriminant_suffix(AMvalueVariant const discriminant) { | ||||||
|  |     char const* suffix = NULL; | ||||||
|  |     switch (discriminant) { | ||||||
|  |         case AM_VALUE_ACTOR_ID:      suffix = "ACTOR_ID";      break; | ||||||
|  |         case AM_VALUE_BOOLEAN:       suffix = "BOOLEAN";       break; | ||||||
|  |         case AM_VALUE_BYTES:         suffix = "BYTES";         break; | ||||||
|  |         case AM_VALUE_CHANGE_HASHES: suffix = "CHANGE_HASHES"; break; | ||||||
|  |         case AM_VALUE_CHANGES:       suffix = "CHANGES";       break; | ||||||
|  |         case AM_VALUE_COUNTER:       suffix = "COUNTER";       break; | ||||||
|  |         case AM_VALUE_DOC:           suffix = "DOC";           break; | ||||||
|  |         case AM_VALUE_F64:           suffix = "F64";           break; | ||||||
|  |         case AM_VALUE_INT:           suffix = "INT";           break; | ||||||
|  |         case AM_VALUE_LIST_ITEMS:    suffix = "LIST_ITEMS";    break; | ||||||
|  |         case AM_VALUE_MAP_ITEMS:     suffix = "MAP_ITEMS";     break; | ||||||
|  |         case AM_VALUE_NULL:          suffix = "NULL";          break; | ||||||
|  |         case AM_VALUE_OBJ_ID:        suffix = "OBJ_ID";        break; | ||||||
|  |         case AM_VALUE_OBJ_ITEMS:     suffix = "OBJ_ITEMS";     break; | ||||||
|  |         case AM_VALUE_STR:           suffix = "STR";           break; | ||||||
|  |         case AM_VALUE_STRS:          suffix = "STRINGS";       break; | ||||||
|  |         case AM_VALUE_SYNC_MESSAGE:  suffix = "SYNC_MESSAGE";  break; | ||||||
|  |         case AM_VALUE_SYNC_STATE:    suffix = "SYNC_STATE";    break; | ||||||
|  |         case AM_VALUE_TIMESTAMP:     suffix = "TIMESTAMP";     break; | ||||||
|  |         case AM_VALUE_UINT:          suffix = "UINT";          break; | ||||||
|  |         case AM_VALUE_VOID:          suffix = "VOID";          break; | ||||||
|  |         default:                     suffix = "..."; | ||||||
|     } |     } | ||||||
|     free(data); |     return suffix; | ||||||
|     return true; |  | ||||||
| } | } | ||||||
|  |  | ||||||
(image file: 1.4 KiB before, 1.4 KiB after)
|  | @ -1,30 +0,0 @@ | ||||||
| #ifndef AUTOMERGE_C_UTILS_RESULT_H |  | ||||||
| #define AUTOMERGE_C_UTILS_RESULT_H |  | ||||||
| /**
 |  | ||||||
|  * \file |  | ||||||
|  * \brief Utility functions for use with `AMresult` structs. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| #include <stdarg.h> |  | ||||||
| 
 |  | ||||||
| #include <automerge-c/automerge.h> |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \brief Transfers the items within an arbitrary list of results into a |  | ||||||
|  *        new result in their order of specification. |  | ||||||
|  * \param[in] count The count of subsequent arguments. |  | ||||||
|  * \param[in] ... A \p count list of arguments, each of which is a pointer to |  | ||||||
|  *                an `AMresult` struct whose items will be transferred out of it |  | ||||||
|  *                and which is subsequently freed. |  | ||||||
|  * \return A pointer to an `AMresult` struct or `NULL`. |  | ||||||
|  * \pre `∀𝑥 ∈` \p ... `, AMresultStatus(𝑥) == AM_STATUS_OK` |  | ||||||
|  * \post `(∃𝑥 ∈` \p ... `, AMresultStatus(𝑥) != AM_STATUS_OK) -> NULL` |  | ||||||
|  * \attention All `AMresult` struct pointer arguments are passed to |  | ||||||
|  *            `AMresultFree()` regardless of success; use `AMresultCat()` |  | ||||||
|  *            instead if you wish to pass them to `AMresultFree()` yourself. |  | ||||||
|  * \warning The returned `AMresult` struct pointer must be passed to |  | ||||||
|  *          `AMresultFree()` in order to avoid a memory leak. |  | ||||||
|  */ |  | ||||||
| AMresult* AMresultFrom(int count, ...); |  | ||||||
| 
 |  | ||||||
| #endif /* AUTOMERGE_C_UTILS_RESULT_H */ |  | ||||||
|  | @ -1,130 +0,0 @@ | ||||||
| #ifndef AUTOMERGE_C_UTILS_STACK_H |  | ||||||
| #define AUTOMERGE_C_UTILS_STACK_H |  | ||||||
| /**
 |  | ||||||
|  * \file |  | ||||||
|  * \brief Utility data structures and functions for hiding `AMresult` structs, |  | ||||||
|  *        managing their lifetimes, and automatically applying custom |  | ||||||
|  *        validation logic to the `AMitem` structs that they contain. |  | ||||||
|  * |  | ||||||
|  * \note The `AMstack` struct and its related functions drastically reduce the |  | ||||||
|  *       need for boilerplate code and/or `goto` statement usage within a C |  | ||||||
|  *       application but a higher-level programming language offers even better |  | ||||||
|  *       ways to do the same things. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| #include <automerge-c/automerge.h> |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \struct AMstack |  | ||||||
|  * \brief A node in a singly-linked list of result pointers. |  | ||||||
|  */ |  | ||||||
| typedef struct AMstack { |  | ||||||
|     /** A result to be deallocated. */ |  | ||||||
|     AMresult* result; |  | ||||||
|     /** The previous node in the singly-linked list or `NULL`. */  |  | ||||||
|     struct AMstack* prev; |  | ||||||
| } AMstack; |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief The prototype of a function that examines the result at the top of |  | ||||||
|  *        the given stack in terms of some arbitrary data. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \param[in] data A pointer to arbitrary data or `NULL`. |  | ||||||
|  * \return `true` if the top `AMresult` struct in \p stack is valid, `false` |  | ||||||
|  *         otherwise. |  | ||||||
|  * \pre \p stack `!= NULL`. |  | ||||||
|  */ |  | ||||||
| typedef bool (*AMstackCallback)(AMstack** stack, void* data); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Deallocates the storage for a stack of results. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \pre \p stack `!= NULL` |  | ||||||
|  * \post `*stack == NULL` |  | ||||||
|  */ |  | ||||||
| void AMstackFree(AMstack** stack); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Gets a result from the stack after removing it. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \param[in] result A pointer to the `AMresult` to be popped or `NULL` to |  | ||||||
|  *                   select the top result in \p stack. |  | ||||||
|  * \return A pointer to an `AMresult` struct or `NULL`. |  | ||||||
|  * \pre \p stack `!= NULL` |  | ||||||
|  * \warning The returned `AMresult` struct pointer must be passed to |  | ||||||
|  *          `AMresultFree()` in order to avoid a memory leak. |  | ||||||
|  */ |  | ||||||
| AMresult* AMstackPop(AMstack** stack, AMresult const* result); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Pushes the given result onto the given stack, calls the given |  | ||||||
|  *        callback with the given data to validate it and then either gets the |  | ||||||
|  *        result if it's valid or gets `NULL` instead. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \param[in] result A pointer to an `AMresult` struct. |  | ||||||
|  * \param[in] callback A pointer to a function with the same signature as |  | ||||||
|  *                     `AMstackCallback()` or `NULL`. |  | ||||||
|  * \param[in] data A pointer to arbitrary data or `NULL` which is passed to |  | ||||||
|  *                 \p callback. |  | ||||||
|  * \return \p result or `NULL`. |  | ||||||
|  * \warning If \p stack `== NULL` then \p result is deallocated in order to |  | ||||||
|  *          avoid a memory leak. |  | ||||||
|  */ |  | ||||||
| AMresult* AMstackResult(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Pushes the given result onto the given stack, calls the given |  | ||||||
|  *        callback with the given data to validate it and then either gets the |  | ||||||
|  *        first item in the sequence of items within that result if it's valid |  | ||||||
|  *        or gets `NULL` instead. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \param[in] result A pointer to an `AMresult` struct. |  | ||||||
|  * \param[in] callback A pointer to a function with the same signature as |  | ||||||
|  *                     `AMstackCallback()` or `NULL`. |  | ||||||
|  * \param[in] data A pointer to arbitrary data or `NULL` which is passed to |  | ||||||
|  *                 \p callback. |  | ||||||
|  * \return A pointer to an `AMitem` struct or `NULL`. |  | ||||||
|  * \warning If \p stack `== NULL` then \p result is deallocated in order to |  | ||||||
|  *          avoid a memory leak. |  | ||||||
|  */ |  | ||||||
| AMitem* AMstackItem(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Pushes the given result onto the given stack, calls the given |  | ||||||
|  *        callback with the given data to validate it and then either gets an |  | ||||||
|  *        `AMitems` struct over the sequence of items within that result if it's |  | ||||||
|  *        valid or gets an empty `AMitems` instead. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to a pointer to an `AMstack` struct. |  | ||||||
|  * \param[in] result A pointer to an `AMresult` struct. |  | ||||||
|  * \param[in] callback A pointer to a function with the same signature as |  | ||||||
|  *                     `AMstackCallback()` or `NULL`. |  | ||||||
|  * \param[in] data A pointer to arbitrary data or `NULL` which is passed to |  | ||||||
|  *                 \p callback. |  | ||||||
|  * \return An `AMitems` struct. |  | ||||||
|  * \warning If \p stack `== NULL` then \p result is deallocated immediately |  | ||||||
|  *          in order to avoid a memory leak. |  | ||||||
|  */ |  | ||||||
| AMitems AMstackItems(AMstack** stack, AMresult* result, AMstackCallback callback, void* data); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstack |  | ||||||
|  * \brief Gets the count of results that have been pushed onto the stack. |  | ||||||
|  * |  | ||||||
|  * \param[in,out] stack A pointer to an `AMstack` struct. |  | ||||||
|  * \return A 64-bit unsigned integer. |  | ||||||
|  */ |  | ||||||
| size_t AMstackSize(AMstack const* const stack); |  | ||||||
| 
 |  | ||||||
| #endif /* AUTOMERGE_C_UTILS_STACK_H */ |  | ||||||
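A minimal usage sketch of the stack functions above (not part of the header itself; it assumes an `AMresult`-producing call such as `AMactorIdInit()`, which appears later in this changeset):

    /* No validation is performed because the callback argument is NULL. */
    AMstack* stack = NULL;
    AMitem* item = AMstackItem(&stack, AMactorIdInit(), NULL, NULL);
    size_t pushed = AMstackSize(stack); /* 1: the pushed result is now owned by the stack */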
|  | @ -1,53 +0,0 @@ | ||||||
| #ifndef AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H |  | ||||||
| #define AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H |  | ||||||
| /**
 |  | ||||||
|  * \file |  | ||||||
|  * \brief Utility data structures, functions and macros for supplying |  | ||||||
|  *        parameters to the custom validation logic applied to `AMitem` |  | ||||||
|  *        structs. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| #include <automerge-c/automerge.h> |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \struct AMstackCallbackData |  | ||||||
|  * \brief  A data structure for passing the parameters of an item value test |  | ||||||
|  *         to an implementation of the `AMstackCallback` function prototype. |  | ||||||
|  */ |  | ||||||
| typedef struct { |  | ||||||
|     /** A bitmask of `AMvalType` tags. */ |  | ||||||
|     AMvalType bitmask; |  | ||||||
|     /** A null-terminated file path string. */ |  | ||||||
|     char const* file; |  | ||||||
|     /** The ordinal number of a line within a file. */ |  | ||||||
|     int line; |  | ||||||
| } AMstackCallbackData; |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstackCallbackData |  | ||||||
|  * \brief Allocates a new `AMstackCallbackData` struct and initializes its |  | ||||||
|  *        members from their corresponding arguments. |  | ||||||
|  * |  | ||||||
|  * \param[in] bitmask A bitmask of `AMvalType` tags. |  | ||||||
|  * \param[in] file A null-terminated file path string. |  | ||||||
|  * \param[in] line The ordinal number of a line within a file. |  | ||||||
|  * \return A pointer to a disowned `AMstackCallbackData` struct. |  | ||||||
|  * \warning The returned pointer must be passed to `free()` to avoid a memory |  | ||||||
|  *          leak. |  | ||||||
|  */ |  | ||||||
| AMstackCallbackData* AMstackCallbackDataInit(AMvalType const bitmask, char const* const file, int const line); |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMstackCallbackData |  | ||||||
|  * \def AMexpect |  | ||||||
|  * \brief Allocates a new `AMstackCallbackData` struct and initializes it from |  | ||||||
|  *        an `AMvalType` bitmask. |  | ||||||
|  * |  | ||||||
|  * \param[in] bitmask A bitmask of `AMvalType` tags. |  | ||||||
|  * \return A pointer to a disowned `AMstackCallbackData` struct. |  | ||||||
|  * \warning The returned pointer must be passed to `free()` to avoid a memory |  | ||||||
|  *          leak. |  | ||||||
|  */ |  | ||||||
| #define AMexpect(bitmask) AMstackCallbackDataInit(bitmask, __FILE__, __LINE__) |  | ||||||
| 
 |  | ||||||
| #endif /* AUTOMERGE_C_UTILS_PUSH_CALLBACK_DATA_H */ |  | ||||||
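A hedged sketch of the `AMexpect()` allocation contract described above (requires `<stdlib.h>`; `AM_VAL_TYPE_ACTOR_ID` is one of the `AMvalType` tags referenced later in this changeset):

    /* AMexpect() heap-allocates an AMstackCallbackData, so whichever code ends up
     * holding the pointer (typically the validation callback) must pass it to free(). */
    AMstackCallbackData* expected = AMexpect(AM_VAL_TYPE_ACTOR_ID);
    /* ... pass `expected` as the `data` argument of AMstackItem() or AMstackItems() ... */
    free(expected);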
|  | @ -1,29 +0,0 @@ | ||||||
| #ifndef AUTOMERGE_C_UTILS_STRING_H |  | ||||||
| #define AUTOMERGE_C_UTILS_STRING_H |  | ||||||
| /**
 |  | ||||||
|  * \file |  | ||||||
|  * \brief Utility functions for use with `AMbyteSpan` structs that provide |  | ||||||
|  *        UTF-8 string views. |  | ||||||
|  */ |  | ||||||
| 
 |  | ||||||
| #include <automerge-c/automerge.h> |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * \memberof AMbyteSpan |  | ||||||
|  * \brief Returns a pointer to a null-terminated byte string which is a |  | ||||||
|  *        duplicate of the given UTF-8 string view except for the substitution |  | ||||||
|  *        of its NUL (0) characters with the specified null-terminated byte |  | ||||||
|  *        string. |  | ||||||
|  *         |  | ||||||
|  * \param[in] str A UTF-8 string view as an `AMbyteSpan` struct. |  | ||||||
|  * \param[in] nul A null-terminated byte string to substitute for NUL characters |  | ||||||
|  *                or `NULL` to substitute `"\\0"` for NUL characters. |  | ||||||
|  * \return A disowned null-terminated byte string. |  | ||||||
|  * \pre \p str.src `!= NULL` |  | ||||||
|  * \pre \p str.count `<= sizeof(`\p str.src `)` |  | ||||||
|  * \warning The returned pointer must be passed to `free()` to avoid a memory |  | ||||||
|  *          leak.  |  | ||||||
|  */ |  | ||||||
| char* AMstrdup(AMbyteSpan const str, char const* nul); |  | ||||||
| 
 |  | ||||||
| #endif /* AUTOMERGE_C_UTILS_STRING_H */ |  | ||||||
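A short sketch of the documented `AMstrdup()` contract (requires `<stdio.h>` and `<stdlib.h>`; `str` is assumed to be a UTF-8 view obtained elsewhere, e.g. from `AMactorIdStr()`):

    char* c_str = AMstrdup(str, NULL); /* with NULL, NUL bytes are rendered as "\0" */
    printf("%s\n", c_str);
    free(c_str);                       /* AMstrdup() allocates, so the copy must be freed */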
							
								
								
									
rust/automerge-c/src/CMakeLists.txt | 250 (new file)
							|  | @ -0,0 +1,250 @@ | ||||||
|  | cmake_minimum_required(VERSION 3.18 FATAL_ERROR) | ||||||
|  | 
 | ||||||
|  | find_program ( | ||||||
|  |     CARGO_CMD | ||||||
|  |     "cargo" | ||||||
|  |     PATHS "$ENV{CARGO_HOME}/bin" | ||||||
|  |     DOC "The Cargo command" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | if(NOT CARGO_CMD) | ||||||
|  |     message(FATAL_ERROR "Cargo (Rust package manager) not found! Install it and/or set the CARGO_HOME environment variable.") | ||||||
|  | endif() | ||||||
|  | 
 | ||||||
|  | string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) | ||||||
|  | 
 | ||||||
|  | if(BUILD_TYPE_LOWER STREQUAL debug) | ||||||
|  |     set(CARGO_BUILD_TYPE "debug") | ||||||
|  | 
 | ||||||
|  |     set(CARGO_FLAG "") | ||||||
|  | else() | ||||||
|  |     set(CARGO_BUILD_TYPE "release") | ||||||
|  | 
 | ||||||
|  |     set(CARGO_FLAG "--release") | ||||||
|  | endif() | ||||||
|  | 
 | ||||||
|  | set(CARGO_FEATURES "") | ||||||
|  | 
 | ||||||
|  | set(CARGO_CURRENT_BINARY_DIR "${CARGO_TARGET_DIR}/${CARGO_BUILD_TYPE}") | ||||||
|  | 
 | ||||||
|  | set( | ||||||
|  |     CARGO_OUTPUT | ||||||
|  |         ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h | ||||||
|  |         ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX} | ||||||
|  |         ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | if(WIN32) | ||||||
|  |     # \note The basename of an import library output by Cargo is the filename | ||||||
|  |     #       of its corresponding shared library. | ||||||
|  |     list(APPEND CARGO_OUTPUT ${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}) | ||||||
|  | endif() | ||||||
|  | 
 | ||||||
|  | add_custom_command( | ||||||
|  |     OUTPUT | ||||||
|  |         ${CARGO_OUTPUT} | ||||||
|  |     COMMAND | ||||||
|  |         # \note cbindgen won't regenerate its output header file after it's | ||||||
|  |         #       been removed but it will after its configuration file has been | ||||||
|  |         #       updated. | ||||||
|  |         ${CMAKE_COMMAND} -DCONDITION=NOT_EXISTS -P ${CMAKE_SOURCE_DIR}/cmake/file_touch.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h ${CMAKE_SOURCE_DIR}/cbindgen.toml | ||||||
|  |     COMMAND | ||||||
|  |         ${CMAKE_COMMAND} -E env CARGO_TARGET_DIR=${CARGO_TARGET_DIR} CBINDGEN_TARGET_DIR=${CBINDGEN_TARGET_DIR} ${CARGO_CMD} build ${CARGO_FLAG} ${CARGO_FEATURES} | ||||||
|  |     MAIN_DEPENDENCY | ||||||
|  |         lib.rs | ||||||
|  |     DEPENDS | ||||||
|  |         actor_id.rs | ||||||
|  |         byte_span.rs | ||||||
|  |         change_hashes.rs | ||||||
|  |         change.rs | ||||||
|  |         changes.rs | ||||||
|  |         doc.rs | ||||||
|  |         doc/list.rs | ||||||
|  |         doc/list/item.rs | ||||||
|  |         doc/list/items.rs | ||||||
|  |         doc/map.rs | ||||||
|  |         doc/map/item.rs | ||||||
|  |         doc/map/items.rs | ||||||
|  |         doc/utils.rs | ||||||
|  |         obj.rs | ||||||
|  |         obj/item.rs | ||||||
|  |         obj/items.rs | ||||||
|  |         result.rs | ||||||
|  |         result_stack.rs | ||||||
|  |         strs.rs | ||||||
|  |         sync.rs | ||||||
|  |         sync/have.rs | ||||||
|  |         sync/haves.rs | ||||||
|  |         sync/message.rs | ||||||
|  |         sync/state.rs | ||||||
|  |         ${CMAKE_SOURCE_DIR}/build.rs | ||||||
|  |         ${CMAKE_SOURCE_DIR}/Cargo.toml | ||||||
|  |         ${CMAKE_SOURCE_DIR}/cbindgen.toml | ||||||
|  |     WORKING_DIRECTORY | ||||||
|  |         ${CMAKE_SOURCE_DIR} | ||||||
|  |     COMMENT | ||||||
|  |         "Producing the library artifacts with Cargo..." | ||||||
|  |     VERBATIM | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | add_custom_target( | ||||||
|  |     ${LIBRARY_NAME}_artifacts ALL | ||||||
|  |     DEPENDS ${CARGO_OUTPUT} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | # \note cbindgen's naming behavior isn't fully configurable and it ignores | ||||||
|  | #       `const fn` calls (https://github.com/eqrion/cbindgen/issues/252). | ||||||
|  | add_custom_command( | ||||||
|  |     TARGET ${LIBRARY_NAME}_artifacts | ||||||
|  |     POST_BUILD | ||||||
|  |     COMMAND | ||||||
|  |         # Compensate for cbindgen's variant struct naming. | ||||||
|  |         ${CMAKE_COMMAND} -DMATCH_REGEX=AM\([^_]+_[^_]+\)_Body -DREPLACE_EXPR=AM\\1 -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h | ||||||
|  |     COMMAND | ||||||
|  |         # Compensate for cbindgen's union tag enum type naming. | ||||||
|  |         ${CMAKE_COMMAND} -DMATCH_REGEX=AM\([^_]+\)_Tag -DREPLACE_EXPR=AM\\1Variant -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h | ||||||
|  |     COMMAND | ||||||
|  |         # Compensate for cbindgen's translation of consecutive uppercase letters to "ScreamingSnakeCase". | ||||||
|  |         ${CMAKE_COMMAND} -DMATCH_REGEX=A_M\([^_]+\)_ -DREPLACE_EXPR=AM_\\1_ -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h | ||||||
|  |     COMMAND | ||||||
|  |         # Compensate for cbindgen ignoring `std::mem::size_of<usize>()` calls. | ||||||
|  |         ${CMAKE_COMMAND} -DMATCH_REGEX=USIZE_ -DREPLACE_EXPR=\+${CMAKE_SIZEOF_VOID_P} -P ${CMAKE_SOURCE_DIR}/cmake/file_regex_replace.cmake -- ${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h | ||||||
|  |     WORKING_DIRECTORY | ||||||
|  |         ${CMAKE_SOURCE_DIR} | ||||||
|  |     COMMENT | ||||||
|  |         "Compensating for cbindgen deficits..." | ||||||
|  |     VERBATIM | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | if(BUILD_SHARED_LIBS) | ||||||
|  |     if(WIN32) | ||||||
|  |         set(LIBRARY_DESTINATION "${CMAKE_INSTALL_BINDIR}") | ||||||
|  |     else() | ||||||
|  |         set(LIBRARY_DESTINATION "${CMAKE_INSTALL_LIBDIR}") | ||||||
|  |     endif() | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_DEFINE_SYMBOL "${SYMBOL_PREFIX}_EXPORTS") | ||||||
|  | 
 | ||||||
|  |     # \note The basename of an import library output by Cargo is the filename | ||||||
|  |     #       of its corresponding shared library. | ||||||
|  |     set(LIBRARY_IMPLIB "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_LOCATION "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_NO_SONAME "${WIN32}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_SONAME "${CMAKE_SHARED_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_TYPE "SHARED") | ||||||
|  | else() | ||||||
|  |     set(LIBRARY_DEFINE_SYMBOL "") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_DESTINATION "${CMAKE_INSTALL_LIBDIR}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_IMPLIB "") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_LOCATION "${CARGO_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_NO_SONAME "TRUE") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_SONAME "") | ||||||
|  | 
 | ||||||
|  |     set(LIBRARY_TYPE "STATIC") | ||||||
|  | endif() | ||||||
|  | 
 | ||||||
|  | add_library(${LIBRARY_NAME} ${LIBRARY_TYPE} IMPORTED GLOBAL) | ||||||
|  | 
 | ||||||
|  | set_target_properties( | ||||||
|  |     ${LIBRARY_NAME} | ||||||
|  |     PROPERTIES | ||||||
|  |         # \note Cargo writes a debug build into a nested directory instead of | ||||||
|  |         #       decorating its name. | ||||||
|  |         DEBUG_POSTFIX "" | ||||||
|  |         DEFINE_SYMBOL "${LIBRARY_DEFINE_SYMBOL}" | ||||||
|  |         IMPORTED_IMPLIB "${LIBRARY_IMPLIB}" | ||||||
|  |         IMPORTED_LOCATION "${LIBRARY_LOCATION}" | ||||||
|  |         IMPORTED_NO_SONAME "${LIBRARY_NO_SONAME}" | ||||||
|  |         IMPORTED_SONAME "${LIBRARY_SONAME}" | ||||||
|  |         LINKER_LANGUAGE C | ||||||
|  |         PUBLIC_HEADER "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" | ||||||
|  |         SOVERSION "${PROJECT_VERSION_MAJOR}" | ||||||
|  |         VERSION "${PROJECT_VERSION}" | ||||||
|  |         # \note Cargo exports all of the symbols automatically. | ||||||
|  |         WINDOWS_EXPORT_ALL_SYMBOLS "TRUE" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | target_compile_definitions(${LIBRARY_NAME} INTERFACE $<TARGET_PROPERTY:${LIBRARY_NAME},DEFINE_SYMBOL>) | ||||||
|  | 
 | ||||||
|  | target_include_directories( | ||||||
|  |     ${LIBRARY_NAME} | ||||||
|  |     INTERFACE | ||||||
|  |         "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}>" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | set(CMAKE_THREAD_PREFER_PTHREAD TRUE) | ||||||
|  | 
 | ||||||
|  | set(THREADS_PREFER_PTHREAD_FLAG TRUE) | ||||||
|  | 
 | ||||||
|  | find_package(Threads REQUIRED) | ||||||
|  | 
 | ||||||
|  | set(LIBRARY_DEPENDENCIES Threads::Threads ${CMAKE_DL_LIBS}) | ||||||
|  | 
 | ||||||
|  | if(WIN32) | ||||||
|  |     list(APPEND LIBRARY_DEPENDENCIES Bcrypt userenv ws2_32) | ||||||
|  | else() | ||||||
|  |     list(APPEND LIBRARY_DEPENDENCIES m) | ||||||
|  | endif() | ||||||
|  | 
 | ||||||
|  | target_link_libraries(${LIBRARY_NAME} INTERFACE ${LIBRARY_DEPENDENCIES}) | ||||||
|  | 
 | ||||||
|  | install( | ||||||
|  |     FILES $<TARGET_PROPERTY:${LIBRARY_NAME},IMPORTED_IMPLIB> | ||||||
|  |     TYPE LIB | ||||||
|  |     # \note The basename of an import library output by Cargo is the filename | ||||||
|  |     #       of its corresponding shared library. | ||||||
|  |     RENAME "${CMAKE_STATIC_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}" | ||||||
|  |     OPTIONAL | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | set(LIBRARY_FILE_NAME "${CMAKE_${LIBRARY_TYPE}_LIBRARY_PREFIX}${LIBRARY_NAME}${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX}${CMAKE_${LIBRARY_TYPE}_LIBRARY_SUFFIX}") | ||||||
|  | 
 | ||||||
|  | install( | ||||||
|  |     FILES $<TARGET_PROPERTY:${LIBRARY_NAME},IMPORTED_LOCATION> | ||||||
|  |     RENAME "${LIBRARY_FILE_NAME}" | ||||||
|  |     DESTINATION ${LIBRARY_DESTINATION} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | install( | ||||||
|  |     FILES $<TARGET_PROPERTY:${LIBRARY_NAME},PUBLIC_HEADER> | ||||||
|  |     DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | find_package(Doxygen OPTIONAL_COMPONENTS dot) | ||||||
|  | 
 | ||||||
|  | if(DOXYGEN_FOUND) | ||||||
|  |     set(DOXYGEN_ALIASES "installed_headerfile=\\headerfile ${LIBRARY_NAME}.h <${PROJECT_NAME}/${LIBRARY_NAME}.h>") | ||||||
|  | 
 | ||||||
|  |     set(DOXYGEN_GENERATE_LATEX YES) | ||||||
|  | 
 | ||||||
|  |     set(DOXYGEN_PDF_HYPERLINKS YES) | ||||||
|  | 
 | ||||||
|  |     set(DOXYGEN_PROJECT_LOGO "${CMAKE_SOURCE_DIR}/img/brandmark.png") | ||||||
|  | 
 | ||||||
|  |     set(DOXYGEN_SORT_BRIEF_DOCS YES) | ||||||
|  | 
 | ||||||
|  |     set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "${CMAKE_SOURCE_DIR}/README.md") | ||||||
|  | 
 | ||||||
|  |     doxygen_add_docs( | ||||||
|  |         ${LIBRARY_NAME}_docs | ||||||
|  |         "${CBINDGEN_TARGET_DIR}/${LIBRARY_NAME}.h" | ||||||
|  |         "${CMAKE_SOURCE_DIR}/README.md" | ||||||
|  |         USE_STAMP_FILE | ||||||
|  |         WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} | ||||||
|  |         COMMENT "Producing documentation with Doxygen..." | ||||||
|  |     ) | ||||||
|  | 
 | ||||||
|  |     # \note A Doxygen input file isn't a file-level dependency so the Doxygen | ||||||
|  |     #       command must instead depend upon a target that outputs the file or | ||||||
|  |     #       it will just output an error message when it can't be found. | ||||||
|  |     add_dependencies(${LIBRARY_NAME}_docs ${LIBRARY_NAME}_artifacts) | ||||||
|  | endif() | ||||||
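Once installed, the cbindgen-generated header and the Cargo-built library can be consumed from C roughly as follows (a hedged sketch; the include path mirrors the install rules above, and the release call is `AMresultFree()` on one side of this diff and `AMfree()` on the other):

    #include <automerge-c/automerge.h>

    int main(void) {
        AMresult* result = AMactorIdInit(); /* any call that returns an AMresult */
        AMresultFree(result);               /* every AMresult must be released */
        return 0;
    }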
|  | @ -1,5 +1,4 @@ | ||||||
| use automerge as am; | use automerge as am; | ||||||
| use libc::c_int; |  | ||||||
| use std::cell::RefCell; | use std::cell::RefCell; | ||||||
| use std::cmp::Ordering; | use std::cmp::Ordering; | ||||||
| use std::str::FromStr; | use std::str::FromStr; | ||||||
|  | @ -12,7 +11,7 @@ macro_rules! to_actor_id { | ||||||
|         let handle = $handle.as_ref(); |         let handle = $handle.as_ref(); | ||||||
|         match handle { |         match handle { | ||||||
|             Some(b) => b, |             Some(b) => b, | ||||||
|             None => return AMresult::error("Invalid `AMactorId*`").into(), |             None => return AMresult::err("Invalid AMactorId pointer").into(), | ||||||
|         } |         } | ||||||
|     }}; |     }}; | ||||||
| } | } | ||||||
|  | @ -58,11 +57,11 @@ impl AsRef<am::ActorId> for AMactorId { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMactorId
 | /// \memberof AMactorId
 | ||||||
| /// \brief Gets the value of an actor identifier as an array of bytes.
 | /// \brief Gets the value of an actor identifier as a sequence of bytes.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] actor_id A pointer to an `AMactorId` struct.
 | /// \param[in] actor_id A pointer to an `AMactorId` struct.
 | ||||||
| /// \return An `AMbyteSpan` struct for an array of bytes.
 | /// \pre \p actor_id `!= NULL`.
 | ||||||
| /// \pre \p actor_id `!= NULL`
 | /// \return An `AMbyteSpan` struct.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -83,8 +82,8 @@ pub unsafe extern "C" fn AMactorIdBytes(actor_id: *const AMactorId) -> AMbyteSpa | ||||||
| /// \return `-1` if \p actor_id1 `<` \p actor_id2, `0` if
 | /// \return `-1` if \p actor_id1 `<` \p actor_id2, `0` if
 | ||||||
| ///         \p actor_id1 `==` \p actor_id2 and `1` if
 | ///         \p actor_id1 `==` \p actor_id2 and `1` if
 | ||||||
| ///         \p actor_id1 `>` \p actor_id2.
 | ///         \p actor_id1 `>` \p actor_id2.
 | ||||||
| /// \pre \p actor_id1 `!= NULL`
 | /// \pre \p actor_id1 `!= NULL`.
 | ||||||
| /// \pre \p actor_id2 `!= NULL`
 | /// \pre \p actor_id2 `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// #Safety
 | /// #Safety
 | ||||||
|  | @ -94,7 +93,7 @@ pub unsafe extern "C" fn AMactorIdBytes(actor_id: *const AMactorId) -> AMbyteSpa | ||||||
| pub unsafe extern "C" fn AMactorIdCmp( | pub unsafe extern "C" fn AMactorIdCmp( | ||||||
|     actor_id1: *const AMactorId, |     actor_id1: *const AMactorId, | ||||||
|     actor_id2: *const AMactorId, |     actor_id2: *const AMactorId, | ||||||
| ) -> c_int { | ) -> isize { | ||||||
|     match (actor_id1.as_ref(), actor_id2.as_ref()) { |     match (actor_id1.as_ref(), actor_id2.as_ref()) { | ||||||
|         (Some(actor_id1), Some(actor_id2)) => match actor_id1.as_ref().cmp(actor_id2.as_ref()) { |         (Some(actor_id1), Some(actor_id2)) => match actor_id1.as_ref().cmp(actor_id2.as_ref()) { | ||||||
|             Ordering::Less => -1, |             Ordering::Less => -1, | ||||||
|  | @ -102,69 +101,65 @@ pub unsafe extern "C" fn AMactorIdCmp( | ||||||
|             Ordering::Greater => 1, |             Ordering::Greater => 1, | ||||||
|         }, |         }, | ||||||
|         (None, Some(_)) => -1, |         (None, Some(_)) => -1, | ||||||
|         (None, None) => 0, |  | ||||||
|         (Some(_), None) => 1, |         (Some(_), None) => 1, | ||||||
|  |         (None, None) => 0, | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMactorId
 | /// \memberof AMactorId
 | ||||||
| /// \brief Allocates a new actor identifier and initializes it from a random
 | /// \brief Allocates a new actor identifier and initializes it with a random
 | ||||||
| ///        UUID value.
 | ///        UUID.
 | ||||||
| ///
 | ///
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item.
 | /// \return A pointer to an `AMresult` struct containing a pointer to an
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | ///         `AMactorId` struct.
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMactorIdInit() -> *mut AMresult { | pub unsafe extern "C" fn AMactorIdInit() -> *mut AMresult { | ||||||
|     to_result(Ok::<am::ActorId, am::AutomergeError>(am::ActorId::random())) |     to_result(Ok::<am::ActorId, am::AutomergeError>(am::ActorId::random())) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMactorId
 | /// \memberof AMactorId
 | ||||||
| /// \brief Allocates a new actor identifier and initializes it from an array of
 | /// \brief Allocates a new actor identifier and initializes it from a sequence
 | ||||||
| ///        bytes value.
 | ///        of bytes.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] src A pointer to an array of bytes.
 | /// \param[in] src A pointer to a contiguous sequence of bytes.
 | ||||||
| /// \param[in] count The count of bytes to copy from the array pointed to by
 | /// \param[in] count The number of bytes to copy from \p src.
 | ||||||
| ///                  \p src.
 | /// \pre `0 <` \p count `<= sizeof(`\p src`)`.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item.
 | /// \return A pointer to an `AMresult` struct containing a pointer to an
 | ||||||
| /// \pre \p src `!= NULL`
 | ///         `AMactorId` struct.
 | ||||||
| /// \pre `sizeof(`\p src `) > 0`
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| /// \pre \p count `<= sizeof(`\p src `)`
 | ///          in order to prevent a memory leak.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 |  | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// src must be a byte array of length `>= count`
 | /// src must be a byte array of size `>= count`
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMactorIdFromBytes(src: *const u8, count: usize) -> *mut AMresult { | pub unsafe extern "C" fn AMactorIdInitBytes(src: *const u8, count: usize) -> *mut AMresult { | ||||||
|     if !src.is_null() { |     let slice = std::slice::from_raw_parts(src, count); | ||||||
|         let value = std::slice::from_raw_parts(src, count); |  | ||||||
|     to_result(Ok::<am::ActorId, am::InvalidActorId>(am::ActorId::from( |     to_result(Ok::<am::ActorId, am::InvalidActorId>(am::ActorId::from( | ||||||
|             value, |         slice, | ||||||
|     ))) |     ))) | ||||||
|     } else { |  | ||||||
|         AMresult::error("Invalid uint8_t*").into() |  | ||||||
|     } |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMactorId
 | /// \memberof AMactorId
 | ||||||
| /// \brief Allocates a new actor identifier and initializes it from a
 | /// \brief Allocates a new actor identifier and initializes it from a
 | ||||||
| ///        hexadecimal UTF-8 string view value.
 | ///        hexadecimal string.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct.
 | /// \param[in] hex_str A UTF-8 string view as an `AMbyteSpan` struct.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item.
 | /// \return A pointer to an `AMresult` struct containing a pointer to an
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | ///         `AMactorId` struct.
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// hex_str must be a valid pointer to an AMbyteSpan
 | /// hex_str must be a valid pointer to an AMbyteSpan
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMactorIdFromStr(value: AMbyteSpan) -> *mut AMresult { | pub unsafe extern "C" fn AMactorIdInitStr(hex_str: AMbyteSpan) -> *mut AMresult { | ||||||
|     use am::AutomergeError::InvalidActorId; |     use am::AutomergeError::InvalidActorId; | ||||||
| 
 | 
 | ||||||
|     to_result(match (&value).try_into() { |     to_result(match (&hex_str).try_into() { | ||||||
|         Ok(s) => match am::ActorId::from_str(s) { |         Ok(s) => match am::ActorId::from_str(s) { | ||||||
|             Ok(actor_id) => Ok(actor_id), |             Ok(actor_id) => Ok(actor_id), | ||||||
|             Err(_) => Err(InvalidActorId(String::from(s))), |             Err(_) => Err(InvalidActorId(String::from(s))), | ||||||
|  | @ -174,12 +169,11 @@ pub unsafe extern "C" fn AMactorIdFromStr(value: AMbyteSpan) -> *mut AMresult { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMactorId
 | /// \memberof AMactorId
 | ||||||
| /// \brief Gets the value of an actor identifier as a UTF-8 hexadecimal string
 | /// \brief Gets the value of an actor identifier as a hexadecimal string.
 | ||||||
| ///        view.
 |  | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] actor_id A pointer to an `AMactorId` struct.
 | /// \param[in] actor_id A pointer to an `AMactorId` struct.
 | ||||||
|  | /// \pre \p actor_id `!= NULL`.
 | ||||||
| /// \return A UTF-8 string view as an `AMbyteSpan` struct.
 | /// \return A UTF-8 string view as an `AMbyteSpan` struct.
 | ||||||
| /// \pre \p actor_id `!= NULL`
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  |  | ||||||
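A hedged sketch tying the actor-ID calls above to the stack utilities from earlier in this changeset (no validation callback is used; `AMstr()` is documented just below, and on the other side of this diff the same constructor is named `AMactorIdInitStr()`):

    AMstack* stack = NULL;
    AMitem* item = AMstackItem(&stack, AMactorIdFromStr(AMstr("000102")), NULL, NULL);
    /* On success the item holds an AM_VAL_TYPE_ACTOR_ID value owned by the stack. */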
|  | @ -1,17 +1,14 @@ | ||||||
| use automerge as am; | use automerge as am; | ||||||
| use std::cmp::Ordering; | use libc::strlen; | ||||||
| use std::convert::TryFrom; | use std::convert::TryFrom; | ||||||
| use std::os::raw::c_char; | use std::os::raw::c_char; | ||||||
| 
 | 
 | ||||||
| use libc::{c_int, strlen}; |  | ||||||
| use smol_str::SmolStr; |  | ||||||
| 
 |  | ||||||
| macro_rules! to_str { | macro_rules! to_str { | ||||||
|     ($byte_span:expr) => {{ |     ($span:expr) => {{ | ||||||
|         let result: Result<&str, am::AutomergeError> = (&$byte_span).try_into(); |         let result: Result<&str, am::AutomergeError> = (&$span).try_into(); | ||||||
|         match result { |         match result { | ||||||
|             Ok(s) => s, |             Ok(s) => s, | ||||||
|             Err(e) => return AMresult::error(&e.to_string()).into(), |             Err(e) => return AMresult::err(&e.to_string()).into(), | ||||||
|         } |         } | ||||||
|     }}; |     }}; | ||||||
| } | } | ||||||
|  | @ -20,17 +17,16 @@ pub(crate) use to_str; | ||||||
| 
 | 
 | ||||||
| /// \struct AMbyteSpan
 | /// \struct AMbyteSpan
 | ||||||
| /// \installed_headerfile
 | /// \installed_headerfile
 | ||||||
| /// \brief A view onto an array of bytes.
 | /// \brief A view onto a contiguous sequence of bytes.
 | ||||||
| #[repr(C)] | #[repr(C)] | ||||||
| pub struct AMbyteSpan { | pub struct AMbyteSpan { | ||||||
|     /// A pointer to the first byte of an array of bytes.
 |     /// A pointer to an array of bytes.
 | ||||||
|     /// \warning \p src is only valid until the array of bytes to which it
 |     /// \attention <b>NEVER CALL `free()` ON \p src!</b>
 | ||||||
|     ///          points is freed.
 |     /// \warning \p src is only valid until the `AMfree()` function is called
 | ||||||
|     /// \note If the `AMbyteSpan` came from within an `AMitem` struct then
 |     ///          on the `AMresult` struct that stores the array of bytes to
 | ||||||
|     ///       \p src will be freed when the pointer to the `AMresult` struct
 |     ///          which it points.
 | ||||||
|     ///       containing the `AMitem` struct is passed to `AMresultFree()`.
 |  | ||||||
|     pub src: *const u8, |     pub src: *const u8, | ||||||
|     /// The count of bytes in the array.
 |     /// The number of bytes in the array.
 | ||||||
|     pub count: usize, |     pub count: usize, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -56,7 +52,9 @@ impl PartialEq for AMbyteSpan { | ||||||
|         } else if self.src == other.src { |         } else if self.src == other.src { | ||||||
|             return true; |             return true; | ||||||
|         } |         } | ||||||
|         <&[u8]>::from(self) == <&[u8]>::from(other) |         let slice = unsafe { std::slice::from_raw_parts(self.src, self.count) }; | ||||||
|  |         let other_slice = unsafe { std::slice::from_raw_parts(other.src, other.count) }; | ||||||
|  |         slice == other_slice | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -74,15 +72,10 @@ impl From<&am::ActorId> for AMbyteSpan { | ||||||
| 
 | 
 | ||||||
| impl From<&mut am::ActorId> for AMbyteSpan { | impl From<&mut am::ActorId> for AMbyteSpan { | ||||||
|     fn from(actor: &mut am::ActorId) -> Self { |     fn from(actor: &mut am::ActorId) -> Self { | ||||||
|         actor.as_ref().into() |         let slice = actor.to_bytes(); | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl From<&am::ChangeHash> for AMbyteSpan { |  | ||||||
|     fn from(change_hash: &am::ChangeHash) -> Self { |  | ||||||
|         Self { |         Self { | ||||||
|             src: change_hash.0.as_ptr(), |             src: slice.as_ptr(), | ||||||
|             count: change_hash.0.len(), |             count: slice.len(), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | @ -100,9 +93,12 @@ impl From<*const c_char> for AMbyteSpan { | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl From<&SmolStr> for AMbyteSpan { | impl From<&am::ChangeHash> for AMbyteSpan { | ||||||
|     fn from(smol_str: &SmolStr) -> Self { |     fn from(change_hash: &am::ChangeHash) -> Self { | ||||||
|         smol_str.as_bytes().into() |         Self { | ||||||
|  |             src: change_hash.0.as_ptr(), | ||||||
|  |             count: change_hash.0.len(), | ||||||
|  |         } | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -115,39 +111,13 @@ impl From<&[u8]> for AMbyteSpan { | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl From<&AMbyteSpan> for &[u8] { |  | ||||||
|     fn from(byte_span: &AMbyteSpan) -> Self { |  | ||||||
|         unsafe { std::slice::from_raw_parts(byte_span.src, byte_span.count) } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl From<&AMbyteSpan> for Vec<u8> { |  | ||||||
|     fn from(byte_span: &AMbyteSpan) -> Self { |  | ||||||
|         <&[u8]>::from(byte_span).to_vec() |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl TryFrom<&AMbyteSpan> for am::ChangeHash { |  | ||||||
|     type Error = am::AutomergeError; |  | ||||||
| 
 |  | ||||||
|     fn try_from(byte_span: &AMbyteSpan) -> Result<Self, Self::Error> { |  | ||||||
|         use am::AutomergeError::InvalidChangeHashBytes; |  | ||||||
| 
 |  | ||||||
|         let slice: &[u8] = byte_span.into(); |  | ||||||
|         match slice.try_into() { |  | ||||||
|             Ok(change_hash) => Ok(change_hash), |  | ||||||
|             Err(e) => Err(InvalidChangeHashBytes(e)), |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| impl TryFrom<&AMbyteSpan> for &str { | impl TryFrom<&AMbyteSpan> for &str { | ||||||
|     type Error = am::AutomergeError; |     type Error = am::AutomergeError; | ||||||
| 
 | 
 | ||||||
|     fn try_from(byte_span: &AMbyteSpan) -> Result<Self, Self::Error> { |     fn try_from(span: &AMbyteSpan) -> Result<Self, Self::Error> { | ||||||
|         use am::AutomergeError::InvalidCharacter; |         use am::AutomergeError::InvalidCharacter; | ||||||
| 
 | 
 | ||||||
|         let slice = byte_span.into(); |         let slice = unsafe { std::slice::from_raw_parts(span.src, span.count) }; | ||||||
|         match std::str::from_utf8(slice) { |         match std::str::from_utf8(slice) { | ||||||
|             Ok(str_) => Ok(str_), |             Ok(str_) => Ok(str_), | ||||||
|             Err(e) => Err(InvalidCharacter(e.valid_up_to())), |             Err(e) => Err(InvalidCharacter(e.valid_up_to())), | ||||||
|  | @ -155,69 +125,17 @@ impl TryFrom<&AMbyteSpan> for &str { | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMbyteSpan
 | /// \brief Creates an AMbyteSpan from a pointer + length
 | ||||||
| /// \brief Creates a view onto an array of bytes.
 |  | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] src A pointer to an array of bytes or `NULL`.
 | /// \param[in] src  A pointer to a span of bytes
 | ||||||
| /// \param[in] count The count of bytes to view from the array pointed to by
 | /// \param[in] count The number of bytes in the span
 | ||||||
| ///                  \p src.
 | /// \return An `AMbyteSpan` struct
 | ||||||
| /// \return An `AMbyteSpan` struct.
 |  | ||||||
| /// \pre \p count `<= sizeof(`\p src `)`
 |  | ||||||
| /// \post `(`\p src `== NULL) -> (AMbyteSpan){NULL, 0}`
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// #Safety
 | /// #Safety
 | ||||||
| /// src must be a byte array of length `>= count` or `std::ptr::null()`
 | /// AMbytes does not retain the underlying storage, so you must discard the
 | ||||||
|  | /// return value before freeing the bytes.
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMbytes(src: *const u8, count: usize) -> AMbyteSpan { | pub unsafe extern "C" fn AMbytes(src: *const u8, count: usize) -> AMbyteSpan { | ||||||
|     AMbyteSpan { |     AMbyteSpan { src, count } | ||||||
|         src, |  | ||||||
|         count: if src.is_null() { 0 } else { count }, |  | ||||||
|     } |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /// \memberof AMbyteSpan
 |  | ||||||
| /// \brief Creates a view onto a C string.
 |  | ||||||
| ///
 |  | ||||||
| /// \param[in] c_str A null-terminated byte string or `NULL`.
 |  | ||||||
| /// \return An `AMbyteSpan` struct.
 |  | ||||||
| /// \pre Each byte in \p c_str encodes one UTF-8 character.
 |  | ||||||
| /// \internal
 |  | ||||||
| ///
 |  | ||||||
| /// #Safety
 |  | ||||||
| /// c_str must be a null-terminated array of `std::os::raw::c_char` or `std::ptr::null()`.
 |  | ||||||
| #[no_mangle] |  | ||||||
| pub unsafe extern "C" fn AMstr(c_str: *const c_char) -> AMbyteSpan { |  | ||||||
|     c_str.into() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /// \memberof AMbyteSpan
 |  | ||||||
| /// \brief Compares two UTF-8 string views lexicographically.
 |  | ||||||
| ///        
 |  | ||||||
| /// \param[in] lhs A UTF-8 string view as an `AMbyteSpan` struct.
 |  | ||||||
| /// \param[in] rhs A UTF-8 string view as an `AMbyteSpan` struct.
 |  | ||||||
| /// \return Negative value if \p lhs appears before \p rhs in lexicographical order.
 |  | ||||||
| ///         Zero if \p lhs and \p rhs compare equal.
 |  | ||||||
| ///         Positive value if \p lhs appears after \p rhs in lexicographical order.
 |  | ||||||
| /// \pre \p lhs.src `!= NULL`
 |  | ||||||
| /// \pre \p lhs.count `<= sizeof(`\p lhs.src `)`
 |  | ||||||
| /// \pre \p rhs.src `!= NULL`
 |  | ||||||
| /// \pre \p rhs.count `<= sizeof(`\p rhs.src `)`
 |  | ||||||
| /// \internal
 |  | ||||||
| ///
 |  | ||||||
| /// #Safety
 |  | ||||||
| /// lhs.src must be a byte array of length >= lhs.count
 |  | ||||||
| /// rhs.src must be a byte array of length >= rhs.count
 |  | ||||||
| #[no_mangle] |  | ||||||
| pub unsafe extern "C" fn AMstrCmp(lhs: AMbyteSpan, rhs: AMbyteSpan) -> c_int { |  | ||||||
|     match (<&str>::try_from(&lhs), <&str>::try_from(&rhs)) { |  | ||||||
|         (Ok(lhs), Ok(rhs)) => match lhs.cmp(rhs) { |  | ||||||
|             Ordering::Less => -1, |  | ||||||
|             Ordering::Equal => 0, |  | ||||||
|             Ordering::Greater => 1, |  | ||||||
|         }, |  | ||||||
|         (Err(_), Ok(_)) => -1, |  | ||||||
|         (Err(_), Err(_)) => 0, |  | ||||||
|         (Ok(_), Err(_)) => 1, |  | ||||||
|     } |  | ||||||
| } | } | ||||||
|  |  | ||||||
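A small sketch of the string-view helpers introduced above (they exist only on the `AMstr()`/`AMstrCmp()` side of this diff):

    /* AMstr() wraps a null-terminated C string as a view without copying it. */
    AMbyteSpan lhs = AMstr("alpha");
    AMbyteSpan rhs = AMstr("beta");
    if (AMstrCmp(lhs, rhs) < 0) {
        /* "alpha" sorts before "beta" lexicographically. */
    }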
|  | @ -2,6 +2,7 @@ use automerge as am; | ||||||
| use std::cell::RefCell; | use std::cell::RefCell; | ||||||
| 
 | 
 | ||||||
| use crate::byte_span::AMbyteSpan; | use crate::byte_span::AMbyteSpan; | ||||||
|  | use crate::change_hashes::AMchangeHashes; | ||||||
| use crate::result::{to_result, AMresult}; | use crate::result::{to_result, AMresult}; | ||||||
| 
 | 
 | ||||||
| macro_rules! to_change { | macro_rules! to_change { | ||||||
|  | @ -9,7 +10,7 @@ macro_rules! to_change { | ||||||
|         let handle = $handle.as_ref(); |         let handle = $handle.as_ref(); | ||||||
|         match handle { |         match handle { | ||||||
|             Some(b) => b, |             Some(b) => b, | ||||||
|             None => return AMresult::error("Invalid `AMchange*`").into(), |             None => return AMresult::err("Invalid AMchange pointer").into(), | ||||||
|         } |         } | ||||||
|     }}; |     }}; | ||||||
| } | } | ||||||
|  | @ -20,14 +21,14 @@ macro_rules! to_change { | ||||||
| #[derive(Eq, PartialEq)] | #[derive(Eq, PartialEq)] | ||||||
| pub struct AMchange { | pub struct AMchange { | ||||||
|     body: *mut am::Change, |     body: *mut am::Change, | ||||||
|     change_hash: RefCell<Option<am::ChangeHash>>, |     changehash: RefCell<Option<am::ChangeHash>>, | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| impl AMchange { | impl AMchange { | ||||||
|     pub fn new(change: &mut am::Change) -> Self { |     pub fn new(change: &mut am::Change) -> Self { | ||||||
|         Self { |         Self { | ||||||
|             body: change, |             body: change, | ||||||
|             change_hash: Default::default(), |             changehash: Default::default(), | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | @ -39,12 +40,12 @@ impl AMchange { | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     pub fn hash(&self) -> AMbyteSpan { |     pub fn hash(&self) -> AMbyteSpan { | ||||||
|         let mut change_hash = self.change_hash.borrow_mut(); |         let mut changehash = self.changehash.borrow_mut(); | ||||||
|         if let Some(change_hash) = change_hash.as_ref() { |         if let Some(changehash) = changehash.as_ref() { | ||||||
|             change_hash.into() |             changehash.into() | ||||||
|         } else { |         } else { | ||||||
|             let hash = unsafe { (*self.body).hash() }; |             let hash = unsafe { (*self.body).hash() }; | ||||||
|             let ptr = change_hash.insert(hash); |             let ptr = changehash.insert(hash); | ||||||
|             AMbyteSpan { |             AMbyteSpan { | ||||||
|                 src: ptr.0.as_ptr(), |                 src: ptr.0.as_ptr(), | ||||||
|                 count: hash.as_ref().len(), |                 count: hash.as_ref().len(), | ||||||
|  | @ -69,10 +70,11 @@ impl AsRef<am::Change> for AMchange { | ||||||
| /// \brief Gets the first referenced actor identifier in a change.
 | /// \brief Gets the first referenced actor identifier in a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_ACTOR_ID` item.
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \return A pointer to an `AMresult` struct containing a pointer to an
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | ///         `AMactorId` struct.
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -88,8 +90,8 @@ pub unsafe extern "C" fn AMchangeActorId(change: *const AMchange) -> *mut AMresu | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
| /// \brief Compresses the raw bytes of a change.
 | /// \brief Compresses the raw bytes of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in,out] change A pointer to an `AMchange` struct.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -105,20 +107,18 @@ pub unsafe extern "C" fn AMchangeCompress(change: *mut AMchange) { | ||||||
| /// \brief Gets the dependencies of a change.
 | /// \brief Gets the dependencies of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE_HASH` items.
 | /// \return A pointer to an `AMchangeHashes` struct or `NULL`.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 |  | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// change must be a valid pointer to an AMchange
 | /// change must be a valid pointer to an AMchange
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> *mut AMresult { | pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> AMchangeHashes { | ||||||
|     to_result(match change.as_ref() { |     match change.as_ref() { | ||||||
|         Some(change) => change.as_ref().deps(), |         Some(change) => AMchangeHashes::new(change.as_ref().deps()), | ||||||
|         None => Default::default(), |         None => Default::default(), | ||||||
|     }) |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
|  | @ -126,7 +126,7 @@ pub unsafe extern "C" fn AMchangeDeps(change: *const AMchange) -> *mut AMresult | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return An `AMbyteSpan` struct.
 | /// \return An `AMbyteSpan` struct.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -141,33 +141,32 @@ pub unsafe extern "C" fn AMchangeExtraBytes(change: *const AMchange) -> AMbyteSp | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
| /// \brief Allocates a new change and initializes it from an array of bytes value.
 | /// \brief Loads a sequence of bytes into a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] src A pointer to an array of bytes.
 | /// \param[in] src A pointer to an array of bytes.
 | ||||||
| /// \param[in] count The count of bytes to load from the array pointed to by
 | /// \param[in] count The number of bytes in \p src to load.
 | ||||||
| ///                  \p src.
 | /// \return A pointer to an `AMresult` struct containing an `AMchange` struct.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_CHANGE` item.
 | /// \pre \p src `!= NULL`.
 | ||||||
| /// \pre \p src `!= NULL`
 | /// \pre `0 <` \p count `<= sizeof(`\p src`)`.
 | ||||||
| /// \pre `sizeof(`\p src `) > 0`
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| /// \pre \p count `<= sizeof(`\p src `)`
 | ///          in order to prevent a memory leak.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 |  | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// src must be a byte array of length `>= count`
 | /// src must be a byte array of size `>= count`
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMchangeFromBytes(src: *const u8, count: usize) -> *mut AMresult { | pub unsafe extern "C" fn AMchangeFromBytes(src: *const u8, count: usize) -> *mut AMresult { | ||||||
|     let data = std::slice::from_raw_parts(src, count); |     let mut data = Vec::new(); | ||||||
|     to_result(am::Change::from_bytes(data.to_vec())) |     data.extend_from_slice(std::slice::from_raw_parts(src, count)); | ||||||
|  |     to_result(am::Change::from_bytes(data)) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
| /// \brief Gets the hash of a change.
 | /// \brief Gets the hash of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return An `AMbyteSpan` struct for a change hash.
 | /// \return A change hash as an `AMbyteSpan` struct.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -184,8 +183,8 @@ pub unsafe extern "C" fn AMchangeHash(change: *const AMchange) -> AMbyteSpan { | ||||||
| /// \brief Tests the emptiness of a change.
 | /// \brief Tests the emptiness of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return `true` if \p change is empty, `false` otherwise.
 | /// \return A boolean.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -199,37 +198,12 @@ pub unsafe extern "C" fn AMchangeIsEmpty(change: *const AMchange) -> bool { | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMchange
 |  | ||||||
| /// \brief Loads a document into a sequence of changes.
 |  | ||||||
| ///
 |  | ||||||
| /// \param[in] src A pointer to an array of bytes.
 |  | ||||||
| /// \param[in] count The count of bytes to load from the array pointed to by
 |  | ||||||
| ///                  \p src.
 |  | ||||||
| /// \return A pointer to an `AMresult` struct with `AM_VAL_TYPE_CHANGE` items.
 |  | ||||||
| /// \pre \p src `!= NULL`
 |  | ||||||
| /// \pre `sizeof(`\p src `) > 0`
 |  | ||||||
| /// \pre \p count `<= sizeof(`\p src `)`
 |  | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 |  | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 |  | ||||||
| ///
 |  | ||||||
| /// # Safety
 |  | ||||||
| /// src must be a byte array of length `>= count`
 |  | ||||||
| #[no_mangle] |  | ||||||
| pub unsafe extern "C" fn AMchangeLoadDocument(src: *const u8, count: usize) -> *mut AMresult { |  | ||||||
|     let data = std::slice::from_raw_parts(src, count); |  | ||||||
|     to_result::<Result<Vec<am::Change>, _>>( |  | ||||||
|         am::Automerge::load(data) |  | ||||||
|             .and_then(|d| d.get_changes(&[]).map(|c| c.into_iter().cloned().collect())), |  | ||||||
|     ) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
| /// \brief Gets the maximum operation index of a change.
 | /// \brief Gets the maximum operation index of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A 64-bit unsigned integer.
 | /// \return A 64-bit unsigned integer.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -247,8 +221,8 @@ pub unsafe extern "C" fn AMchangeMaxOp(change: *const AMchange) -> u64 { | ||||||
| /// \brief Gets the message of a change.
 | /// \brief Gets the message of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return An `AMbyteSpan` struct for a UTF-8 string.
 | /// \return A UTF-8 string view as an `AMbyteSpan` struct.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -266,7 +240,7 @@ pub unsafe extern "C" fn AMchangeMessage(change: *const AMchange) -> AMbyteSpan | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A 64-bit unsigned integer.
 | /// \return A 64-bit unsigned integer.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -285,7 +259,7 @@ pub unsafe extern "C" fn AMchangeSeq(change: *const AMchange) -> u64 { | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A 64-bit unsigned integer.
 | /// \return A 64-bit unsigned integer.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -293,17 +267,18 @@ pub unsafe extern "C" fn AMchangeSeq(change: *const AMchange) -> u64 { | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMchangeSize(change: *const AMchange) -> usize { | pub unsafe extern "C" fn AMchangeSize(change: *const AMchange) -> usize { | ||||||
|     if let Some(change) = change.as_ref() { |     if let Some(change) = change.as_ref() { | ||||||
|         return change.as_ref().len(); |         change.as_ref().len() | ||||||
|     } |     } else { | ||||||
|         0 |         0 | ||||||
|     } |     } | ||||||
|  | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMchange
 | /// \memberof AMchange
 | ||||||
| /// \brief Gets the start operation index of a change.
 | /// \brief Gets the start operation index of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A 64-bit unsigned integer.
 | /// \return A 64-bit unsigned integer.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -322,7 +297,7 @@ pub unsafe extern "C" fn AMchangeStartOp(change: *const AMchange) -> u64 { | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return A 64-bit signed integer.
 | /// \return A 64-bit signed integer.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -340,8 +315,8 @@ pub unsafe extern "C" fn AMchangeTime(change: *const AMchange) -> i64 { | ||||||
| /// \brief Gets the raw bytes of a change.
 | /// \brief Gets the raw bytes of a change.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] change A pointer to an `AMchange` struct.
 | /// \param[in] change A pointer to an `AMchange` struct.
 | ||||||
| /// \return An `AMbyteSpan` struct for an array of bytes.
 | /// \return An `AMbyteSpan` struct.
 | ||||||
| /// \pre \p change `!= NULL`
 | /// \pre \p change `!= NULL`.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -354,3 +329,28 @@ pub unsafe extern "C" fn AMchangeRawBytes(change: *const AMchange) -> AMbyteSpan | ||||||
|         Default::default() |         Default::default() | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchange
 | ||||||
|  | /// \brief Loads a document into a sequence of changes.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] src A pointer to an array of bytes.
 | ||||||
|  | /// \param[in] count The number of bytes in \p src to load.
 | ||||||
|  | /// \return A pointer to an `AMresult` struct containing a sequence of
 | ||||||
|  | ///         `AMchange` structs.
 | ||||||
|  | /// \pre \p src `!= NULL`.
 | ||||||
|  | /// \pre `0 <` \p count `<= sizeof(`\p src`)`.
 | ||||||
|  | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// src must be a byte array of size `>= count`
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeLoadDocument(src: *const u8, count: usize) -> *mut AMresult { | ||||||
|  |     let mut data = Vec::new(); | ||||||
|  |     data.extend_from_slice(std::slice::from_raw_parts(src, count)); | ||||||
|  |     to_result::<Result<Vec<am::Change>, _>>( | ||||||
|  |         am::Automerge::load(&data) | ||||||
|  |             .and_then(|d| d.get_changes(&[]).map(|c| c.into_iter().cloned().collect())), | ||||||
|  |     ) | ||||||
|  | } | ||||||
|  |  | ||||||
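A hedged sketch of loading a single change from raw bytes with the calls above (`bytes` and `count` are an assumed serialized change; no validation callback is used):

    AMstack* stack = NULL;
    AMitem* item = AMstackItem(&stack, AMchangeFromBytes(bytes, count), NULL, NULL);
    /* On success the item holds an AM_VAL_TYPE_CHANGE value; AMchangeHash() can then
     * be used on the underlying AMchange to read its hash as an AMbyteSpan. */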
							
								
								
									
rust/automerge-c/src/change_hashes.rs | 400 (new file)
							|  | @ -0,0 +1,400 @@ | ||||||
|  | use automerge as am; | ||||||
|  | use std::cmp::Ordering; | ||||||
|  | use std::ffi::c_void; | ||||||
|  | use std::mem::size_of; | ||||||
|  | 
 | ||||||
|  | use crate::byte_span::AMbyteSpan; | ||||||
|  | use crate::result::{to_result, AMresult}; | ||||||
|  | 
 | ||||||
|  | #[repr(C)] | ||||||
|  | struct Detail { | ||||||
|  |     len: usize, | ||||||
|  |     offset: isize, | ||||||
|  |     ptr: *const c_void, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \note cbindgen won't propagate the value of a `std::mem::size_of<T>()` call
 | ||||||
|  | ///       (https://github.com/eqrion/cbindgen/issues/252) but it will
 | ||||||
|  | ///       propagate the name of a constant initialized from it so if the
 | ||||||
|  | ///       constant's name is a symbolic representation of the value it can be
 | ||||||
|  | ///       converted into a number by post-processing the header it generated.
 | ||||||
|  | pub const USIZE_USIZE_USIZE_: usize = size_of::<Detail>(); | ||||||
|  | 
 | ||||||
|  | impl Detail { | ||||||
|  |     fn new(change_hashes: &[am::ChangeHash], offset: isize) -> Self { | ||||||
|  |         Self { | ||||||
|  |             len: change_hashes.len(), | ||||||
|  |             offset, | ||||||
|  |             ptr: change_hashes.as_ptr() as *const c_void, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn advance(&mut self, n: isize) { | ||||||
|  |         if n == 0 { | ||||||
|  |             return; | ||||||
|  |         } | ||||||
|  |         let len = self.len as isize; | ||||||
|  |         self.offset = if self.offset < 0 { | ||||||
|  |             // It's reversed.
 | ||||||
|  |             let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); | ||||||
|  |             if unclipped >= 0 { | ||||||
|  |                 // Clip it to the forward stop.
 | ||||||
|  |                 len | ||||||
|  |             } else { | ||||||
|  |                 std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) | ||||||
|  |             } | ||||||
|  |         } else { | ||||||
|  |             let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); | ||||||
|  |             if unclipped < 0 { | ||||||
|  |                 // Clip it to the reverse stop.
 | ||||||
|  |                 -(len + 1) | ||||||
|  |             } else { | ||||||
|  |                 std::cmp::max(0, std::cmp::min(unclipped, len)) | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn get_index(&self) -> usize { | ||||||
|  |         (self.offset | ||||||
|  |             + if self.offset < 0 { | ||||||
|  |                 self.len as isize | ||||||
|  |             } else { | ||||||
|  |                 0 | ||||||
|  |             }) as usize | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn next(&mut self, n: isize) -> Option<&am::ChangeHash> { | ||||||
|  |         if self.is_stopped() { | ||||||
|  |             return None; | ||||||
|  |         } | ||||||
|  |         let slice: &[am::ChangeHash] = | ||||||
|  |             unsafe { std::slice::from_raw_parts(self.ptr as *const am::ChangeHash, self.len) }; | ||||||
|  |         let value = &slice[self.get_index()]; | ||||||
|  |         self.advance(n); | ||||||
|  |         Some(value) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn is_stopped(&self) -> bool { | ||||||
|  |         let len = self.len as isize; | ||||||
|  |         self.offset < -len || self.offset == len | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn prev(&mut self, n: isize) -> Option<&am::ChangeHash> { | ||||||
|  |         self.advance(-n); | ||||||
|  |         if self.is_stopped() { | ||||||
|  |             return None; | ||||||
|  |         } | ||||||
|  |         let slice: &[am::ChangeHash] = | ||||||
|  |             unsafe { std::slice::from_raw_parts(self.ptr as *const am::ChangeHash, self.len) }; | ||||||
|  |         Some(&slice[self.get_index()]) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn reversed(&self) -> Self { | ||||||
|  |         Self { | ||||||
|  |             len: self.len, | ||||||
|  |             offset: -(self.offset + 1), | ||||||
|  |             ptr: self.ptr, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn rewound(&self) -> Self { | ||||||
|  |         Self { | ||||||
|  |             len: self.len, | ||||||
|  |             offset: if self.offset < 0 { -1 } else { 0 }, | ||||||
|  |             ptr: self.ptr, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl From<Detail> for [u8; USIZE_USIZE_USIZE_] { | ||||||
|  |     fn from(detail: Detail) -> Self { | ||||||
|  |         unsafe { | ||||||
|  |             std::slice::from_raw_parts((&detail as *const Detail) as *const u8, USIZE_USIZE_USIZE_) | ||||||
|  |                 .try_into() | ||||||
|  |                 .unwrap() | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \struct AMchangeHashes
 | ||||||
|  | /// \installed_headerfile
 | ||||||
|  | /// \brief A random-access iterator over a sequence of change hashes.
 | ||||||
|  | #[repr(C)] | ||||||
|  | #[derive(Eq, PartialEq)] | ||||||
|  | pub struct AMchangeHashes { | ||||||
|  |     /// An implementation detail that is intentionally opaque.
 | ||||||
|  |     /// \warning Modifying \p detail will cause undefined behavior.
 | ||||||
|  |     /// \note The actual size of \p detail will vary by platform; this is just
 | ||||||
|  |     ///       the one for the platform this documentation was built on.
 | ||||||
|  |     detail: [u8; USIZE_USIZE_USIZE_], | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl AMchangeHashes { | ||||||
|  |     pub fn new(change_hashes: &[am::ChangeHash]) -> Self { | ||||||
|  |         Self { | ||||||
|  |             detail: Detail::new(change_hashes, 0).into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn advance(&mut self, n: isize) { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.advance(n); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn len(&self) -> usize { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         detail.len | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn next(&mut self, n: isize) -> Option<&am::ChangeHash> { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.next(n) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn prev(&mut self, n: isize) -> Option<&am::ChangeHash> { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.prev(n) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn reversed(&self) -> Self { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         Self { | ||||||
|  |             detail: detail.reversed().into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn rewound(&self) -> Self { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         Self { | ||||||
|  |             detail: detail.rewound().into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl AsRef<[am::ChangeHash]> for AMchangeHashes { | ||||||
|  |     fn as_ref(&self) -> &[am::ChangeHash] { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         unsafe { std::slice::from_raw_parts(detail.ptr as *const am::ChangeHash, detail.len) } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl Default for AMchangeHashes { | ||||||
|  |     fn default() -> Self { | ||||||
|  |         Self { | ||||||
|  |             detail: [0; USIZE_USIZE_USIZE_], | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Advances an iterator over a sequence of change hashes by at most
 | ||||||
|  | ///        \p |n| positions where the sign of \p n is relative to the
 | ||||||
|  | ///        iterator's direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesAdvance(change_hashes: *mut AMchangeHashes, n: isize) { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_mut() { | ||||||
|  |         change_hashes.advance(n); | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Compares the sequences of change hashes underlying a pair of
 | ||||||
|  | ///        iterators.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] change_hashes1 A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \param[in] change_hashes2 A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \return `-1` if \p change_hashes1 `<` \p change_hashes2, `0` if
 | ||||||
|  | ///         \p change_hashes1 `==` \p change_hashes2 and `1` if
 | ||||||
|  | ///         \p change_hashes1 `>` \p change_hashes2.
 | ||||||
|  | /// \pre \p change_hashes1 `!= NULL`.
 | ||||||
|  | /// \pre \p change_hashes2 `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes1 must be a valid pointer to an AMchangeHashes
 | ||||||
|  | /// change_hashes2 must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesCmp( | ||||||
|  |     change_hashes1: *const AMchangeHashes, | ||||||
|  |     change_hashes2: *const AMchangeHashes, | ||||||
|  | ) -> isize { | ||||||
|  |     match (change_hashes1.as_ref(), change_hashes2.as_ref()) { | ||||||
|  |         (Some(change_hashes1), Some(change_hashes2)) => { | ||||||
|  |             match change_hashes1.as_ref().cmp(change_hashes2.as_ref()) { | ||||||
|  |                 Ordering::Less => -1, | ||||||
|  |                 Ordering::Equal => 0, | ||||||
|  |                 Ordering::Greater => 1, | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         (None, Some(_)) => -1, | ||||||
|  |         (Some(_), None) => 1, | ||||||
|  |         (None, None) => 0, | ||||||
|  |     } | ||||||
|  | } | ||||||
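
A hedged usage sketch: `AMchangeHashesCmp()` only reports the ordering of the two underlying sequences, so it can be used to check whether, say, two sets of heads obtained elsewhere are identical. The header name and surrounding code are assumptions:

    #include <automerge-c/automerge.h>  /* assumed header name */
    #include <stdio.h>

    /* `heads1` and `heads2` were obtained elsewhere, e.g. from two documents. */
    void report_ordering(const AMchangeHashes* heads1, const AMchangeHashes* heads2) {
        switch (AMchangeHashesCmp(heads1, heads2)) {
            case -1: puts("heads1 < heads2");  break;
            case 0:  puts("heads1 == heads2"); break;
            default: puts("heads1 > heads2");  break;
        }
    }
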
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Allocates an iterator over a sequence of change hashes and
 | ||||||
|  | ///        initializes it from a sequence of byte spans.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] src A pointer to an array of `AMbyteSpan` structs.
 | ||||||
|  | /// \param[in] count The number of `AMbyteSpan` structs to copy from \p src.
 | ||||||
|  | /// \return A pointer to an `AMresult` struct containing an `AMchangeHashes`
 | ||||||
|  | ///         struct.
 | ||||||
|  | /// \pre \p src `!= NULL`.
 | ||||||
|  | /// \pre `0 <` \p count `<= sizeof(`\p src`) / sizeof(AMbyteSpan)`.
 | ||||||
|  | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// src must be an AMbyteSpan array of size `>= count`
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesInit(src: *const AMbyteSpan, count: usize) -> *mut AMresult { | ||||||
|  |     let mut change_hashes = Vec::<am::ChangeHash>::new(); | ||||||
|  |     for n in 0..count { | ||||||
|  |         let byte_span = &*src.add(n); | ||||||
|  |         let slice = std::slice::from_raw_parts(byte_span.src, byte_span.count); | ||||||
|  |         match slice.try_into() { | ||||||
|  |             Ok(change_hash) => { | ||||||
|  |                 change_hashes.push(change_hash); | ||||||
|  |             } | ||||||
|  |             Err(e) => { | ||||||
|  |                 return to_result(Err(e)); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     to_result(Ok::<Vec<am::ChangeHash>, am::InvalidChangeHashSlice>( | ||||||
|  |         change_hashes, | ||||||
|  |     )) | ||||||
|  | } | ||||||
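
A sketch of initializing an iterator from raw hashes: an Automerge change hash is a 32-byte SHA-256 digest, and the `AMbyteSpan` fields used here (`.src`, `.count`) are the ones read by the loop above. The header name and the result-accessor step are assumptions:

    #include <automerge-c/automerge.h>  /* assumed header name */
    #include <stdint.h>

    void init_from_raw_hashes(const uint8_t hash1[32], const uint8_t hash2[32]) {
        const AMbyteSpan spans[2] = {
            {.src = hash1, .count = 32},
            {.src = hash2, .count = 32},
        };
        AMresult* result = AMchangeHashesInit(spans, 2);
        /* ... extract the `AMchangeHashes` value from `result` (accessor not
           shown in this hunk) and use it while `result` remains alive ... */
        AMfree(result);
    }
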
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Gets the change hash at the current position of an iterator over a
 | ||||||
|  | ///        sequence of change hashes and then advances it by at most \p |n|
 | ||||||
|  | ///        positions where the sign of \p n is relative to the iterator's
 | ||||||
|  | ///        direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \return An `AMbyteSpan` struct with `.src == NULL` when \p change_hashes
 | ||||||
|  | ///         was previously advanced past its forward/reverse limit.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesNext( | ||||||
|  |     change_hashes: *mut AMchangeHashes, | ||||||
|  |     n: isize, | ||||||
|  | ) -> AMbyteSpan { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_mut() { | ||||||
|  |         if let Some(change_hash) = change_hashes.next(n) { | ||||||
|  |             return change_hash.into(); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     Default::default() | ||||||
|  | } | ||||||
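
As the return documentation above notes, a span whose `.src` is `NULL` marks the end of iteration, so a forward walk from C can be written as follows (header name assumed):

    #include <automerge-c/automerge.h>  /* assumed header name */
    #include <stdio.h>

    /* The iterator is taken by value; the caller's copy is left untouched. */
    void print_hash_sizes(AMchangeHashes hashes) {
        AMbyteSpan hash = AMchangeHashesNext(&hashes, 1);
        while (hash.src != NULL) {
            printf("change hash of %zu bytes\n", hash.count);
            hash = AMchangeHashesNext(&hashes, 1);
        }
    }
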
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Advances an iterator over a sequence of change hashes by at most
 | ||||||
|  | ///        \p |n| positions where the sign of \p n is relative to the
 | ||||||
|  | ///        iterator's direction and then gets the change hash at its new
 | ||||||
|  | ///        position.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \return An `AMbyteSpan` struct with `.src == NULL` when \p change_hashes is
 | ||||||
|  | ///         presently advanced past its forward/reverse limit.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesPrev( | ||||||
|  |     change_hashes: *mut AMchangeHashes, | ||||||
|  |     n: isize, | ||||||
|  | ) -> AMbyteSpan { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_mut() { | ||||||
|  |         if let Some(change_hash) = change_hashes.prev(n) { | ||||||
|  |             return change_hash.into(); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     Default::default() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Gets the size of the sequence of change hashes underlying an
 | ||||||
|  | ///        iterator.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \return The count of values in \p change_hashes.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesSize(change_hashes: *const AMchangeHashes) -> usize { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_ref() { | ||||||
|  |         change_hashes.len() | ||||||
|  |     } else { | ||||||
|  |         0 | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Creates an iterator over the same sequence of change hashes as the
 | ||||||
|  | ///        given one but with the opposite position and direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \return An `AMchangeHashes` struct.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesReversed( | ||||||
|  |     change_hashes: *const AMchangeHashes, | ||||||
|  | ) -> AMchangeHashes { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_ref() { | ||||||
|  |         change_hashes.reversed() | ||||||
|  |     } else { | ||||||
|  |         Default::default() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchangeHashes
 | ||||||
|  | /// \brief Creates an iterator at the starting position over the same sequence
 | ||||||
|  | ///        of change hashes as the given one.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] change_hashes A pointer to an `AMchangeHashes` struct.
 | ||||||
|  | /// \return An `AMchangeHashes` struct.
 | ||||||
|  | /// \pre \p change_hashes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// change_hashes must be a valid pointer to an AMchangeHashes
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangeHashesRewound( | ||||||
|  |     change_hashes: *const AMchangeHashes, | ||||||
|  | ) -> AMchangeHashes { | ||||||
|  |     if let Some(change_hashes) = change_hashes.as_ref() { | ||||||
|  |         change_hashes.rewound() | ||||||
|  |     } else { | ||||||
|  |         Default::default() | ||||||
|  |     } | ||||||
|  | } | ||||||
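
Since `AMchangeHashesReversed()` and `AMchangeHashesRewound()` return fresh iterators over the same underlying sequence, the first and last hashes can be peeked without disturbing the caller's iterator. A small sketch (header name assumed):

    #include <automerge-c/automerge.h>  /* assumed header name */

    void peek_both_ends(const AMchangeHashes* hashes) {
        AMchangeHashes forward = AMchangeHashesRewound(hashes);
        AMbyteSpan first = AMchangeHashesNext(&forward, 1);

        AMchangeHashes backward = AMchangeHashesReversed(hashes);
        AMbyteSpan last = AMchangeHashesNext(&backward, 1);

        (void)first;  /* .src == NULL if the sequence is empty */
        (void)last;
    }
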
							
								
								
									
399  rust/automerge-c/src/changes.rs  Normal file
							|  | @ -0,0 +1,399 @@ | ||||||
|  | use automerge as am; | ||||||
|  | use std::collections::BTreeMap; | ||||||
|  | use std::ffi::c_void; | ||||||
|  | use std::mem::size_of; | ||||||
|  | 
 | ||||||
|  | use crate::byte_span::AMbyteSpan; | ||||||
|  | use crate::change::AMchange; | ||||||
|  | use crate::result::{to_result, AMresult}; | ||||||
|  | 
 | ||||||
|  | #[repr(C)] | ||||||
|  | struct Detail { | ||||||
|  |     len: usize, | ||||||
|  |     offset: isize, | ||||||
|  |     ptr: *const c_void, | ||||||
|  |     storage: *mut c_void, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \note cbindgen won't propagate the value of a `std::mem::size_of<T>()` call
 | ||||||
|  | ///       (https://github.com/eqrion/cbindgen/issues/252) but it will
 | ||||||
|  | ///       propagate the name of a constant initialized from it, so if the
 | ||||||
|  | ///       constant's name is a symbolic representation of the value, it can be
 | ||||||
|  | ///       converted into a number by post-processing the header it generated.
 | ||||||
|  | pub const USIZE_USIZE_USIZE_USIZE_: usize = size_of::<Detail>(); | ||||||
|  | 
 | ||||||
|  | impl Detail { | ||||||
|  |     fn new(changes: &[am::Change], offset: isize, storage: &mut BTreeMap<usize, AMchange>) -> Self { | ||||||
|  |         let storage: *mut BTreeMap<usize, AMchange> = storage; | ||||||
|  |         Self { | ||||||
|  |             len: changes.len(), | ||||||
|  |             offset, | ||||||
|  |             ptr: changes.as_ptr() as *const c_void, | ||||||
|  |             storage: storage as *mut c_void, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn advance(&mut self, n: isize) { | ||||||
|  |         if n == 0 { | ||||||
|  |             return; | ||||||
|  |         } | ||||||
|  |         let len = self.len as isize; | ||||||
|  |         self.offset = if self.offset < 0 { | ||||||
|  |             // It's reversed.
 | ||||||
|  |             let unclipped = self.offset.checked_sub(n).unwrap_or(isize::MIN); | ||||||
|  |             if unclipped >= 0 { | ||||||
|  |                 // Clip it to the forward stop.
 | ||||||
|  |                 len | ||||||
|  |             } else { | ||||||
|  |                 std::cmp::min(std::cmp::max(-(len + 1), unclipped), -1) | ||||||
|  |             } | ||||||
|  |         } else { | ||||||
|  |             let unclipped = self.offset.checked_add(n).unwrap_or(isize::MAX); | ||||||
|  |             if unclipped < 0 { | ||||||
|  |                 // Clip it to the reverse stop.
 | ||||||
|  |                 -(len + 1) | ||||||
|  |             } else { | ||||||
|  |                 std::cmp::max(0, std::cmp::min(unclipped, len)) | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn get_index(&self) -> usize { | ||||||
|  |         (self.offset | ||||||
|  |             + if self.offset < 0 { | ||||||
|  |                 self.len as isize | ||||||
|  |             } else { | ||||||
|  |                 0 | ||||||
|  |             }) as usize | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn next(&mut self, n: isize) -> Option<*const AMchange> { | ||||||
|  |         if self.is_stopped() { | ||||||
|  |             return None; | ||||||
|  |         } | ||||||
|  |         let slice: &mut [am::Change] = | ||||||
|  |             unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut am::Change, self.len) }; | ||||||
|  |         let storage = unsafe { &mut *(self.storage as *mut BTreeMap<usize, AMchange>) }; | ||||||
|  |         let index = self.get_index(); | ||||||
|  |         let value = match storage.get_mut(&index) { | ||||||
|  |             Some(value) => value, | ||||||
|  |             None => { | ||||||
|  |                 storage.insert(index, AMchange::new(&mut slice[index])); | ||||||
|  |                 storage.get_mut(&index).unwrap() | ||||||
|  |             } | ||||||
|  |         }; | ||||||
|  |         self.advance(n); | ||||||
|  |         Some(value) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn is_stopped(&self) -> bool { | ||||||
|  |         let len = self.len as isize; | ||||||
|  |         self.offset < -len || self.offset == len | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn prev(&mut self, n: isize) -> Option<*const AMchange> { | ||||||
|  |         self.advance(-n); | ||||||
|  |         if self.is_stopped() { | ||||||
|  |             return None; | ||||||
|  |         } | ||||||
|  |         let slice: &mut [am::Change] = | ||||||
|  |             unsafe { std::slice::from_raw_parts_mut(self.ptr as *mut am::Change, self.len) }; | ||||||
|  |         let storage = unsafe { &mut *(self.storage as *mut BTreeMap<usize, AMchange>) }; | ||||||
|  |         let index = self.get_index(); | ||||||
|  |         Some(match storage.get_mut(&index) { | ||||||
|  |             Some(value) => value, | ||||||
|  |             None => { | ||||||
|  |                 storage.insert(index, AMchange::new(&mut slice[index])); | ||||||
|  |                 storage.get_mut(&index).unwrap() | ||||||
|  |             } | ||||||
|  |         }) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn reversed(&self) -> Self { | ||||||
|  |         Self { | ||||||
|  |             len: self.len, | ||||||
|  |             offset: -(self.offset + 1), | ||||||
|  |             ptr: self.ptr, | ||||||
|  |             storage: self.storage, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn rewound(&self) -> Self { | ||||||
|  |         Self { | ||||||
|  |             len: self.len, | ||||||
|  |             offset: if self.offset < 0 { -1 } else { 0 }, | ||||||
|  |             ptr: self.ptr, | ||||||
|  |             storage: self.storage, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl From<Detail> for [u8; USIZE_USIZE_USIZE_USIZE_] { | ||||||
|  |     fn from(detail: Detail) -> Self { | ||||||
|  |         unsafe { | ||||||
|  |             std::slice::from_raw_parts( | ||||||
|  |                 (&detail as *const Detail) as *const u8, | ||||||
|  |                 USIZE_USIZE_USIZE_USIZE_, | ||||||
|  |             ) | ||||||
|  |             .try_into() | ||||||
|  |             .unwrap() | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \struct AMchanges
 | ||||||
|  | /// \installed_headerfile
 | ||||||
|  | /// \brief A random-access iterator over a sequence of changes.
 | ||||||
|  | #[repr(C)] | ||||||
|  | #[derive(Eq, PartialEq)] | ||||||
|  | pub struct AMchanges { | ||||||
|  |     /// An implementation detail that is intentionally opaque.
 | ||||||
|  |     /// \warning Modifying \p detail will cause undefined behavior.
 | ||||||
|  |     /// \note The actual size of \p detail will vary by platform; this is just
 | ||||||
|  |     ///       the one for the platform this documentation was built on.
 | ||||||
|  |     detail: [u8; USIZE_USIZE_USIZE_USIZE_], | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl AMchanges { | ||||||
|  |     pub fn new(changes: &[am::Change], storage: &mut BTreeMap<usize, AMchange>) -> Self { | ||||||
|  |         Self { | ||||||
|  |             detail: Detail::new(changes, 0, &mut *storage).into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn advance(&mut self, n: isize) { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.advance(n); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn len(&self) -> usize { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         detail.len | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn next(&mut self, n: isize) -> Option<*const AMchange> { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.next(n) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn prev(&mut self, n: isize) -> Option<*const AMchange> { | ||||||
|  |         let detail = unsafe { &mut *(self.detail.as_mut_ptr() as *mut Detail) }; | ||||||
|  |         detail.prev(n) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn reversed(&self) -> Self { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         Self { | ||||||
|  |             detail: detail.reversed().into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn rewound(&self) -> Self { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         Self { | ||||||
|  |             detail: detail.rewound().into(), | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl AsRef<[am::Change]> for AMchanges { | ||||||
|  |     fn as_ref(&self) -> &[am::Change] { | ||||||
|  |         let detail = unsafe { &*(self.detail.as_ptr() as *const Detail) }; | ||||||
|  |         unsafe { std::slice::from_raw_parts(detail.ptr as *const am::Change, detail.len) } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | impl Default for AMchanges { | ||||||
|  |     fn default() -> Self { | ||||||
|  |         Self { | ||||||
|  |             detail: [0; USIZE_USIZE_USIZE_USIZE_], | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Advances an iterator over a sequence of changes by at most \p |n|
 | ||||||
|  | ///        positions where the sign of \p n is relative to the iterator's
 | ||||||
|  | ///        direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesAdvance(changes: *mut AMchanges, n: isize) { | ||||||
|  |     if let Some(changes) = changes.as_mut() { | ||||||
|  |         changes.advance(n); | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Tests the equality of two sequences of changes underlying a pair of
 | ||||||
|  | ///        iterators.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] changes1 A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \param[in] changes2 A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \return `true` if \p changes1 `==` \p changes2 and `false` otherwise.
 | ||||||
|  | /// \pre \p changes1 `!= NULL`.
 | ||||||
|  | /// \pre \p changes2 `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes1 must be a valid pointer to an AMchanges
 | ||||||
|  | /// changes2 must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesEqual( | ||||||
|  |     changes1: *const AMchanges, | ||||||
|  |     changes2: *const AMchanges, | ||||||
|  | ) -> bool { | ||||||
|  |     match (changes1.as_ref(), changes2.as_ref()) { | ||||||
|  |         (Some(changes1), Some(changes2)) => changes1.as_ref() == changes2.as_ref(), | ||||||
|  |         (None, Some(_)) | (Some(_), None) | (None, None) => false, | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Allocates an iterator over a sequence of changes and initializes it
 | ||||||
|  | ///        from a sequence of byte spans.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] src A pointer to an array of `AMbyteSpan` structs.
 | ||||||
|  | /// \param[in] count The number of `AMbyteSpan` structs to copy from \p src.
 | ||||||
|  | /// \return A pointer to an `AMresult` struct containing an `AMchanges` struct.
 | ||||||
|  | /// \pre \p src `!= NULL`.
 | ||||||
|  | /// \pre `0 <` \p count `<= sizeof(`\p src`) / sizeof(AMbyteSpan)`.
 | ||||||
|  | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
|  | ///          in order to prevent a memory leak.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// src must be an AMbyteSpan array of size `>= count`
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesInit(src: *const AMbyteSpan, count: usize) -> *mut AMresult { | ||||||
|  |     let mut changes = Vec::<am::Change>::new(); | ||||||
|  |     for n in 0..count { | ||||||
|  |         let byte_span = &*src.add(n); | ||||||
|  |         let slice = std::slice::from_raw_parts(byte_span.src, byte_span.count); | ||||||
|  |         match slice.try_into() { | ||||||
|  |             Ok(change) => { | ||||||
|  |                 changes.push(change); | ||||||
|  |             } | ||||||
|  |             Err(e) => { | ||||||
|  |                 return to_result(Err::<Vec<am::Change>, am::LoadChangeError>(e)); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     to_result(Ok::<Vec<am::Change>, am::LoadChangeError>(changes)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Gets the change at the current position of an iterator over a
 | ||||||
|  | ///        sequence of changes and then advances it by at most \p |n| positions
 | ||||||
|  | ///        where the sign of \p n is relative to the iterator's direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \return A pointer to an `AMchange` struct that's `NULL` when \p changes was
 | ||||||
|  | ///         previously advanced past its forward/reverse limit.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesNext(changes: *mut AMchanges, n: isize) -> *const AMchange { | ||||||
|  |     if let Some(changes) = changes.as_mut() { | ||||||
|  |         if let Some(change) = changes.next(n) { | ||||||
|  |             return change; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     std::ptr::null() | ||||||
|  | } | ||||||
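
The same sentinel convention applies to `AMchangesNext()`, except that the sentinel is a `NULL` `AMchange` pointer; combined with `AMchangeRawBytes()` from the earlier hunk this gives a simple C walk over a sequence of changes (header name assumed):

    #include <automerge-c/automerge.h>  /* assumed header name */
    #include <stdio.h>

    void dump_change_sizes(AMchanges changes) {
        const AMchange* change = AMchangesNext(&changes, 1);
        while (change != NULL) {
            AMbyteSpan raw = AMchangeRawBytes(change);
            printf("change of %zu raw bytes\n", raw.count);
            change = AMchangesNext(&changes, 1);
        }
    }
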
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Advances an iterator over a sequence of changes by at most \p |n|
 | ||||||
|  | ///        positions where the sign of \p n is relative to the iterator's
 | ||||||
|  | ///        direction and then gets the change at its new position.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in,out] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \param[in] n The direction (\p -n -> opposite, \p n -> same) and maximum
 | ||||||
|  | ///              number of positions to advance.
 | ||||||
|  | /// \return A pointer to an `AMchange` struct that's `NULL` when \p changes is
 | ||||||
|  | ///         presently advanced past its forward/reverse limit.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesPrev(changes: *mut AMchanges, n: isize) -> *const AMchange { | ||||||
|  |     if let Some(changes) = changes.as_mut() { | ||||||
|  |         if let Some(change) = changes.prev(n) { | ||||||
|  |             return change; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     std::ptr::null() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Gets the size of the sequence of changes underlying an iterator.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \return The count of values in \p changes.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesSize(changes: *const AMchanges) -> usize { | ||||||
|  |     if let Some(changes) = changes.as_ref() { | ||||||
|  |         changes.len() | ||||||
|  |     } else { | ||||||
|  |         0 | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Creates an iterator over the same sequence of changes as the given
 | ||||||
|  | ///        one but with the opposite position and direction.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \return An `AMchanges` struct.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesReversed(changes: *const AMchanges) -> AMchanges { | ||||||
|  |     if let Some(changes) = changes.as_ref() { | ||||||
|  |         changes.reversed() | ||||||
|  |     } else { | ||||||
|  |         Default::default() | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /// \memberof AMchanges
 | ||||||
|  | /// \brief Creates an iterator at the starting position over the same sequence
 | ||||||
|  | ///        of changes as the given one.
 | ||||||
|  | ///
 | ||||||
|  | /// \param[in] changes A pointer to an `AMchanges` struct.
 | ||||||
|  | /// \return An `AMchanges` struct.
 | ||||||
|  | /// \pre \p changes `!= NULL`.
 | ||||||
|  | /// \internal
 | ||||||
|  | ///
 | ||||||
|  | /// # Safety
 | ||||||
|  | /// changes must be a valid pointer to an AMchanges
 | ||||||
|  | #[no_mangle] | ||||||
|  | pub unsafe extern "C" fn AMchangesRewound(changes: *const AMchanges) -> AMchanges { | ||||||
|  |     if let Some(changes) = changes.as_ref() { | ||||||
|  |         changes.rewound() | ||||||
|  |     } else { | ||||||
|  |         Default::default() | ||||||
|  |     } | ||||||
|  | } | ||||||
										
											
												File diff suppressed because it is too large
											
										
									
								
							|  | @ -1,46 +1,48 @@ | ||||||
| use automerge as am; | use automerge as am; | ||||||
| use automerge::transaction::Transactable; | use automerge::transaction::Transactable; | ||||||
| use automerge::ReadDoc; |  | ||||||
| 
 | 
 | ||||||
| use crate::byte_span::{to_str, AMbyteSpan}; | use crate::byte_span::{to_str, AMbyteSpan}; | ||||||
| use crate::doc::{to_doc, to_doc_mut, AMdoc}; | use crate::change_hashes::AMchangeHashes; | ||||||
| use crate::items::AMitems; | use crate::doc::{to_doc, to_doc_mut, to_obj_id, AMdoc}; | ||||||
| use crate::obj::{to_obj_id, to_obj_type, AMobjId, AMobjType}; | use crate::obj::{to_obj_type, AMobjId, AMobjType}; | ||||||
| use crate::result::{to_result, AMresult}; | use crate::result::{to_result, AMresult}; | ||||||
| 
 | 
 | ||||||
|  | pub mod item; | ||||||
|  | pub mod items; | ||||||
|  | 
 | ||||||
| macro_rules! adjust { | macro_rules! adjust { | ||||||
|     ($pos:expr, $insert:expr, $len:expr) => {{ |     ($index:expr, $insert:expr, $len:expr) => {{ | ||||||
|         // An empty object can only be inserted into.
 |         // An empty object can only be inserted into.
 | ||||||
|         let insert = $insert || $len == 0; |         let insert = $insert || $len == 0; | ||||||
|         let end = if insert { $len } else { $len - 1 }; |         let end = if insert { $len } else { $len - 1 }; | ||||||
|         if $pos > end && $pos != usize::MAX { |         if $index > end && $index != usize::MAX { | ||||||
|             return AMresult::error(&format!("Invalid pos {}", $pos)).into(); |             return AMresult::err(&format!("Invalid index {}", $index)).into(); | ||||||
|         } |         } | ||||||
|         (std::cmp::min($pos, end), insert) |         (std::cmp::min($index, end), insert) | ||||||
|     }}; |     }}; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| macro_rules! to_range { | macro_rules! to_range { | ||||||
|     ($begin:expr, $end:expr) => {{ |     ($begin:expr, $end:expr) => {{ | ||||||
|         if $begin > $end { |         if $begin > $end { | ||||||
|             return AMresult::error(&format!("Invalid range [{}-{})", $begin, $end)).into(); |             return AMresult::err(&format!("Invalid range [{}-{})", $begin, $end)).into(); | ||||||
|         }; |         }; | ||||||
|         ($begin..$end) |         ($begin..$end) | ||||||
|     }}; |     }}; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc
 | /// \memberof AMdoc
 | ||||||
| /// \brief Deletes an item from a list object.
 | /// \brief Deletes an index in a list object.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] doc A pointer to an `AMdoc` struct.
 | /// \param[in,out] doc A pointer to an `AMdoc` struct.
 | ||||||
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | ||||||
| /// \param[in] pos The position of an item within the list object identified by
 | /// \param[in] index An index in the list object identified by \p obj_id or
 | ||||||
| ///                \p obj_id or `SIZE_MAX` to indicate its last item.
 | ///                  `SIZE_MAX` to indicate its last index.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
 | /// \return A pointer to an `AMresult` struct containing a void.
 | ||||||
| /// \pre \p doc `!= NULL`
 | /// \pre \p doc `!= NULL`.
 | ||||||
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX`
 | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | ///          in order to prevent a memory leak.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -50,109 +52,101 @@ macro_rules! to_range { | ||||||
| pub unsafe extern "C" fn AMlistDelete( | pub unsafe extern "C" fn AMlistDelete( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, _) = adjust!(pos, false, doc.length(obj_id)); |     let (index, _) = adjust!(index, false, doc.length(obj_id)); | ||||||
|     to_result(doc.delete(obj_id, pos)) |     to_result(doc.delete(obj_id, index)) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc
 | /// \memberof AMdoc
 | ||||||
| /// \brief Gets a current or historical item within a list object.
 | /// \brief Gets the current or historical value at an index in a list object.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] doc A pointer to an `AMdoc` struct.
 | /// \param[in] doc A pointer to an `AMdoc` struct.
 | ||||||
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | ||||||
| /// \param[in] pos The position of an item within the list object identified by
 | /// \param[in] index An index in the list object identified by \p obj_id or
 | ||||||
| ///                \p obj_id or `SIZE_MAX` to indicate its last item.
 | ///                  `SIZE_MAX` to indicate its last index.
 | ||||||
| /// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH`
 | /// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical
 | ||||||
| ///                  items to select a historical item at \p pos or `NULL`
 | ///                  value or `NULL` for the current value.
 | ||||||
| ///                  to select the current item at \p pos.
 | /// \return A pointer to an `AMresult` struct that doesn't contain a void.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AMitem` struct.
 | /// \pre \p doc `!= NULL`.
 | ||||||
| /// \pre \p doc `!= NULL`
 | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`.
 | ||||||
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX`
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | ///          in order to prevent a memory leak.
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// doc must be a valid pointer to an AMdoc
 | /// doc must be a valid pointer to an AMdoc
 | ||||||
| /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
 | /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
 | ||||||
| /// heads must be a valid pointer to an AMitems or std::ptr::null()
 | /// heads must be a valid pointer to an AMchangeHashes or std::ptr::null()
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMlistGet( | pub unsafe extern "C" fn AMlistGet( | ||||||
|     doc: *const AMdoc, |     doc: *const AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     heads: *const AMitems, |     heads: *const AMchangeHashes, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc!(doc); |     let doc = to_doc!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, _) = adjust!(pos, false, doc.length(obj_id)); |     let (index, _) = adjust!(index, false, doc.length(obj_id)); | ||||||
|     match heads.as_ref() { |     to_result(match heads.as_ref() { | ||||||
|         None => to_result(doc.get(obj_id, pos)), |         None => doc.get(obj_id, index), | ||||||
|         Some(heads) => match <Vec<am::ChangeHash>>::try_from(heads) { |         Some(heads) => doc.get_at(obj_id, index, heads.as_ref()), | ||||||
|             Ok(heads) => to_result(doc.get_at(obj_id, pos, &heads)), |     }) | ||||||
|             Err(e) => AMresult::error(&e.to_string()).into(), |  | ||||||
|         }, |  | ||||||
|     } |  | ||||||
| } | } | ||||||
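
A hedged sketch of reading the current value at a list position: passing `NULL` for the heads selects the present value, which sidesteps the difference between the two revisions of this signature (one takes an `AMitems*`, the other an `AMchangeHashes*`). Freeing follows the right-hand revision's `AMfree()`; the left-hand revision uses `AMresultFree()` instead. The header name and the result-accessor step are assumptions:

    #include <automerge-c/automerge.h>  /* assumed header name */

    void read_first_element(const AMdoc* doc, const AMobjId* list) {
        AMresult* result = AMlistGet(doc, list, 0, NULL);
        /* ... inspect the single value carried by `result` (accessor API not
           part of this hunk) ... */
        AMfree(result);
    }
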
| 
 | 
 | ||||||
| /// \memberof AMdoc
 | /// \memberof AMdoc
 | ||||||
| /// \brief Gets all of the historical items at a position within a list object
 | /// \brief Gets all of the historical values at an index in a list object until
 | ||||||
| ///        until its current one or a specific one.
 | ///        its current one or a specific one.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] doc A pointer to an `AMdoc` struct.
 | /// \param[in] doc A pointer to an `AMdoc` struct.
 | ||||||
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | ||||||
| /// \param[in] pos The position of an item within the list object identified by
 | /// \param[in] index An index in the list object identified by \p obj_id or
 | ||||||
| ///                \p obj_id or `SIZE_MAX` to indicate its last item.
 | ///                  `SIZE_MAX` to indicate its last index.
 | ||||||
| /// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH`
 | /// \param[in] heads A pointer to an `AMchangeHashes` struct for a historical
 | ||||||
| ///                  items to select a historical last item or `NULL` to select
 | ///                  last value or `NULL` for the current last value.
 | ||||||
| ///                  the current last item.
 | /// \return A pointer to an `AMresult` struct containing an `AMobjItems` struct.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AMitems` struct.
 | /// \pre \p doc `!= NULL`.
 | ||||||
| /// \pre \p doc `!= NULL`
 | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`.
 | ||||||
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX`
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | ///          in order to prevent a memory leak.
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 |  | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
| /// doc must be a valid pointer to an AMdoc
 | /// doc must be a valid pointer to an AMdoc
 | ||||||
| /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
 | /// obj_id must be a valid pointer to an AMobjId or std::ptr::null()
 | ||||||
| /// heads must be a valid pointer to an AMitems or std::ptr::null()
 | /// heads must be a valid pointer to an AMchangeHashes or std::ptr::null()
 | ||||||
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMlistGetAll( | pub unsafe extern "C" fn AMlistGetAll( | ||||||
|     doc: *const AMdoc, |     doc: *const AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     heads: *const AMitems, |     heads: *const AMchangeHashes, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc!(doc); |     let doc = to_doc!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, _) = adjust!(pos, false, doc.length(obj_id)); |     let (index, _) = adjust!(index, false, doc.length(obj_id)); | ||||||
|     match heads.as_ref() { |     match heads.as_ref() { | ||||||
|         None => to_result(doc.get_all(obj_id, pos)), |         None => to_result(doc.get_all(obj_id, index)), | ||||||
|         Some(heads) => match <Vec<am::ChangeHash>>::try_from(heads) { |         Some(heads) => to_result(doc.get_all_at(obj_id, index, heads.as_ref())), | ||||||
|             Ok(heads) => to_result(doc.get_all_at(obj_id, pos, &heads)), |  | ||||||
|             Err(e) => AMresult::error(&e.to_string()).into(), |  | ||||||
|         }, |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc
 | /// \memberof AMdoc
 | ||||||
| /// \brief Increments a counter value in an item within a list object by the
 | /// \brief Increments a counter at an index in a list object by the given
 | ||||||
| ///        given value.
 | ///        value.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] doc A pointer to an `AMdoc` struct.
 | /// \param[in,out] doc A pointer to an `AMdoc` struct.
 | ||||||
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | ||||||
| /// \param[in] pos The position of an item within the list object identified by
 | /// \param[in] index An index in the list object identified by \p obj_id or
 | ||||||
| ///                \p obj_id or `SIZE_MAX` to indicate its last item.
 | ///                  `SIZE_MAX` to indicate its last index.
 | ||||||
| /// \param[in] value A 64-bit signed integer.
 | /// \param[in] value A 64-bit signed integer.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
 | /// \return A pointer to an `AMresult` struct containing a void.
 | ||||||
| /// \pre \p doc `!= NULL`
 | /// \pre \p doc `!= NULL`.
 | ||||||
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX`
 | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | ///          in order to prevent a memory leak.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -162,33 +156,32 @@ pub unsafe extern "C" fn AMlistGetAll( | ||||||
| pub unsafe extern "C" fn AMlistIncrement( | pub unsafe extern "C" fn AMlistIncrement( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     value: i64, |     value: i64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, _) = adjust!(pos, false, doc.length(obj_id)); |     let (index, _) = adjust!(index, false, doc.length(obj_id)); | ||||||
|     to_result(doc.increment(obj_id, pos, value)) |     to_result(doc.increment(obj_id, index, value)) | ||||||
| } | } | ||||||
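
A sketch of bumping a counter: this assumes position 2 of the list already holds a CRDT counter (put there earlier by the counter variant of the list put functions, documented further down in this file) and follows the right-hand revision's `AMfree()` convention:

    #include <automerge-c/automerge.h>  /* assumed header name */

    void bump_counter(AMdoc* doc, const AMobjId* list) {
        /* The result carries no value; it only reports success or an error. */
        AMfree(AMlistIncrement(doc, list, 2, 5));
    }
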
| 
 | 
 | ||||||
| /// \memberof AMdoc
 | /// \memberof AMdoc
 | ||||||
| /// \brief Puts a boolean value into an item within a list object.
 | /// \brief Puts a boolean as the value at an index in a list object.
 | ||||||
| ///
 | ///
 | ||||||
| /// \param[in] doc A pointer to an `AMdoc` struct.
 | /// \param[in,out] doc A pointer to an `AMdoc` struct.
 | ||||||
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`.
 | ||||||
| /// \param[in] pos The position of an item within the list object identified by
 | /// \param[in] index An index in the list object identified by \p obj_id or
 | ||||||
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if
 | ///                  `SIZE_MAX` to indicate its last index if \p insert
 | ||||||
| ///                \p insert `== false` or one past its last item if
 | ///                  `== false` or one past its last index if \p insert
 | ||||||
| ///                \p insert `== true`.
 | ///                  `== true`.
 | ||||||
| /// \param[in] insert A flag for inserting a new item for \p value before
 | /// \param[in] insert A flag to insert \p value before \p index instead of
 | ||||||
| ///                   \p pos instead of putting \p value into the item at
 | ///            writing \p value over \p index.
 | ||||||
| ///                   \p pos.
 |  | ||||||
| /// \param[in] value A boolean.
 | /// \param[in] value A boolean.
 | ||||||
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item.
 | /// \return A pointer to an `AMresult` struct containing a void.
 | ||||||
| /// \pre \p doc `!= NULL`
 | /// \pre \p doc `!= NULL`.
 | ||||||
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX`
 | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`.
 | ||||||
| /// \warning The returned `AMresult` struct pointer must be passed to
 | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()`
 | ||||||
| ///          `AMresultFree()` in order to avoid a memory leak.
 | ///          in order to prevent a memory leak.
 | ||||||
| /// \internal
 | /// \internal
 | ||||||
| ///
 | ///
 | ||||||
| /// # Safety
 | /// # Safety
 | ||||||
|  | @ -198,85 +191,84 @@ pub unsafe extern "C" fn AMlistIncrement( | ||||||
| pub unsafe extern "C" fn AMlistPutBool( | pub unsafe extern "C" fn AMlistPutBool( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: bool, |     value: bool, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let value = am::ScalarValue::Boolean(value); |     let value = am::ScalarValue::Boolean(value); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
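
The insert flag is what distinguishes overwriting an existing position from inserting a new element before it; a small sketch following the right-hand revision's `AMfree()` convention (header name assumed):

    #include <automerge-c/automerge.h>  /* assumed header name */
    #include <stdbool.h>

    void put_bools(AMdoc* doc, const AMobjId* list) {
        /* Overwrite whatever currently sits at position 0. */
        AMfree(AMlistPutBool(doc, list, 0, false, true));
        /* Insert a new element before position 0, shifting the rest right. */
        AMfree(AMlistPutBool(doc, list, 0, true, false));
    }
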
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts an array of bytes value at a position within a list object. | /// \brief Puts a sequence of bytes as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p src before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p src over \p index. |
| ///                   \p pos. | /// \param[in] src A pointer to an array of bytes. |
| /// \param[in] value A view onto the array of bytes to copy from as an | /// \param[in] count The number of bytes to copy from \p src. |
| ///                  `AMbyteSpan` struct. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \pre \p doc `!= NULL`. |
| /// \pre \p doc `!= NULL` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre \p src `!= NULL`. |
| /// \pre \p value.src `!= NULL` | /// \pre `0 <` \p count `<= sizeof(`\p src`)`. |
| /// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| /// \warning The returned `AMresult` struct pointer must be passed to | ///          in order to prevent a memory leak. |
| ///          `AMresultFree()` in order to avoid a memory leak. |  |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
| /// doc must be a valid pointer to an AMdoc | /// doc must be a valid pointer to an AMdoc |
| /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() | /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() |
| /// value.src must be a byte array of length >= value.count | /// src must be a byte array of size `>= count` |
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMlistPutBytes( | pub unsafe extern "C" fn AMlistPutBytes( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: AMbyteSpan, |     val: AMbyteSpan, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let value: Vec<u8> = (&value).into(); |     let mut value = Vec::new(); | ||||||
|  |     value.extend_from_slice(std::slice::from_raw_parts(val.src, val.count)); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
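The only behavioral content in this change is how the byte span is copied: `(&value).into()` on one side, `slice::from_raw_parts` plus `extend_from_slice` on the other. Both end up with an owned `Vec<u8>` before `insert`/`put` runs, so the caller's buffer is never retained. A hedged sketch of the same copy-then-store pattern in safe Rust (the list setup is illustrative only):

use automerge::{transaction::Transactable, AutoCommit, ObjType, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    let list = doc.put_object(ROOT, "blobs", ObjType::List)?;

    // A borrowed (pointer, length) view, standing in for AMbyteSpan.
    let payload: &[u8] = &[0x01, 0x02, 0x03];

    // The wrapper copies the viewed bytes into an owned Vec<u8> before
    // handing them to insert/put, so the caller keeps its own buffer.
    doc.insert(&list, 0, payload.to_vec())?;
    Ok(())
}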
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a CRDT counter value into an item within a list object. | /// \brief Puts a CRDT counter as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A 64-bit signed integer. | /// \param[in] value A 64-bit signed integer. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| ///          `AMresultFree()` in order to avoid a memory leak. | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -286,39 +278,38 @@ pub unsafe extern "C" fn AMlistPutBytes( | ||||||
| pub unsafe extern "C" fn AMlistPutCounter( | pub unsafe extern "C" fn AMlistPutCounter( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: i64, |     value: i64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let value = am::ScalarValue::Counter(value.into()); |     let value = am::ScalarValue::Counter(value.into()); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
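Unlike the plain integer wrappers, this one tags the argument as `am::ScalarValue::Counter`, the merge-friendly type that `AMlistIncrement` (earlier in this file) operates on. A short sketch of why the distinction matters, assuming the crate's `increment` method; the counter construction mirrors the hunk above:

use automerge::{transaction::Transactable, AutoCommit, ObjType, ScalarValue, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    let list = doc.put_object(ROOT, "tallies", ObjType::List)?;

    // A counter, built the same way as in the hunk above.
    doc.insert(&list, 0, ScalarValue::Counter(10.into()))?;

    // Counters accumulate concurrent increments on merge, whereas a plain
    // int written with put() is last-writer-wins.
    doc.increment(&list, 0, 5)?; // logically 15 now
    Ok(())
}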
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a float value into an item within a list object. | /// \brief Puts a float as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A 64-bit float. | /// \param[in] value A 64-bit float. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| ///          `AMresultFree()` in order to avoid a memory leak. | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -328,38 +319,37 @@ pub unsafe extern "C" fn AMlistPutCounter( | ||||||
| pub unsafe extern "C" fn AMlistPutF64( | pub unsafe extern "C" fn AMlistPutF64( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: f64, |     value: f64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a signed integer value into an item within a list object. | /// \brief Puts a signed integer as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A 64-bit signed integer. | /// \param[in] value A 64-bit signed integer. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| ///          `AMresultFree()` in order to avoid a memory leak. | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -369,37 +359,36 @@ pub unsafe extern "C" fn AMlistPutF64( | ||||||
| pub unsafe extern "C" fn AMlistPutInt( | pub unsafe extern "C" fn AMlistPutInt( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: i64, |     value: i64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a null value into an item within a list object. | /// \brief Puts null as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \pre \p doc `!= NULL`. |
| /// \pre \p doc `!= NULL` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| /// \warning The returned `AMresult` struct pointer must be passed to | ///          in order to prevent a memory leak. |
| ///          `AMresultFree()` in order to avoid a memory leak. |  |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -409,37 +398,38 @@ pub unsafe extern "C" fn AMlistPutInt( | ||||||
| pub unsafe extern "C" fn AMlistPutNull( | pub unsafe extern "C" fn AMlistPutNull( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, ()) |         doc.insert(obj_id, index, ()) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, ()) |         doc.put(obj_id, index, ()) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
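Every wrapper in this hunk first normalizes its position through the `adjust!` macro, whose definition is outside this diff. A plausible reading of the `SIZE_MAX` convention described in the doc comments, offered as an inference rather than the macro's actual body:

// Inferred behavior only: usize::MAX ("SIZE_MAX" on the C side) selects the
// last element for a put, or one past the end (an append) for an insert.
fn adjust(pos: usize, insert: bool, len: usize) -> (usize, bool) {
    if pos == usize::MAX {
        if insert {
            (len, true)
        } else {
            (len.saturating_sub(1), false)
        }
    } else {
        (pos, insert)
    }
}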
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts an empty object value into an item within a list object. | /// \brief Puts an empty object as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///                   writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] obj_type An `AMobjIdType` enum tag. | /// \param[in] obj_type An `AMobjIdType` enum tag. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_OBJ_TYPE` item. | /// \return A pointer to an `AMresult` struct containing a pointer to an |
| /// \pre \p doc `!= NULL` | ///         `AMobjId` struct. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre \p doc `!= NULL`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| ///          `AMresultFree()` in order to avoid a memory leak. | /// \pre \p obj_type != `AM_OBJ_TYPE_VOID`. |
|  | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
|  | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -449,85 +439,82 @@ pub unsafe extern "C" fn AMlistPutNull( | ||||||
| pub unsafe extern "C" fn AMlistPutObject( | pub unsafe extern "C" fn AMlistPutObject( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     obj_type: AMobjType, |     obj_type: AMobjType, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let obj_type = to_obj_type!(obj_type); |     let object = to_obj_type!(obj_type); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         (doc.insert_object(obj_id, pos, obj_type), obj_type) |         doc.insert_object(obj_id, index, object) | ||||||
|     } else { |     } else { | ||||||
|         (doc.put_object(obj_id, pos, obj_type), obj_type) |         doc.put_object(obj_id, index, object) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
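This is the one wrapper in the hunk whose result is not void: `insert_object`/`put_object` return the id of the newly created object, and that id is what later calls target. A minimal sketch of the flow with the Rust API; only `insert_object`, `put_object` and `put` come from the hunk, the rest is assumed setup:

use automerge::{transaction::Transactable, AutoCommit, ObjType, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    let rows = doc.put_object(ROOT, "rows", ObjType::List)?;

    // The returned object id plays the role of the AMobjId that the C
    // wrapper hands back inside its AMresult.
    let row = doc.insert_object(&rows, 0, ObjType::Map)?;
    doc.put(&row, "name", "first row")?;
    doc.put(&row, "width", 42_i64)?;
    Ok(())
}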
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a UTF-8 string value into an item within a list object. | /// \brief Puts a UTF-8 string as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. | /// \param[in] value A UTF-8 string view as an `AMbyteSpan` struct. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \pre \p value.src `!= NULL` | /// \pre \p value `!= NULL`. |
| /// \pre `0 <` \p value.count `<= sizeof(`\p value.src `)` | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| /// \warning The returned `AMresult` struct pointer must be passed to | ///          in order to prevent a memory leak. |
| ///          `AMresultFree()` in order to avoid a memory leak. |  |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
| /// doc must be a valid pointer to an AMdoc | /// doc must be a valid pointer to an AMdoc |
| /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() | /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() |
| /// value.src must be a byte array of length >= value.count | /// value must be a null-terminated array of `c_char` |
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMlistPutStr( | pub unsafe extern "C" fn AMlistPutStr( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: AMbyteSpan, |     value: AMbyteSpan, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let value = to_str!(value); |     let value = to_str!(value); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts a *nix timestamp (milliseconds) value into an item within a | /// \brief Puts a *nix timestamp (milliseconds) as the value at an index in a |
| ///        list object. | ///        list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A 64-bit signed integer. | /// \param[in] value A 64-bit signed integer. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| ///          `AMresultFree()` in order to avoid a memory leak. | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -537,39 +524,38 @@ pub unsafe extern "C" fn AMlistPutStr( | ||||||
| pub unsafe extern "C" fn AMlistPutTimestamp( | pub unsafe extern "C" fn AMlistPutTimestamp( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: i64, |     value: i64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     let value = am::ScalarValue::Timestamp(value); |     let value = am::ScalarValue::Timestamp(value); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
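Timestamps travel as plain `i64` millisecond counts; the wrapper above only tags them as `am::ScalarValue::Timestamp` so they keep their type on round-trip. A small standard-library sketch of producing such a value:

use std::time::{SystemTime, UNIX_EPOCH};

// Milliseconds since the Unix epoch, the unit the doc comment describes.
// e.g. doc.insert(&list, 0, ScalarValue::Timestamp(now_millis()))
fn now_millis() -> i64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock set before the Unix epoch")
        .as_millis() as i64
}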
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Puts an unsigned integer value into an item within a list object. | /// \brief Puts an unsigned integer as the value at an index in a list object. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in,out] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] pos The position of an item within the list object identified by | /// \param[in] index An index in the list object identified by \p obj_id or |
| ///                \p obj_id or `SIZE_MAX` to indicate its last item if | ///                  `SIZE_MAX` to indicate its last index if \p insert |
| ///                \p insert `== false` or one past its last item if | ///                  `== false` or one past its last index if \p insert |
| ///                \p insert `== true`. | ///                  `== true`. |
| /// \param[in] insert A flag for inserting a new item for \p value before | /// \param[in] insert A flag to insert \p value before \p index instead of |
| ///                   \p pos instead of putting \p value into the item at | ///            writing \p value over \p index. |
| ///                   \p pos. |  |
| /// \param[in] value A 64-bit unsigned integer. | /// \param[in] value A 64-bit unsigned integer. |
| /// \return A pointer to an `AMresult` struct with an `AM_VAL_TYPE_VOID` item. | /// \return A pointer to an `AMresult` struct containing a void. |
| /// \pre \p doc `!= NULL` | /// \pre \p doc `!= NULL`. |
| /// \pre `0 <=` \p pos `<= AMobjSize(`\p obj_id `)` or \p pos `== SIZE_MAX` | /// \pre `0 <=` \p index `<= AMobjSize(`\p obj_id`)` or \p index `== SIZE_MAX`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
| ///          `AMresultFree()` in order to avoid a memory leak. | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
|  | @ -579,58 +565,56 @@ pub unsafe extern "C" fn AMlistPutTimestamp( | ||||||
| pub unsafe extern "C" fn AMlistPutUint( | pub unsafe extern "C" fn AMlistPutUint( | ||||||
|     doc: *mut AMdoc, |     doc: *mut AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     pos: usize, |     index: usize, | ||||||
|     insert: bool, |     insert: bool, | ||||||
|     value: u64, |     value: u64, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc_mut!(doc); |     let doc = to_doc_mut!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let (pos, insert) = adjust!(pos, insert, doc.length(obj_id)); |     let (index, insert) = adjust!(index, insert, doc.length(obj_id)); | ||||||
|     to_result(if insert { |     to_result(if insert { | ||||||
|         doc.insert(obj_id, pos, value) |         doc.insert(obj_id, index, value) | ||||||
|     } else { |     } else { | ||||||
|         doc.put(obj_id, pos, value) |         doc.put(obj_id, index, value) | ||||||
|     }) |     }) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /// \memberof AMdoc | /// \memberof AMdoc |
| /// \brief Gets the current or historical items in the list object within the | /// \brief Gets the current or historical indices and values of the list object |
| ///        given range. | ///        within the given range. |
| /// | /// |
| /// \param[in] doc A pointer to an `AMdoc` struct. | /// \param[in] doc A pointer to an `AMdoc` struct. |
| /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. | /// \param[in] obj_id A pointer to an `AMobjId` struct or `AM_ROOT`. |
| /// \param[in] begin The first pos in a range of indices. | /// \param[in] begin The first index in a range of indices. |
| /// \param[in] end At least one past the last pos in a range of indices. | /// \param[in] end At least one past the last index in a range of indices. |
| /// \param[in] heads A pointer to an `AMitems` struct with `AM_VAL_TYPE_CHANGE_HASH` | /// \param[in] heads A pointer to an `AMchangeHashes` struct for historical |
| ///                  items to select historical items or `NULL` to select | ///                  indices and values or `NULL` for current indices and |
| ///                  current items. | ///                  values. |
| /// \return A pointer to an `AMresult` struct with an `AMitems` struct. | /// \return A pointer to an `AMresult` struct containing an `AMlistItems` |
| /// \pre \p doc `!= NULL` | ///         struct. |
| /// \pre \p begin `<=` \p end `<= SIZE_MAX` | /// \pre \p doc `!= NULL`. |
| /// \warning The returned `AMresult` struct pointer must be passed to | /// \pre \p begin `<=` \p end `<= SIZE_MAX`. |
| ///          `AMresultFree()` in order to avoid a memory leak. | /// \warning The returned `AMresult` struct must be deallocated with `AMfree()` |
|  | ///          in order to prevent a memory leak. |
| /// \internal | /// \internal |
| /// | /// |
| /// # Safety | /// # Safety |
| /// doc must be a valid pointer to an AMdoc | /// doc must be a valid pointer to an AMdoc |
| /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() | /// obj_id must be a valid pointer to an AMobjId or std::ptr::null() |
| /// heads must be a valid pointer to an AMitems or std::ptr::null() | /// heads must be a valid pointer to an AMchangeHashes or std::ptr::null() |
| #[no_mangle] | #[no_mangle] | ||||||
| pub unsafe extern "C" fn AMlistRange( | pub unsafe extern "C" fn AMlistRange( | ||||||
|     doc: *const AMdoc, |     doc: *const AMdoc, | ||||||
|     obj_id: *const AMobjId, |     obj_id: *const AMobjId, | ||||||
|     begin: usize, |     begin: usize, | ||||||
|     end: usize, |     end: usize, | ||||||
|     heads: *const AMitems, |     heads: *const AMchangeHashes, | ||||||
| ) -> *mut AMresult { | ) -> *mut AMresult { | ||||||
|     let doc = to_doc!(doc); |     let doc = to_doc!(doc); | ||||||
|     let obj_id = to_obj_id!(obj_id); |     let obj_id = to_obj_id!(obj_id); | ||||||
|     let range = to_range!(begin, end); |     let range = to_range!(begin, end); | ||||||
|     match heads.as_ref() { |     match heads.as_ref() { | ||||||
|         None => to_result(doc.list_range(obj_id, range)), |         None => to_result(doc.list_range(obj_id, range)), | ||||||
|         Some(heads) => match <Vec<am::ChangeHash>>::try_from(heads) { |         Some(heads) => to_result(doc.list_range_at(obj_id, range, heads.as_ref())), | ||||||
|             Ok(heads) => to_result(doc.list_range_at(obj_id, range, &heads)), |  | ||||||
|             Err(e) => AMresult::error(&e.to_string()).into(), |  | ||||||
|         }, |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
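The `heads` argument is what turns a current-state query into a historical one: `list_range` reads the latest values, while `list_range_at` reads them as of a given set of change hashes. A sketch of those two calls from the Rust side; the `AutoCommit` setup and `get_heads` snapshot are assumptions, while `list_range`/`list_range_at` are the same methods used above:

use automerge::{transaction::Transactable, AutoCommit, ObjType, ROOT};

fn main() -> Result<(), automerge::AutomergeError> {
    let mut doc = AutoCommit::new();
    let log = doc.put_object(ROOT, "log", ObjType::List)?;
    doc.insert(&log, 0, "a")?;

    // Remember the document heads after the first insert...
    let heads = doc.get_heads();
    doc.insert(&log, 1, "b")?;

    // ...then compare the current range with the historical one.
    assert_eq!(doc.list_range(&log, ..).count(), 2);
    assert_eq!(doc.list_range_at(&log, .., &heads).count(), 1);
    Ok(())
}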
|  |  | ||||||
Some files were not shown because too many files have changed in this diff.