mirror of
https://github.com/rosenpass/rosenpass.git
synced 2025-12-18 13:24:38 +03:00
Compare commits
63 Commits
dev/karo/d
...
arabich/an
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
444e782854 | ||
|
|
0a945aa152 | ||
|
|
2c35c5b4ad | ||
|
|
3a1b981ce3 | ||
|
|
481977510d | ||
|
|
4d4c214600 | ||
|
|
e14fc12f88 | ||
|
|
6af6fb6b2a | ||
|
|
e3b43a59bf | ||
|
|
3942bfa65e | ||
|
|
7b1a62b6bb | ||
|
|
d1a33981b1 | ||
|
|
f20fd1acc3 | ||
|
|
3ce0d262d9 | ||
|
|
a389e3c222 | ||
|
|
cb16bd44bb | ||
|
|
3f4c7c2786 | ||
|
|
475f4593f9 | ||
|
|
13c5edbe44 | ||
|
|
b1ac5d9244 | ||
|
|
a4ff3d4eb5 | ||
|
|
19ebce79f1 | ||
|
|
3f2a9bb96b | ||
|
|
8dfa67a2dd | ||
|
|
f31d635df8 | ||
|
|
75702dfc03 | ||
|
|
3af479a27e | ||
|
|
e76e5b253f | ||
|
|
0d944afbd8 | ||
|
|
8d81be56f3 | ||
|
|
16b3914c46 | ||
|
|
ae060f7cfb | ||
|
|
afa6212264 | ||
|
|
3c744c253b | ||
|
|
53e6553c8b | ||
|
|
4cd2cdfcff | ||
|
|
a5ae83e726 | ||
|
|
9327c2c4f3 | ||
|
|
b140c56359 | ||
|
|
3e03e47935 | ||
|
|
d20bb137c9 | ||
|
|
c259be76c8 | ||
|
|
73d180c4cf | ||
|
|
d44a96e6b6 | ||
|
|
ff20fbbe3a | ||
|
|
5232ab3a8e | ||
|
|
2e27753f4a | ||
|
|
2628adbac8 | ||
|
|
744cf6fb50 | ||
|
|
4bdc464b5b | ||
|
|
eb64f50d99 | ||
|
|
73b04cdc12 | ||
|
|
437c591b2d | ||
|
|
7cbd6576d4 | ||
|
|
ac5a5cf76d | ||
|
|
18359ef3f4 | ||
|
|
3e161d8c8d | ||
|
|
56db757de3 | ||
|
|
5ff3bc944e | ||
|
|
fb93258fcc | ||
|
|
9ab120843a | ||
|
|
25f2abac80 | ||
|
|
c7ec12be9a |
128
.github/workflows/supply-chain.yml
vendored
128
.github/workflows/supply-chain.yml
vendored
@@ -28,10 +28,10 @@ jobs:
|
||||
~/.cargo/registry/cache/
|
||||
~/.cache/cargo-supply-chain/
|
||||
key: cargo-supply-chain-cache
|
||||
- name: Install stable toolchain # Cargo-supply-chain is incompatible with older versions
|
||||
- name: Install nightly toolchain
|
||||
run: |
|
||||
rustup toolchain install stable
|
||||
rustup default stable
|
||||
rustup toolchain install nightly
|
||||
rustup override set nightly
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.tool_cache }}/cargo-supply-chain
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
- name: Add the tool cache directory to the search path
|
||||
run: echo "${{ runner.tool_cache }}/cargo-supply-chain/bin" >> $GITHUB_PATH
|
||||
- name: Ensure that the tool cache is populated with the cargo-supply-chain binary
|
||||
run: cargo +stable install --root ${{ runner.tool_cache }}/cargo-supply-chain cargo-supply-chain
|
||||
run: cargo install --root ${{ runner.tool_cache }}/cargo-supply-chain cargo-supply-chain
|
||||
- name: Update data for cargo-supply-chain
|
||||
run: cargo supply-chain update
|
||||
- name: Generate cargo-supply-chain report about publishers
|
||||
@@ -54,6 +54,8 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
@@ -61,10 +63,10 @@ jobs:
|
||||
~/.cargo/registry/index/
|
||||
~/.cargo/registry/cache/
|
||||
key: cargo-vet-cache
|
||||
- name: Install stable toolchain # Since we are running/compiling cargo-vet, we should rely on the stable toolchain.
|
||||
- name: Install nightly toolchain
|
||||
run: |
|
||||
rustup toolchain install stable
|
||||
rustup default stable
|
||||
rustup toolchain install nightly
|
||||
rustup override set nightly
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ runner.tool_cache }}/cargo-vet
|
||||
@@ -72,24 +74,104 @@ jobs:
|
||||
- name: Add the tool cache directory to the search path
|
||||
run: echo "${{ runner.tool_cache }}/cargo-vet/bin" >> $GITHUB_PATH
|
||||
- name: Ensure that the tool cache is populated with the cargo-vet binary
|
||||
run: cargo +stable install --root ${{ runner.tool_cache }}/cargo-vet cargo-vet
|
||||
- name: Regenerate vet exemptions for dependabot PRs
|
||||
if: github.actor == 'dependabot[bot]' # Run only for Dependabot PRs
|
||||
run: cargo vet regenerate exemptions
|
||||
- name: Check for changes in case of dependabot PR
|
||||
if: github.actor == 'dependabot[bot]' # Run only for Dependabot PRs
|
||||
run: git diff --exit-code || echo "Changes detected, committing..."
|
||||
- name: Commit and push changes for dependabot PRs
|
||||
if: success() && github.actor == 'dependabot[bot]'
|
||||
run: cargo install --root ${{ runner.tool_cache }}/cargo-vet cargo-vet
|
||||
- name: Check which event triggered this CI run, a push or a pull request.
|
||||
run: |
|
||||
git fetch origin ${{ github.head_ref }}
|
||||
git switch ${{ github.head_ref }}
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "github-actions@github.com"
|
||||
git add supply-chain/*
|
||||
git commit -m "Regenerate cargo vet exemptions"
|
||||
git push origin ${{ github.head_ref }}
|
||||
EVENT_NAME="${{ github.event_name }}"
|
||||
IS_PR="false"
|
||||
IS_PUSH="false"
|
||||
if [[ "$EVENT_NAME" == "pull_request" ]]; then
|
||||
echo "This CI run was triggered in the context of a pull request."
|
||||
IS_PR="true"
|
||||
elif [[ "$EVENT_NAME" == "push" ]]; then
|
||||
echo "This CI run was triggered in the context of a push."
|
||||
IS_PUSH="true"
|
||||
else
|
||||
echo "ERROR: This CI run was not triggered in the context of a pull request or a push. Exiting with error."
|
||||
exit 1
|
||||
fi
|
||||
echo "IS_PR=$IS_PR" >> $GITHUB_ENV
|
||||
echo "IS_PUSH=$IS_PUSH" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- name: Check if last commit was by Dependabot
|
||||
run: |
|
||||
# Depending on the trigger for, the relevant commit has to be deduced differently.
|
||||
if [[ "$IS_PR" == true ]]; then
|
||||
# This is the commit ID for the last commit to the head branch of the pull request.
|
||||
# If we used github.sha here instead, it would point to a merge commit between the PR and the main branch, which is only created for the CI run.
|
||||
SHA="${{ github.event.pull_request.head.sha }}"
|
||||
REF="${{ github.head_ref }}"
|
||||
elif [[ "$IS_PUSH" == "true" ]]; then
|
||||
SHA="${{ github.sha }}" # This is the last commit to the branch.
|
||||
REF=${GITHUB_REF#refs/heads/}
|
||||
else
|
||||
echo "ERROR: This action only supports pull requests and push events as triggers. Exiting with error."
|
||||
exit 1
|
||||
fi
|
||||
echo "Commit SHA is $SHA"
|
||||
echo "Branch is $REF"
|
||||
echo "REF=$REF" >> $GITHUB_ENV
|
||||
|
||||
COMMIT_AUTHOR=$(gh api repos/${{ github.repository }}/commits/$SHA --jq .author.login) # .author.login might be null, but for dependabot it will always be there and cannot be spoofed in contrast to .commit.author.name
|
||||
echo "The author of the last commit is $COMMIT_AUTHOR"
|
||||
if [[ "$COMMIT_AUTHOR" == "dependabot[bot]" ]]; then
|
||||
echo "The last commit was made by dependabot"
|
||||
LAST_COMMIT_IS_BY_DEPENDABOT=true
|
||||
else
|
||||
echo "The last commit was made by $COMMIT_AUTHOR not by dependabot"
|
||||
LAST_COMMIT_IS_BY_DEPENDABOT=false
|
||||
fi
|
||||
echo "LAST_COMMIT_IS_BY_DEPENDABOT=$LAST_COMMIT_IS_BY_DEPENDABOT" >> $GITHUB_ENV
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
shell: bash
|
||||
- name: Check if the last commit's message ends in "--regenerate-exemptions"
|
||||
run: |
|
||||
# Get commit message
|
||||
COMMIT_MESSAGE=$(git log -1 --pretty=format:"%s")
|
||||
if [[ "$COMMIT_MESSAGE" == *"--regenerate-exemptions" ]]; then
|
||||
echo "The last commit message ends in --regenerate-exemptions"
|
||||
REGEN_EXEMP=true
|
||||
else
|
||||
echo "The last commit message does not end in --regenerate-exemptions"
|
||||
REGEN_EXEMP=false
|
||||
fi
|
||||
echo "REGEN_EXEMP=$REGEN_EXEMP" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- name: Check if the CI run happens in the context of a dependabot PR # Even if a PR is created by dependabot, the last commit can, and often should be, the regeneration of the cargo vet exemptions. It could also be from an individual making manual changes.
|
||||
run: |
|
||||
IN_DEPENDABOT_PR_CONTEXT="false"
|
||||
if [[ $IS_PR == "true" && "${{ github.event.pull_request.user.login }}" == "dependabot[bot]" ]]; then
|
||||
IN_DEPENDABOT_PR_CONTEXT="true"
|
||||
echo "This CI run is in the context of PR by dependabot."
|
||||
else
|
||||
echo "This CI run is NOT in the context of PR by dependabot."
|
||||
IN_DEPENDABOT_PR_CONTEXT="false"
|
||||
fi
|
||||
echo "IN_DEPENDABOT_PR_CONTEXT=$IN_DEPENDABOT_PR_CONTEXT" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
- uses: actions/checkout@v4
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true'
|
||||
with:
|
||||
token: ${{ secrets.CI_BOT_PAT }}
|
||||
- name: In case of a dependabot PR, ensure that we are not in a detached HEAD state
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true'
|
||||
run: |
|
||||
git fetch origin $REF # ensure that we are up to date.
|
||||
git switch $REF # ensure that we are NOT in a detached HEAD state. This is important for the commit action in the end
|
||||
shell: bash
|
||||
- name: Regenerate cargo vet exemptions if we are in the context of a PR created by dependabot and the last commit is by dependabot or a regeneration of cargo vet exemptions was explicitly requested.
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true' && (env.LAST_COMMIT_IS_BY_DEPENDABOT == 'true' || env.REGEN_EXEMP=='true') # Run only for Dependabot PRs or if specifically requested
|
||||
run: cargo vet regenerate exemptions
|
||||
- name: Commit and push changes if we are in the context of a PR created by dependabot and the last commit is by dependabot or a regeneration of cargo vet exemptions was explicitly requested.
|
||||
if: env.IN_DEPENDABOT_PR_CONTEXT == 'true' && (env.LAST_COMMIT_IS_BY_DEPENDABOT == 'true' || env.REGEN_EXEMP=='true')
|
||||
uses: stefanzweifel/git-auto-commit-action@v6
|
||||
with:
|
||||
commit_message: Regenerate cargo vet exemptions
|
||||
commit_user_name: rosenpass-ci-bot[bot]
|
||||
commit_user_email: noreply@rosenpass.eu
|
||||
commit_author: Rosenpass CI Bot <noreply@rosenpass.eu>
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.CI_BOT_PAT }}
|
||||
- name: Invoke cargo-vet
|
||||
run: cargo vet --locked
|
||||
|
||||
35
Cargo.lock
generated
35
Cargo.lock
generated
@@ -408,9 +408,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
|
||||
|
||||
[[package]]
|
||||
name = "clap_mangen"
|
||||
version = "0.2.24"
|
||||
version = "0.2.29"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fbae9cbfdc5d4fa8711c09bd7b83f644cb48281ac35bf97af3e47b0675864bdf"
|
||||
checksum = "27b4c3c54b30f0d9adcb47f25f61fcce35c4dd8916638c6b82fbd5f4fb4179e2"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"roff",
|
||||
@@ -1153,6 +1153,17 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.8.0",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ipc-channel"
|
||||
version = "0.18.3"
|
||||
@@ -1246,9 +1257,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.169"
|
||||
version = "0.2.174"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
|
||||
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
|
||||
|
||||
[[package]]
|
||||
name = "libcrux"
|
||||
@@ -1408,7 +1419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.48.5",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2461,12 +2472,12 @@ checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.8"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
|
||||
checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2630,20 +2641,22 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.44.2"
|
||||
version = "1.47.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48"
|
||||
checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"parking_lot",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"slab",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.52.0",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -48,7 +48,7 @@ rand = "0.8.5"
|
||||
typenum = "1.17.0"
|
||||
log = { version = "0.4.22" }
|
||||
clap = { version = "4.5.23", features = ["derive"] }
|
||||
clap_mangen = "0.2.24"
|
||||
clap_mangen = "0.2.29"
|
||||
clap_complete = "4.5.40"
|
||||
serde = { version = "1.0.217", features = ["derive"] }
|
||||
arbitrary = { version = "1.4.1", features = ["derive"] }
|
||||
@@ -67,7 +67,7 @@ chacha20poly1305 = { version = "0.10.1", default-features = false, features = [
|
||||
zerocopy = { version = "0.7.35", features = ["derive"] }
|
||||
home = "=0.5.9" # 5.11 requires rustc 1.81
|
||||
derive_builder = "0.20.1"
|
||||
tokio = { version = "1.42", features = ["macros", "rt-multi-thread"] }
|
||||
tokio = { version = "1.46", features = ["macros", "rt-multi-thread"] }
|
||||
postcard = { version = "1.1.1", features = ["alloc"] }
|
||||
libcrux = { version = "0.0.2-pre.2" }
|
||||
libcrux-chacha20poly1305 = { version = "0.0.2-beta.3" }
|
||||
|
||||
@@ -88,6 +88,18 @@ set verboseCompleted=VERBOSE.
|
||||
#define SES_EV(...)
|
||||
#endif
|
||||
|
||||
#if COOKIE_EVENTS
|
||||
#define COOKIE_EV(...) __VA_ARGS__
|
||||
#else
|
||||
#define COOKIE_EV(...)
|
||||
#endif
|
||||
|
||||
#if KEM_EVENTS
|
||||
#define KEM_EV(...) __VA_ARGS__
|
||||
#else
|
||||
#define KEM_EV(...)
|
||||
#endif
|
||||
|
||||
|
||||
(* TODO: Authentication timing properties *)
|
||||
(* TODO: Proof that every adversary submitted package is equivalent to one generated by the proper algorithm using different coins. This probably requires introducing an oracle that extracts the coins used and explicitly adding the notion of coins used for Packet->Packet steps and an inductive RNG notion. *)
|
||||
|
||||
8
marzipan/README.md
Normal file
8
marzipan/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Rewriting analyze.sh in Python
|
||||
|
||||
* `../analyze.sh` is the old script
|
||||
* `src/__init__.py` is the new script
|
||||
|
||||
* call the old script from the Rosenpass repository's root directory with `./analyze.sh`
|
||||
* call the new script from the marzipan directory:
|
||||
* `nix run .# -- analyze $repo` where `$repo` is the absolute(?) path to the root directory of the Rosenpass repository.
|
||||
66
marzipan/TODO.md
Normal file
66
marzipan/TODO.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# TODO for the project of rewriting Marzipan
|
||||
|
||||
## Done
|
||||
|
||||
* ~~figure out why ProVerif is started on the non-processed mpv file~~
|
||||
* ~~rework rebound warnings (`clean_warnings` Bash function)~~
|
||||
```bash
|
||||
rosenpass$ rosenpass-marzipan run-proverif target/proverif/03_identity_hiding_responder.entry.o.pv target/proverif/03_identity_hiding_responder.entry.log
|
||||
```
|
||||
* ~~provide log parameter to `rosenpass-marzipan`-call~~ (no, it was intentionally not used)
|
||||
* ~~cpp pre-processing stuff~~
|
||||
* ~~awk pre-processing stuff~~
|
||||
* ~~`pretty_output` Bash function~~
|
||||
* ~~pretty_output_line~~
|
||||
* ~~click function intervention weirdness~~
|
||||
* ~~why is everything red in the pretty output? (see line 96 in __init__.py)~~
|
||||
* ~~awk RESULT flush in marzipan()~~
|
||||
* ~~move the whole metaverif function to Python~~
|
||||
* ~move the whole analyze function to Python~
|
||||
* ~find the files~
|
||||
* ~start subprocesses in parallel~
|
||||
* ~wait for them to finish~
|
||||
* ~~rebase from main~~
|
||||
* ~~see if we still need the `extra_args is None` check in `_run_proverif`~`
|
||||
* ~~set colors differently to prevent injection attack~~
|
||||
* ~~by calling a function~~
|
||||
* ~~by prepared statements~~
|
||||
* ~~standalone function parse_result_line is no longer necessary~~
|
||||
* ~~is the clean function still necessary?~~
|
||||
* ~~implement better main function for click~~
|
||||
* ~~why does analyze fail when the target/proverif directory is not empty?~~
|
||||
* ~~return an exit status that is meaningful for CI~~
|
||||
* ~~exception handling in analyze() and in run_proverif()~~
|
||||
* ~~refactor filtering in run_proverif (see karo's comment)~~
|
||||
* ~configurable target directory~
|
||||
* ~lark parser: multiline comments, how???~
|
||||
* ~parse errors~
|
||||
* ~error when trying with: `nix run .# -- parse ../target/proverif/01_secrecy.entry.i.pv`~
|
||||
* ~`in(C, Cinit_conf(Ssskm, Spsk, Sspkt, ic));`~
|
||||
* ~ ^~
|
||||
* ~04_dos… has a syntax error (see below)~
|
||||
* ~~rewrite marzipan.awk into Python/LARK~~
|
||||
* ~~define a LARK grammar for marzipan.awk rules~~
|
||||
* ~~write python code for processing marzipan rules, e.g. alias replacement (step: i.pv->o.pv)~~
|
||||
|
||||
## Next Steps
|
||||
|
||||
* integrate marzipan.awk into Python, somehow
|
||||
* options term special cases (c.f. manual page 133, starting with "fun" term)
|
||||
* complete with CryptoVerif options
|
||||
* do not assume that the repo path has subdir marzipan
|
||||
* do not assume that the repo path has subdir analysis
|
||||
* rewrite cpp into Python/LARK (step: mpv->i.pv)
|
||||
* integrate the Nix flake into the main Nix flake
|
||||
* pull the gawk dependency into the Nix flake
|
||||
* think about next steps
|
||||
* integrate this upstream, into the CI?
|
||||
* “make it beautiful” steps? more resiliency to working directory?
|
||||
* rewrite our awk usages into Python/…?
|
||||
* yes, possibly as extension to the LARK grammar
|
||||
* and rewrite the AST within Python
|
||||
* reconstruct ProVerif input file for ProVerif
|
||||
* rewrite our CPP usages into Python/…?
|
||||
* low priority: nested comments in ProVerif code
|
||||
|
||||
“it replaces the Bash script and is idiomatic Python code”
|
||||
190
marzipan/flake.lock
generated
Normal file
190
marzipan/flake.lock
generated
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1726560853,
|
||||
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-github-actions": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"poetry2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1729742964,
|
||||
"narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"rev": "e04df33f62cdcf93d73e9a04142464753a16db67",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-github-actions",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1736166416,
|
||||
"narHash": "sha256-U47xeACNBpkSO6IcCm0XvahsVXpJXzjPIQG7TZlOToU=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b30f97d8c32d804d2d832ee837d0f1ca0695faa5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730157240,
|
||||
"narHash": "sha256-P8wF4ag6Srmpb/gwskYpnIsnspbjZlRvu47iN527ABQ=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "75e28c029ef2605f9841e0baa335d70065fe7ae2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"poetry2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nix-github-actions": "nix-github-actions",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"systems": "systems_3",
|
||||
"treefmt-nix": "treefmt-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1736280331,
|
||||
"narHash": "sha256-mkVHnky9h/s2EA+t9eEC8qxgcNTE3V+vb/9XgG4fCig=",
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"rev": "4d260d908f3d95fa4b3ef6a98781ff64e1eede22",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"poetry2nix": "poetry2nix"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"poetry2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1730120726,
|
||||
"narHash": "sha256-LqHYIxMrl/1p3/kvm2ir925tZ8DkI0KA10djk8wecSk=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "9ef337e492a5555d8e17a51c911ff1f02635be15",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
18
marzipan/flake.nix
Normal file
18
marzipan/flake.nix
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
inputs.poetry2nix.url = "github:nix-community/poetry2nix";
|
||||
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||
|
||||
outputs = (inputs:
|
||||
let scoped = (scope: scope.result);
|
||||
in scoped rec {
|
||||
inherit (builtins) removeAttrs;
|
||||
|
||||
result = (import ./nix/init.nix) {
|
||||
scoped = scoped;
|
||||
flake.self = inputs.self;
|
||||
flake.inputs = removeAttrs inputs ["self"];
|
||||
};
|
||||
}
|
||||
);
|
||||
}
|
||||
1220
marzipan/nix/hyuga/poetry.lock
generated
Normal file
1220
marzipan/nix/hyuga/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
16
marzipan/nix/hyuga/pyproject.toml
Normal file
16
marzipan/nix/hyuga/pyproject.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[tool.poetry]
|
||||
name = "hyuga-language-server-installer"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = []
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.12,<3.13"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
hyuga = "^1.0.0"
|
||||
poetry = "^2.0.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
32
marzipan/nix/init.nix
Normal file
32
marzipan/nix/init.nix
Normal file
@@ -0,0 +1,32 @@
|
||||
outer_ctx: outer_ctx.scoped rec {
|
||||
inherit (builtins) trace;
|
||||
|
||||
ctx = outer_ctx // { inherit config; };
|
||||
|
||||
inherit (ctx) scoped;
|
||||
|
||||
inherit (ctx.flake.inputs) nixpkgs flake-utils;
|
||||
inherit (nixpkgs.lib) genAttrs zipAttrsWith;
|
||||
inherit (nixpkgs.lib.debug) traceVal;
|
||||
inherit (flake-utils.lib) allSystems eachSystem;
|
||||
|
||||
result = {
|
||||
devShells = eachSupportedSystem (system: (setupSystem system).devShells);
|
||||
packages = eachSupportedSystem (system: (setupSystem system).packages);
|
||||
apps = eachSupportedSystem (system: (setupSystem system).apps);
|
||||
};
|
||||
|
||||
setupSystem = (system_name: scoped rec {
|
||||
result = (import ./system.nix) (ctx // {
|
||||
system.name = system_name;
|
||||
system.pkgs = nixpkgs.legacyPackages.${system_name};
|
||||
});
|
||||
});
|
||||
|
||||
config = {
|
||||
supportedSystems = allSystems;
|
||||
poetry.projectDir = ctx.flake.self;
|
||||
};
|
||||
|
||||
eachSupportedSystem = genAttrs config.supportedSystems;
|
||||
}
|
||||
47
marzipan/nix/system.nix
Normal file
47
marzipan/nix/system.nix
Normal file
@@ -0,0 +1,47 @@
|
||||
ctx: ctx.scoped rec {
|
||||
inherit (ctx.system) pkgs;
|
||||
inherit (ctx.flake.inputs) poetry2nix flake-utils;
|
||||
inherit (pkgs) mkShellNoCC writeShellApplication;
|
||||
inherit (flake-utils.lib) mkApp;
|
||||
|
||||
poetryCtx = poetry2nix.lib.mkPoetry2Nix { inherit pkgs; };
|
||||
inherit (poetryCtx) mkPoetryEnv mkPoetryApplication;
|
||||
|
||||
deps = [poetryEnv];
|
||||
dev-deps = []
|
||||
++ deps
|
||||
++ [poetryHyugaEnv]
|
||||
++ (with pkgs; [poetry]);
|
||||
|
||||
poetryCfg = ctx.config.poetry // { overrides = poetryOverrides; };
|
||||
poetryEnv = mkPoetryEnv poetryCfg;
|
||||
|
||||
poetryHyugaCfg = poetryCfg // { projectDir = ./hyuga; };
|
||||
poetryHyugaEnv = mkPoetryEnv poetryHyugaCfg;
|
||||
|
||||
poetryOverrides = poetryCtx.defaultPoetryOverrides.extend (final: prev: {
|
||||
hyuga = prev.hyuga.overridePythonAttrs (old: {
|
||||
buildInputs = []
|
||||
++ (old.buildInputs or [ ])
|
||||
++ [ final.poetry-core ];
|
||||
preferWheel = true;
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
result.packages.default = mkPoetryApplication poetryCfg;
|
||||
result.devShells.default = mkShellNoCC {
|
||||
packages = dev-deps;
|
||||
};
|
||||
|
||||
result.apps.replPython = mkShellApp "python-repl" ''python'';
|
||||
result.apps.replHy = mkShellApp "hy-repl" ''hy'';
|
||||
|
||||
mkShellApp = (name: script: mkApp {
|
||||
drv = writeShellApplication {
|
||||
inherit name;
|
||||
text = script;
|
||||
runtimeInputs = dev-deps;
|
||||
};
|
||||
});
|
||||
}
|
||||
1415
marzipan/poetry.lock
generated
Normal file
1415
marzipan/poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
31
marzipan/pyproject.toml
Normal file
31
marzipan/pyproject.toml
Normal file
@@ -0,0 +1,31 @@
|
||||
[tool.poetry]
|
||||
name = "rosenpass-marzipan"
|
||||
version = "0.1.0"
|
||||
description = ""
|
||||
authors = ["Author Name <author@example.com>"]
|
||||
# readme = "README.md"
|
||||
# license = "BSD"
|
||||
packages = [
|
||||
{ include = "**/*.[hp]y", from = "src", to = "rosenpass_marzipan" },
|
||||
{ include = "**/*.sh", from = "src", to = "rosenpass_marzipan" },
|
||||
#{ include = "**/*.lark", from = "src", to = "rosenpass_marzipan" },
|
||||
]
|
||||
|
||||
[tool.poetry.scripts]
|
||||
rosenpass-marzipan = 'rosenpass_marzipan:main'
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.12,<3.13"
|
||||
hy = "^1.0.0"
|
||||
lark = "^1.2.2"
|
||||
hyrule = "^0.8.0"
|
||||
ipython = "^8.32.0"
|
||||
click = "^8.1.8"
|
||||
rich = "^13.9.4"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
poetry = "^2.0.0"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
314
marzipan/src/__init__.py
Normal file
314
marzipan/src/__init__.py
Normal file
@@ -0,0 +1,314 @@
|
||||
# from rich.console import Console
|
||||
import click
|
||||
|
||||
from .parser import *
|
||||
from .util import export, pkgs, rename, setup_exports
|
||||
|
||||
target_subdir = "target/proverif"
|
||||
|
||||
(__all__, export) = setup_exports()
|
||||
export(setup_exports)
|
||||
|
||||
|
||||
console = pkgs.rich.console.Console()
|
||||
logger = pkgs.logging.getLogger(__name__)
|
||||
|
||||
|
||||
def set_logging_logrecordfactory(filename):
|
||||
old_factory = pkgs.logging.getLogRecordFactory()
|
||||
|
||||
def record_factory(*args, **kwargs):
|
||||
record = old_factory(*args, **kwargs)
|
||||
record.filename = filename
|
||||
return record
|
||||
|
||||
pkgs.logging.setLogRecordFactory(record_factory)
|
||||
|
||||
|
||||
# def set_logging_format(max_length):
|
||||
# pass
|
||||
# #format_str = "{levelname:<8} {filename:<" + str(max_length + 2) + "} {message}"
|
||||
# #pkgs.logging.basicConfig(level=pkgs.logging.DEBUG, style="{", format=format_str)
|
||||
|
||||
|
||||
@click.group()
|
||||
def main():
|
||||
pkgs.logging.basicConfig(level=pkgs.logging.DEBUG)
|
||||
|
||||
|
||||
def eprint(*args, **kwargs):
|
||||
print(*args, **{"file": pkgs.sys.stderr, **kwargs})
|
||||
|
||||
|
||||
def exc(argv, **kwargs):
|
||||
eprint("$", *argv)
|
||||
command = pkgs.subprocess.run(argv, **kwargs)
|
||||
|
||||
if command.returncode != 0:
|
||||
logger.error("subprocess with terminated with non-zero return code.")
|
||||
eprint("", *argv)
|
||||
exit(command.returncode)
|
||||
|
||||
if command.stdout is not None:
|
||||
return command.stdout.decode("utf-8")
|
||||
|
||||
return ""
|
||||
|
||||
|
||||
def exc_piped(argv, **kwargs):
|
||||
eprint("$", *argv)
|
||||
return pkgs.subprocess.Popen(argv, **kwargs)
|
||||
|
||||
|
||||
def clean_line(prev_line, line):
|
||||
line = line.rstrip()
|
||||
if pkgs.re.match(r"^Warning: identifier \w+ rebound.$", line) or prev_line is None:
|
||||
return None
|
||||
return prev_line
|
||||
|
||||
|
||||
def run_proverif(file, log_file, extra_args=None):
    """Run ProVerif on `file`, appending its raw output to `log_file`.

    Generator: yields output lines filtered through clean_line() (a
    one-line-delay buffer), flushing the final buffered line after the
    stream ends. Exits the whole program when ProVerif raises or returns
    a non-zero code.

    Fix: `extra_args` previously defaulted to a shared mutable list
    (`[]`); a None sentinel avoids cross-call mutation of the default.
    """
    if extra_args is None:
        extra_args = []
    params = ["proverif", "-test", *extra_args, file]
    logger.debug(params)

    process = exc_piped(
        params,
        stderr=pkgs.subprocess.PIPE,
        stdout=pkgs.subprocess.PIPE,
        text=True,
        bufsize=1,  # line-buffered so output can be streamed as it appears
    )
    try:
        prev_line = None
        with open(log_file, "a") as log:
            for line in process.stdout:
                log.write(line)  # raw, unfiltered log copy
                cleaned_line = clean_line(prev_line, line)
                prev_line = line
                if cleaned_line is not None:
                    yield cleaned_line
        # Flush the last buffered line, which the loop never emitted.
        if prev_line is not None:
            yield prev_line

    except Exception as e:
        # When does this happen? Should the error even be ignored? Metaverif should probably just abort here, right? --karo
        logger.error(f"Proverif generated an exception with {params}: {e}")
        exit(1)
    finally:
        process.stdout.close()
        return_code = process.wait()

    if return_code != 0:
        logger.error(
            f"Proverif exited with a non-zero error code {params}: {return_code}"
        )
        if prev_line is not None:
            logger.error(f"Last line of log file {log_file}:")
            logger.error(f"> {prev_line.rstrip()}")
        exit(return_code)
|
||||
|
||||
|
||||
def cpp(file, cpp_prep):
    """Run the C preprocessor over `file`, writing the result to `cpp_prep`.

    The file's own directory is added to the include path so relative
    #include directives resolve. Exits the program on failure (via exc).
    """
    logger.debug(f"_cpp: {file}, {cpp_prep}")
    file_path = pkgs.pathlib.Path(file)

    dirname = file_path.parent
    # Removed unused local `cwd = pkgs.pathlib.Path.cwd()`.

    params = ["cpp", "-P", f"-I{dirname}", file, "-o", cpp_prep]
    return exc(params, stderr=pkgs.sys.stderr)
|
||||
|
||||
|
||||
def awk(repo_path, cpp_prep, awk_prep):
    """Run the marzipan.awk preprocessor over `cpp_prep`, writing the
    result to `awk_prep` and appending the mandatory main-process stanza."""
    script = str(pkgs.os.path.join(repo_path, "marzipan/marzipan.awk"))
    argv = ["awk", "-f", script, cpp_prep]
    with open(awk_prep, "w") as out:
        exc(argv, stderr=pkgs.sys.stderr, stdout=out)
        out.write("\nprocess main")
|
||||
|
||||
|
||||
def pretty_output_line(prefix, mark, color, text):
    """Emit one result row: gray timing prefix, then a colored '<mark> <text>'."""
    console.print(prefix, style="grey42", end="", no_wrap=True)
    console.print(f"{mark} {text}", style=color)
|
||||
|
||||
|
||||
def pretty_output_init(file_path):
    """Scan the preprocessed model for @lemma/@query/@reachable annotations.

    Returns (ta, res, ctr, expected, descs):
      ta       -- timestamp of the previous result (initially: start time)
      res      -- overall result code, set to 1 once any result mismatches
      ctr      -- index of the next expected result
      expected -- expected boolean ProVerif verdict per annotation
      descs    -- human-readable description string per annotation

    NOTE(review): lemmas are collected before queries/reachables, so this
    ordering assumes ProVerif reports lemma results first — confirm
    against actual ProVerif output ordering.
    """
    expected = []
    descs = []

    with open(file_path, "r") as file:
        content = file.read()

    # Process lemmas first
    result = pkgs.re.findall(r"@(lemma)(?=\s+\"([^\"]*)\")", content)
    if result:
        # The regex only returns lemmas. For lemmas, we always expect the result 'true' from ProVerif.
        expected.extend([True for _ in range(len(result))])
        descs.extend([e[1] for e in result])

    # Then process regular queries
    result = pkgs.re.findall(r'@(query|reachable)(?=\s+"[^\"]*")', content)
    if result:
        # For queries, we expect 'true' from ProVerif, for reachable, we expect 'false'.
        # Bug fix: the capture group yields "query"/"reachable" WITHOUT the
        # leading "@", so the original `e == "@query"` was always False and
        # every query was expected to be false.
        expected.extend([e == "query" for e in result])
        reachable_result = pkgs.re.findall(
            r'@(query|reachable)\s+"([^\"]*)"', content
        )
        descs.extend([e[1] for e in reachable_result])

    ta = pkgs.time.time()
    res = 0
    ctr = 0
    return (ta, res, ctr, expected, descs)
|
||||
|
||||
|
||||
def pretty_output_step(file_path, line, expected, descs, res, ctr, ta):
    """Compare one ProVerif result line against expected[ctr], print a
    colored pass/fail row, and advance the (res, ctr, ta) accumulator.

    `file_path` is accepted for symmetry with pretty_output_init but is
    unused here. Returns the updated (res, ctr, ta) tuple.
    """
    tz = pkgs.time.time()

    # Output from ProVerif contains a trailing newline, which we do not have in the expected output. Remove it for meaningful matching.
    outp_clean_raw = line.rstrip()
    if outp_clean_raw == "true":
        outp_clean = True
    elif outp_clean_raw == "false":
        outp_clean = False
    else:
        # Non-boolean lines are compared verbatim against the expectation.
        outp_clean = outp_clean_raw

    # NOTE(review): assumes ctr < len(expected); an extra result line would
    # raise IndexError — confirm annotation count matches ProVerif output.
    if outp_clean == expected[ctr]:
        pretty_output_line(f"{int(tz - ta)}s ", "✔", "green", descs[ctr])
    else:
        # Remember that at least one result mismatched.
        res = 1
        pretty_output_line(f"{int(tz - ta)}s ", "✖", "red", descs[ctr])

    ctr += 1
    # Next step's duration is measured from now.
    ta = tz

    return (res, ctr, ta)
|
||||
|
||||
|
||||
def pretty_output(file_path):
    """Read ProVerif result lines from stdin and pretty-print each against
    the expectations extracted from `file_path`."""
    ta, res, ctr, expected, descs = pretty_output_init(file_path)
    for raw_line in pkgs.sys.stdin:
        res, ctr, ta = pretty_output_step(
            file_path, raw_line, expected, descs, res, ctr, ta
        )
|
||||
|
||||
|
||||
def get_target_dir(path, output):
    """Resolve the output directory.

    Returns `output` as a Path when it is a non-empty string; otherwise the
    default `<path>/target/proverif`. The truthiness check is equivalent to
    the original `output is not None and not output == ""` for None/str.
    """
    if output:
        return pkgs.pathlib.Path(output)
    return pkgs.os.path.join(path, target_subdir)
|
||||
|
||||
|
||||
@main.command()
@click.option("--output", "output", required=False)
@click.argument("repo_path")
def analyze(repo_path, output):
    # Run metaverif over every analysis/*.entry.mpv file, one worker
    # process per entry. (Comment, not docstring: click would surface a
    # docstring as --help text.)
    target_dir = get_target_dir(repo_path, output)
    pkgs.os.makedirs(target_dir, exist_ok=True)

    # Collect entry-point model files, sorted for deterministic ordering.
    entries = []
    analysis_dir = pkgs.os.path.join(repo_path, "analysis")
    full_paths = sorted(pkgs.glob.glob(str(analysis_dir) + "/*.entry.mpv"))
    entries.extend(full_paths)

    # modelnames = [pkgs.os.path.basename(path).replace('.entry.mpv', '') for path in full_paths]
    # max_length = max(len(modelname) for modelname in modelnames) if modelnames else 0
    # set_logging_format(max_length)

    # Fan out across processes; log completions as they arrive.
    with pkgs.concurrent.futures.ProcessPoolExecutor() as executor:
        futures = {
            executor.submit(metaverif, repo_path, target_dir, entry): entry
            for entry in entries
        }
        for future in pkgs.concurrent.futures.as_completed(futures):
            cmd = futures[future]
            logger.info(f"Metaverif {cmd} finished.")

    print("all processes finished.")
|
||||
|
||||
|
||||
@main.command()
@click.option("--output", "output", required=False)
@click.argument("repo_path")
def clean(repo_path, output):
    # Remove generated .pv/.log files from the target directory; exit
    # non-zero if any deletion failed.
    cleans_failed = 0
    target_dir = get_target_dir(repo_path, output)
    if pkgs.os.path.isdir(target_dir):
        for entry in pkgs.os.listdir(target_dir):
            file_path = pkgs.os.path.join(target_dir, entry)
            if not pkgs.os.path.isfile(file_path):
                continue
            if pkgs.os.path.splitext(file_path)[1] not in (".pv", ".log"):
                continue
            try:
                pkgs.os.remove(file_path)
            except Exception as e:
                print(f"Error deleting {file_path}: {str(e)}")
                cleans_failed += 1

    if cleans_failed > 0:
        print(f"{cleans_failed} could not be deleted.")
        exit(1)
|
||||
|
||||
|
||||
def metaverif(repo_path, tmpdir, file):
    """Preprocess one `*.entry.mpv` model and run ProVerif on the result.

    Pipeline: cpp -> parse_main -> run_proverif, with pretty-printed
    pass/fail rows matched against the annotations in the cpp output.
    Exits the process on a bad filename (and, transitively, on any
    subprocess failure inside the helpers it calls).
    """
    # Extract the name using regex
    name_match = pkgs.re.search(r"([^/]*)(?=\.entry\.mpv)", file)
    if name_match:
        name = name_match.group(0)  # Get the matched name

        # Tag all log records from this worker with the model name.
        set_logging_logrecordfactory(name)
        logger.info(f"Start metaverif on {file}")

        # Create the file paths
        cpp_prep = pkgs.os.path.join(tmpdir, f"{name}.i.pv")
        awk_prep = pkgs.os.path.join(tmpdir, f"{name}.o.pv")

        # Output the results
        print(f"Name: {name}")
        print(f"CPP Prep Path: {cpp_prep}")
        print(f"AWK Prep Path: {awk_prep}")

        cpp(file, cpp_prep)
        # parse_main replaces the legacy awk preprocessing step below.
        parse_main(cpp_prep, awk_prep)
        # awk(repo_path, cpp_prep, awk_prep)

        log_file = pkgs.os.path.join(tmpdir, f"{name}.log")

        # Expectations come from the cpp output; results from ProVerif.
        ta, res, ctr, expected, descs = pretty_output_init(cpp_prep)
        # NOTE(review): `log` is opened but unused here — run_proverif
        # opens/appends to log_file itself. Confirm this handle is needed.
        with open(log_file, "a") as log:
            generator = run_proverif(awk_prep, log_file)
            for line in generator:
                # parse-result-line:
                match = pkgs.re.search(r"^RESULT .* \b(true|false)\b\.$", line)
                if match:
                    result = match.group(1)
                    # pretty-output:
                    res, ctr, ta = pretty_output_step(
                        cpp_prep, result, expected, descs, res, ctr, ta
                    )
    else:
        logger.error(
            f"No match found for the filename {file}: extension should be .mpv"
        )
        exit(1)
|
||||
|
||||
|
||||
@main.command()
@click.argument("i_path")
@click.argument("o_path")
def parse(i_path, o_path):
    # Parse the preprocessed model at i_path and write the ProVerif file
    # to o_path; parse errors are logged rather than propagated.
    try:
        parse_main(i_path, o_path)
    except pkgs.lark.exceptions.UnexpectedCharacters as e:
        logger.error(f"Error {type(e).__name__} parsing {i_path}: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this module directly as the CLI entry point.
    main()
|
||||
104
marzipan/src/analyze.sh
Executable file
104
marzipan/src/analyze.sh
Executable file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
exc() {
    # Echo the command line to stderr (prefixed with "$"), then execute it.
    echo >&2 "\$" "$@"
    "$@"
}
|
||||
|
||||
run_proverif() {
    # Delegate to the Python CLI's run-proverif subcommand.
    # $1: model file, $2: log file (consumed but unused), rest: extra args.
    local file; file="$1"; shift
    local log; log="$1"; shift # intentionally unused

    exc rosenpass-marzipan run-proverif "${file}" "${@}"
}
|
||||
|
||||
clean_warnings() {
    # Filter ProVerif warnings from stdin via the Python CLI.
    exc rosenpass-marzipan clean-warnings
}
|
||||
|
||||
# Palette and glyph names; presumably consumed by the Python
# pretty-printer rather than interpreted as ANSI codes here — confirm.
color_red='red'
color_green='green'
color_gray='gray'
color_clear=''

checkmark="✔"
cross="❌"
|
||||
|
||||
pretty_output() {
    # Pretty-print result lines from stdin via the Python CLI.
    exc rosenpass-marzipan pretty-output "${@}"
}
|
||||
|
||||
metaverif() {
    # Preprocess one *.mpv entry file and run ProVerif on the result,
    # teeing output into a per-model log.
    local file; file="$1"; shift
    local name; name="$(echo "${file}" | grep -Po '[^/]*(?=\.mpv)')"

    local cpp_prep; cpp_prep="${tmpdir}/${name}.i.pv"
    local awk_prep; awk_prep="${tmpdir}/${name}.o.pv"

    # Fix (SC2086): quote the path expansions — unquoted they undergo
    # word-splitting and globbing on paths with spaces/metacharacters.
    exc rosenpass-marzipan cpp "${file}" "${cpp_prep}"
    exc rosenpass-marzipan awk-prep "${cpp_prep}" "${awk_prep}"

    local log; log="${tmpdir}/${name}.log"
    {
        run_proverif "${awk_prep}" "$@" \
            | clean_warnings \
            | tee "${log}" \
            | exc rosenpass-marzipan parse-result-line \
            | pretty_output "${cpp_prep}"
    } || {
        echo "TODO: Commented out some debug output"
        #if ! grep -q "^Verification summary" "${log}"; then
        #    echo -ne "\033[0\r"
        #    cat "${log}"
        #fi
    }
}
|
||||
|
||||
analyze() {
    # Run metaverif for every analysis/*.entry.mpv file. Currently serial;
    # the backgrounded parallel variant is commented out for testing.
    mkdir -p "${tmpdir}"

    entries=()
    readarray -t -O "${#entries[@]}" entries < <(
        find analysis -iname '*.entry.mpv' | sort)

    local entry
    local procs; procs=()
    for entry in "${entries[@]}"; do
        echo "call metaverif"
        # TODO: commented out for testing
        #exc rosenpass-marzipan metaverif "${tmpdir}" "${entry}" >&2 & procs+=("$!")
        exc rosenpass-marzipan metaverif "${tmpdir}" "${entry}" >&2
    done

    # TODO: commented out for testing
    # for entry in "${procs[@]}"; do
    #     exc wait -f "${entry}"
    # done
}
|
||||
|
||||
err_usage() {
    # Print usage to stderr and abort.
    # Fix: was `echo >&1`, a no-op redirect — diagnostics belong on stderr.
    echo >&2 "USAGE: ${0} analyze PATH"
    echo >&2 "The script will cd into PATH and continue there."
    exit 1
}
|
||||
|
||||
main() {
    # Abort on any failure, including failures inside pipelines.
    set -e -o pipefail

    # Both the subcommand and the working directory are mandatory.
    local cmd="$1"; shift || err_usage
    local dir="$1"; shift || err_usage

    # All subsequent paths (tmpdir, analysis/) are relative to PATH.
    cd -- "${dir}"
    tmpdir="target/proverif"

    echo "call main"

    case "${cmd}" in
        analyze) analyze ;;
        clean_warnings) clean_warnings ;;
        *) err_usage
    esac
}
|
||||
|
||||
# Do not execute main if sourced
|
||||
(return 0 2>/dev/null) || main "$@"
|
||||
7
marzipan/src/grammar/_cryptoverif.lark
Normal file
7
marzipan/src/grammar/_cryptoverif.lark
Normal file
@@ -0,0 +1,7 @@
|
||||
# TODO: finish defining these (comes from Cryptoverif)
|
||||
#param_decl: "param" _non_empty_seq{IDENT} options "."
|
||||
#proba_decl: "proba" IDENT ["(...)"] options "."
|
||||
#letproba_decl: "letproba" IDENT ["(...)"] "= ..." "."
|
||||
#proof_decl: "proof" "{" proof "}"
|
||||
#def_decl: "def" IDENT "(" _maybe_empty_seq{typeid} ")" "{" decl* "}"
|
||||
#expand_decl: "expand" IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
10
marzipan/src/grammar/_main.lark
Normal file
10
marzipan/src/grammar/_main.lark
Normal file
@@ -0,0 +1,10 @@
|
||||
start: decl* PROCESS process
|
||||
%import .common.*
|
||||
%import .process.*
|
||||
%import .term.*
|
||||
%import .decl.*
|
||||
%import .query.*
|
||||
|
||||
%import common (WORD, DIGIT, NUMBER, WS) // imports from terminal library
|
||||
%ignore WS // Disregard spaces in text
|
||||
%ignore COMMENT
|
||||
111
marzipan/src/grammar/common.lark
Normal file
111
marzipan/src/grammar/common.lark
Normal file
@@ -0,0 +1,111 @@
|
||||
PROCESS: "process"
|
||||
EQUIVALENCE: "equivalence"
|
||||
YIELD: "yield"
|
||||
channel: CHANNEL
|
||||
CHANNEL: "channel"
|
||||
IDENT: /[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/
|
||||
ZERO: "0"
|
||||
INFIX: "||"
|
||||
| "&&"
|
||||
| "="
|
||||
| "<>"
|
||||
| "<="
|
||||
| ">="
|
||||
| "<"
|
||||
| ">"
|
||||
typeid: channel
|
||||
| IDENT
|
||||
_non_empty_seq{x}: x ("," x)*
|
||||
_maybe_empty_seq{x}: [ _non_empty_seq{x} ]
|
||||
|
||||
OPTIONS_FUN_CONST: "data" | "private" | "typeConverter"
|
||||
OPTIONS_FUN: OPTIONS_FUN_CONST
|
||||
OPTIONS_CONST: OPTIONS_FUN_CONST
|
||||
OPTIONS_FREE_REDUC: "private"
|
||||
OPTIONS_PRED: "memberOptim" | "block"
|
||||
OPTIONS_PROCESS: "precise"
|
||||
OPTIONS_QUERY_LEMMA_AXIOM: "noneSat" | "discardSat" | "instantiateSat" | "fullSat" | "noneVerif" | "discardVerif" | "instantiateVerif" | "fullVerif"
|
||||
OPTIONS_AXIOM: OPTIONS_QUERY_LEMMA_AXIOM
|
||||
OPTIONS_QUERY_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "induction" | "noInduction"
|
||||
OPTIONS_LEMMA: OPTIONS_QUERY_LEMMA | "maxSubset"
|
||||
OPTIONS_QUERY: OPTIONS_QUERY_LEMMA | "proveAll"
|
||||
OPTIONS_QUERY_SECRET: "reachability" | "pv_reachability" | "real_or_random" | "pv_real_or_random" | /cv_[a-zA-Z0-9À-ÿ'_]*/
|
||||
OPTIONS_RESTRICTION: "removeEvents" | "keepEvents" | "keep" # transl_option_lemma_query in pitsyntax.ml
|
||||
OPTIONS_EQUATION: "convergent" | "linear" # check_equations in pitsyntax.ml
|
||||
OPTIONS_TYPE: "fixed" | "bounded" | "large" | "nonuniform" | "password" | "size" NAT | "size" NAT "_" NAT | "pcoll" NAT | "small" | "ghost" # from Page 22 in CryptoVerif reference manual
|
||||
options{idents}: [ "[" _non_empty_seq{idents} "]" ]
|
||||
BOOL : "true" | "false"
|
||||
NONE: "none"
|
||||
FULL: "full"
|
||||
ALL: "all"
|
||||
FUNC: IDENT
|
||||
ignoretype_options: BOOL | ALL | NONE | "attacker"
|
||||
boolean_settings_names: "privateCommOnPublicTerms"
|
||||
| "rejectChoiceTrueFalse"
|
||||
| "rejectNoSimplif"
|
||||
| "allowDiffPatterns"
|
||||
| "inductionQueries"
|
||||
| "inductionLemmas"
|
||||
| "movenew"
|
||||
| "movelet"
|
||||
| "stopTerm"
|
||||
| "removeEventsForLemma"
|
||||
| "simpEqAll"
|
||||
| "eqInNames"
|
||||
| "preciseLetExpand"
|
||||
| "expandSimplifyIfCst"
|
||||
| "featureFuns"
|
||||
| "featureNames"
|
||||
| "featurePredicates"
|
||||
| "featureEvents"
|
||||
| "featureTables"
|
||||
| "featureDepth"
|
||||
| "featureWidth"
|
||||
| "simplifyDerivation"
|
||||
| "abbreviateDerivation"
|
||||
| "explainDerivation"
|
||||
| "unifyDerivation"
|
||||
| "reconstructDerivation"
|
||||
| "displayDerivation"
|
||||
| "traceBacktracking"
|
||||
| "interactiveSwapping"
|
||||
| "color"
|
||||
| "verboseLemmas"
|
||||
| "abbreviateClauses"
|
||||
| "removeUselessClausesBeforeDisplay"
|
||||
| "verboseEq"
|
||||
| "verboseDestructors"
|
||||
| "verboseTerm"
|
||||
| "verboseStatistics"
|
||||
| "verboseRules"
|
||||
| "verboseBase"
|
||||
| "verboseRedundant"
|
||||
| "verboseCompleted"
|
||||
| "verboseGoalReachable"
|
||||
|
||||
_decl_pair{name, value}: "set" name "=" value "."
|
||||
|
||||
set_settings_boolean_decl: _decl_pair{boolean_settings_names, BOOL}
|
||||
|
||||
ignore_types_values: BOOL | "all" | "none" | "attacker"
|
||||
simplify_process_values: BOOL | "interactive"
|
||||
precise_actions_values: BOOL | "trueWithoutArgsInNames"
|
||||
redundant_hyp_elim_values: BOOL | "beginOnly"
|
||||
reconstruct_trace_values: BOOL | NAT | "yes" | "no" # TODO(blipp): check whether yes/no is always allowed for BOOL
|
||||
attacker_values: "active" | "passive"
|
||||
key_compromise_values: "none" | "approx" | "strict"
|
||||
predicates_implementable: "check" | "nocheck"
|
||||
application_values: "instantiate" | "full" | "none" | "discard"
|
||||
max_values: "none" | "n"
|
||||
sel_fun_values: "TermMaxsize" | "Term"| "NounifsetMaxsize" | "Nounifset"
|
||||
redundancy_elim_values: "best" | "simple" | "no"
|
||||
nounif_ignore_a_few_times_values: "none" | "auto" | "all"
|
||||
nounif_ignore_ntimes_values: "n"
|
||||
trace_display_values: "short" | "long" | "none"
|
||||
verbose_clauses_values: "none" | "explained" | "short"
|
||||
INT: NAT | "-" NAT
|
||||
COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
|
||||
%import common (WORD, DIGIT, NUMBER, WS, ESCAPED_STRING) // imports from terminal library
|
||||
%ignore WS // Disregard spaces in text
|
||||
%ignore COMMENT
|
||||
101
marzipan/src/grammar/decl.lark
Normal file
101
marzipan/src/grammar/decl.lark
Normal file
@@ -0,0 +1,101 @@
|
||||
decl: type_decl
|
||||
| channel_decl
|
||||
| free_decl
|
||||
| const_decl
|
||||
| fun_decl
|
||||
| letfun_decl
|
||||
| reduc_decl
|
||||
| fun_reduc_decl
|
||||
| equation_decl
|
||||
| pred_decl
|
||||
| table_decl
|
||||
| let_decl
|
||||
| set_settings_decl
|
||||
| event_decl
|
||||
| query_decl
|
||||
| axiom_decl
|
||||
| restriction_decl
|
||||
| lemma_decl
|
||||
| noninterf_decl
|
||||
| weaksecret_decl
|
||||
| not_decl
|
||||
| select_decl
|
||||
| noselect_decl
|
||||
| nounif_decl
|
||||
| elimtrue_decl
|
||||
| clauses_decl
|
||||
| module_decl
|
||||
#| param_decl
|
||||
#| proba_decl
|
||||
#| letproba_decl
|
||||
#| proof_decl
|
||||
#| def_decl
|
||||
#| expand_decl
|
||||
|
||||
type_decl: "type" IDENT "."
|
||||
#type_decl: "type" IDENT options{OPTIONS_TYPE} "."
|
||||
channel_decl: "channel" _non_empty_seq{IDENT} "."
|
||||
free_decl: "free" _non_empty_seq{IDENT} ":" typeid "."
|
||||
#free_decl: "free" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FREE_REDUC} "."
|
||||
const_decl: "const" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
fun_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
letfun_decl: "letfun" IDENT [ "(" [ typedecl ] ")" ] "=" pterm "."
|
||||
reduc_decl: "reduc" eqlist options{OPTIONS_FREE_REDUC} "."
|
||||
fun_reduc_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid "reduc" mayfailreduc options{OPTIONS_FUN_CONST} "."
|
||||
equation_decl: "equation" eqlist options{OPTIONS_EQUATION} "."
|
||||
pred_decl: "pred" IDENT [ "(" [ _maybe_empty_seq{typeid} ] ")" ] options{OPTIONS_PRED} "."
|
||||
table_decl: "table" IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
let_decl: "let" IDENT [ "(" [ typedecl ] ")" ] "=" process "."
|
||||
|
||||
set_settings_decl: set_settings_boolean_decl
|
||||
| _decl_pair{"ignoreTypes", ignore_types_values}
|
||||
| _decl_pair{"simplifyProcess", simplify_process_values}
|
||||
| _decl_pair{"preciseActions", precise_actions_values}
|
||||
| _decl_pair{"redundantHypElim", redundant_hyp_elim_values}
|
||||
| _decl_pair{"reconstructTrace", reconstruct_trace_values}
|
||||
| _decl_pair{"attacker", attacker_values}
|
||||
| _decl_pair{"keyCompromise", key_compromise_values}
|
||||
| _decl_pair{"predicatesImplementable", predicates_implementable}
|
||||
| _decl_pair{"saturationApplication", application_values}
|
||||
| _decl_pair{"verificationApplication", application_values}
|
||||
| _decl_pair{"maxDepth", max_values}
|
||||
| _decl_pair{"maxHyp", max_values}
|
||||
| _decl_pair{"selFun", sel_fun_values}
|
||||
| _decl_pair{"redundancyElim", redundancy_elim_values}
|
||||
| _decl_pair{"nounifIgnoreAFewTimes", nounif_ignore_a_few_times_values}
|
||||
| _decl_pair{"nounifIgnoreNtimes", nounif_ignore_ntimes_values}
|
||||
| _decl_pair{"traceDisplay", trace_display_values}
|
||||
| _decl_pair{"verboseClauses", verbose_clauses_values}
|
||||
| set_strategy
|
||||
| set_symb_order
|
||||
|
||||
_swap_strategy_seq{x}: x ("->" x)*
|
||||
set_strategy: "set" "swapping" "=" _swap_strategy_seq{TAG} "."
|
||||
_symb_ord_seq{x}: x (">" x)*
|
||||
set_symb_order: "set" "symbOrder" "=" _symb_ord_seq{FUNC} "."
|
||||
|
||||
event_decl: "event" IDENT ["(" _maybe_empty_seq{typeid} ")"] "."
|
||||
select_decl: "select" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
noselect_decl: "noselect" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
nounif_decl: "nounif" [ typedecl ";"] nounifdecl [ "/" INT ] [ "["_non_empty_seq{nounifoption} "]" ] "."
|
||||
|
||||
elimtrue_decl: "elimtrue" [ failtypedecl ";" ] term "."
|
||||
clauses_decl: "clauses" clauses "."
|
||||
|
||||
module_decl: "@module" " " IDENT
|
||||
|
||||
noninterf_decl: "noninterf" [ typedecl ";"] _maybe_empty_seq{nidecl} "."
|
||||
weaksecret_decl: "weaksecret" IDENT "."
|
||||
|
||||
nidecl: IDENT [ "among" "(" _non_empty_seq{term} ")" ]
|
||||
equality: term "=" term
|
||||
| "let" IDENT "=" term "in" equality
|
||||
mayfailequality: IDENT mayfailterm_seq "=" mayfailterm
|
||||
eqlist: [ "forall" typedecl ";" ] equality [ ";" eqlist ]
|
||||
clause: term
|
||||
| term "->" term
|
||||
| term "<->" term
|
||||
| term "<=>" term
|
||||
clauses: [ "forall" failtypedecl ";" ] clause [ ";" clauses ]
|
||||
mayfailreduc: [ "forall" failtypedecl ";" ] mayfailequality [ "otherwise" mayfailreduc ]
|
||||
NAT: DIGIT+
|
||||
6
marzipan/src/grammar/lemma.lark
Normal file
6
marzipan/src/grammar/lemma.lark
Normal file
@@ -0,0 +1,6 @@
|
||||
lemma: gterm [";" lemma]
|
||||
| gterm "for" "{" "public_vars" _non_empty_seq{IDENT} "}" [";" lemma]
|
||||
| gterm "for" "{" "secret" IDENT [ "public_vars" _non_empty_seq{IDENT}] "[real_or_random]" "}" [";" lemma]
|
||||
axiom_decl: "axiom" [ typedecl ";"] lemma options{OPTIONS_AXIOM} "."
|
||||
restriction_decl: "restriction" [ typedecl ";"] lemma options{OPTIONS_RESTRICTION} "."
|
||||
lemma_decl: "lemma" [ typedecl ";"] lemma options{OPTIONS_LEMMA} "."
|
||||
45
marzipan/src/grammar/process.lark
Normal file
45
marzipan/src/grammar/process.lark
Normal file
@@ -0,0 +1,45 @@
|
||||
start: decl* PROCESS process | decl* EQUIVALENCE process process
|
||||
process: ZERO
|
||||
| YIELD
|
||||
| IDENT [ "(" _maybe_empty_seq{pterm} ")" ]
|
||||
| bracketed_process
|
||||
| piped_process
|
||||
| replicated_process
|
||||
| replicated_process_bounds
|
||||
| sample_process
|
||||
| if_process
|
||||
| in_process
|
||||
| out_process
|
||||
| let_process
|
||||
| insert_process
|
||||
| get_process
|
||||
| event_process
|
||||
| phase
|
||||
| sync
|
||||
bracketed_process: "(" process ")"
|
||||
piped_process: process "|" process
|
||||
replicated_process: "!" process
|
||||
replicated_process_bounds: "!" IDENT "<=" IDENT process
|
||||
| "foreach" IDENT "<=" IDENT "do" process
|
||||
sample_process: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" process]
|
||||
| IDENT "<-R" typeid [";" process]
|
||||
let_process: "let" pattern "=" pterm ["in" process [ "else" process ]]
|
||||
| IDENT [":" typeid] "<-" pterm [";" process]
|
||||
| "let" typedecl "suchthat" pterm options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
if_process: "if" pterm "then" process [ "else" process ]
|
||||
in_process: "in" "(" pterm "," pattern ")" options{OPTIONS_PROCESS} [ ";" process ]
|
||||
get_process: "get" IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
out_process: "out" "(" pterm "," pterm ")" [ ";" process ]
|
||||
insert_process: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" [ ";" process ]
|
||||
event_process: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] [ ";" process ]
|
||||
term: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{term} ")"
|
||||
| IDENT "(" _maybe_empty_seq{term} ")"
|
||||
| term ( "+" | "-" ) NAT
|
||||
| NAT "+" term
|
||||
| term INFIX term
|
||||
| "not" "(" term ")"
|
||||
|
||||
phase: "phase" NAT [";" process]
|
||||
sync: "sync" NAT ["[" TAG "]"] [";" process]
|
||||
24
marzipan/src/grammar/query.lark
Normal file
24
marzipan/src/grammar/query.lark
Normal file
@@ -0,0 +1,24 @@
|
||||
query: gterm ["public_vars" _non_empty_seq{IDENT}] [";" query]
|
||||
| "secret" IDENT ["public_vars" _non_empty_seq{IDENT}] options{OPTIONS_QUERY_SECRET} [";" query]
|
||||
| "putbegin" "event" ":" _non_empty_seq{IDENT} [";" query] // Opportunistically left a space between "event" and ":", ProVerif might not accept it with spaces.
|
||||
| "putbegin" "inj-event" ":" _non_empty_seq{IDENT} [";" query]
|
||||
nounifdecl: "let" IDENT "=" gformat "in" nounifdecl
|
||||
| IDENT ["(" _maybe_empty_seq{gformat} ")" ["phase" NAT]]
|
||||
gformat: IDENT
|
||||
| "*" IDENT
|
||||
| IDENT "(" _maybe_empty_seq{gformat} ")"
|
||||
| "choice" "[" gformat "," gformat "]"
|
||||
| "not" "(" _maybe_empty_seq{gformat} ")"
|
||||
| "new" IDENT [ "[" [ fbinding ] "]" ]
|
||||
| "let" IDENT "=" gformat "in" gformat
|
||||
fbinding: "!" NAT "=" gformat [";" fbinding]
|
||||
| IDENT "=" gformat [";" fbinding]
|
||||
nounifoption: "hypothesis"
|
||||
| "conclusion"
|
||||
| "ignoreAFewTimes"
|
||||
| "inductionOn" "=" IDENT
|
||||
| "inductionOn" "=" "{" _non_empty_seq{IDENT} "}"
|
||||
|
||||
query_decl: "query" [ typedecl ";"] query options{OPTIONS_QUERY} "."
|
||||
not_decl: "not" [ typedecl ";"] gterm "."
|
||||
TAG: IDENT
|
||||
67
marzipan/src/grammar/term.lark
Normal file
67
marzipan/src/grammar/term.lark
Normal file
@@ -0,0 +1,67 @@
|
||||
gterm: ident_gterm
|
||||
| fun_gterm
|
||||
| choice_gterm
|
||||
| infix_gterm
|
||||
| arith_gterm
|
||||
| arith2_gterm
|
||||
| event_gterm
|
||||
| injevent_gterm
|
||||
| implies_gterm
|
||||
| paren_gterm
|
||||
| sample_gterm
|
||||
| let_gterm
|
||||
ident_gterm: IDENT
|
||||
fun_gterm: IDENT "(" _maybe_empty_seq{gterm} ")" ["phase" NAT] ["@" IDENT]
|
||||
choice_gterm: "choice" "[" gterm "," gterm "]"
|
||||
infix_gterm: gterm INFIX gterm
|
||||
arith_gterm: gterm ( "+" | "-" ) NAT
|
||||
arith2_gterm: NAT "+" gterm
|
||||
event_gterm: "event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
injevent_gterm: "inj-event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
implies_gterm: gterm "==>" gterm
|
||||
paren_gterm: "(" _maybe_empty_seq{gterm} ")"
|
||||
sample_gterm: "new" IDENT [ "[" [ gbinding ] "]" ]
|
||||
let_gterm: "let" IDENT "=" gterm "in" gterm
|
||||
|
||||
gbinding: "!" NAT "=" gterm [";" gbinding]
|
||||
| IDENT "=" gterm [";" gbinding]
|
||||
|
||||
pterm: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{pterm} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pterm} ")"
|
||||
| choice_pterm
|
||||
| pterm ("+" | "-") NAT
|
||||
| NAT "+" pterm
|
||||
| pterm INFIX pterm
|
||||
| not_pterm
|
||||
| sample_pterm
|
||||
| if_pterm
|
||||
| let_pterm
|
||||
| insert_pterm
|
||||
| get_pterm
|
||||
| event_pterm
|
||||
choice_pterm: "choice[" pterm "," pterm "]"
|
||||
if_pterm: "if" pterm "then" pterm [ "else" pterm ]
|
||||
not_pterm: "not" "(" pterm ")"
|
||||
let_pterm: "let" pattern "=" pterm "in" pterm [ "else" pterm ]
|
||||
| IDENT [":" typeid] "<-" pterm ";" pterm
|
||||
| "let" typedecl "suchthat" pterm "in" pterm [ "else" pterm ]
|
||||
sample_pterm: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" pterm]
|
||||
| IDENT "<-R" typeid [";" pterm]
|
||||
insert_pterm: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" ";" pterm
|
||||
event_pterm: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] ";" pterm
|
||||
get_pterm: "get" IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" pterm [ "else" pterm ] ]
|
||||
pattern: IDENT [":" typeid]
|
||||
| "_" [ ":" typeid ]
|
||||
| NAT
|
||||
| pattern "+" NAT
|
||||
| NAT "+" pattern
|
||||
| "(" _maybe_empty_seq{pattern} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pattern} ")"
|
||||
| "=" pterm
|
||||
mayfailterm: term
|
||||
| "fail"
|
||||
mayfailterm_seq: "(" _non_empty_seq{mayfailterm} ")"
|
||||
typedecl: _non_empty_seq{IDENT} ":" typeid [ "," typedecl ]
|
||||
failtypedecl: _non_empty_seq{IDENT} ":" typeid [ "or fail" ] [ "," failtypedecl ]
|
||||
362
marzipan/src/grammars/awk_proverif.lark
Normal file
362
marzipan/src/grammars/awk_proverif.lark
Normal file
@@ -0,0 +1,362 @@
|
||||
|
||||
QUERY: "@query"
|
||||
REACHABLE: "@reachable"
|
||||
LEMMA: "@lemma"
|
||||
PROCESS: "process"
|
||||
EQUIVALENCE: "equivalence"
|
||||
YIELD: "yield"
|
||||
channel: CHANNEL
|
||||
CHANNEL: "channel"
|
||||
IDENT: /[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/
|
||||
ZERO: "0"
|
||||
INFIX: "||"
|
||||
| "&&"
|
||||
| "="
|
||||
| "<>"
|
||||
| "<="
|
||||
| ">="
|
||||
| "<"
|
||||
| ">"
|
||||
typeid: channel
|
||||
| IDENT
|
||||
_non_empty_seq{x}: x ("," x)*
|
||||
_maybe_empty_seq{x}: [ _non_empty_seq{x} ]
|
||||
|
||||
OPTIONS_FUN_CONST: "data" | "private" | "typeConverter"
|
||||
OPTIONS_FUN: OPTIONS_FUN_CONST
|
||||
OPTIONS_CONST: OPTIONS_FUN_CONST
|
||||
OPTIONS_FREE_REDUC: "private"
|
||||
OPTIONS_PRED: "memberOptim" | "block"
|
||||
OPTIONS_PROCESS: "precise"
|
||||
OPTIONS_QUERY_LEMMA_AXIOM: "noneSat" | "discardSat" | "instantiateSat" | "fullSat" | "noneVerif" | "discardVerif" | "instantiateVerif" | "fullVerif"
|
||||
OPTIONS_AXIOM: OPTIONS_QUERY_LEMMA_AXIOM
|
||||
OPTIONS_QUERY_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "induction" | "noInduction"
|
||||
OPTIONS_LEMMA: OPTIONS_QUERY_LEMMA | "maxSubset"
|
||||
OPTIONS_QUERY: OPTIONS_QUERY_LEMMA | "proveAll"
|
||||
OPTIONS_QUERY_SECRET: "reachability" | "pv_reachability" | "real_or_random" | "pv_real_or_random" | /cv_[a-zA-Z0-9À-ÿ'_]*/
|
||||
OPTIONS_RESTRICTION: "removeEvents" | "keepEvents" | "keep" # transl_option_lemma_query in pitsyntax.ml
|
||||
OPTIONS_EQUATION: "convergent" | "linear" # check_equations in pitsyntax.ml
|
||||
OPTIONS_TYPE: "fixed" | "bounded" | "large" | "nonuniform" | "password" | "size" NAT | "size" NAT "_" NAT | "pcoll" NAT | "small" | "ghost" # from Page 22 in CryptoVerif reference manual
|
||||
options{idents}: [ "[" _non_empty_seq{idents} "]" ]
|
||||
BOOL : "true" | "false"
|
||||
NONE: "none"
|
||||
FULL: "full"
|
||||
ALL: "all"
|
||||
FUNC: IDENT
|
||||
ignoretype_options: BOOL | ALL | NONE | "attacker"
|
||||
boolean_settings_names: "privateCommOnPublicTerms"
|
||||
| "rejectChoiceTrueFalse"
|
||||
| "rejectNoSimplif"
|
||||
| "allowDiffPatterns"
|
||||
| "inductionQueries"
|
||||
| "inductionLemmas"
|
||||
| "movenew"
|
||||
| "movelet"
|
||||
| "stopTerm"
|
||||
| "removeEventsForLemma"
|
||||
| "simpEqAll"
|
||||
| "eqInNames"
|
||||
| "preciseLetExpand"
|
||||
| "expandSimplifyIfCst"
|
||||
| "featureFuns"
|
||||
| "featureNames"
|
||||
| "featurePredicates"
|
||||
| "featureEvents"
|
||||
| "featureTables"
|
||||
| "featureDepth"
|
||||
| "featureWidth"
|
||||
| "simplifyDerivation"
|
||||
| "abbreviateDerivation"
|
||||
| "explainDerivation"
|
||||
| "unifyDerivation"
|
||||
| "reconstructDerivation"
|
||||
| "displayDerivation"
|
||||
| "traceBacktracking"
|
||||
| "interactiveSwapping"
|
||||
| "color"
|
||||
| "verboseLemmas"
|
||||
| "abbreviateClauses"
|
||||
| "removeUselessClausesBeforeDisplay"
|
||||
| "verboseEq"
|
||||
| "verboseDestructors"
|
||||
| "verboseTerm"
|
||||
| "verboseStatistics"
|
||||
| "verboseRules"
|
||||
| "verboseBase"
|
||||
| "verboseRedundant"
|
||||
| "verboseCompleted"
|
||||
| "verboseGoalReachable"
|
||||
|
||||
_decl_pair{name, value}: "set" name "=" value "."
|
||||
|
||||
set_settings_boolean_decl: _decl_pair{boolean_settings_names, BOOL}
|
||||
|
||||
ignore_types_values: BOOL | "all" | "none" | "attacker"
|
||||
simplify_process_values: BOOL | "interactive"
|
||||
precise_actions_values: BOOL | "trueWithoutArgsInNames"
|
||||
redundant_hyp_elim_values: BOOL | "beginOnly"
|
||||
reconstruct_trace_values: BOOL | NAT | "yes" | "no" # TODO(blipp): check whether yes/no is always allowed for BOOL
|
||||
attacker_values: "active" | "passive"
|
||||
key_compromise_values: "none" | "approx" | "strict"
|
||||
predicates_implementable: "check" | "nocheck"
|
||||
application_values: "instantiate" | "full" | "none" | "discard"
|
||||
max_values: "none" | "n"
|
||||
sel_fun_values: "TermMaxsize" | "Term"| "NounifsetMaxsize" | "Nounifset"
|
||||
redundancy_elim_values: "best" | "simple" | "no"
|
||||
nounif_ignore_a_few_times_values: "none" | "auto" | "all"
|
||||
nounif_ignore_ntimes_values: "n"
|
||||
trace_display_values: "short" | "long" | "none"
|
||||
verbose_clauses_values: "none" | "explained" | "short"
|
||||
INT: NAT | "-" NAT
|
||||
COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
|
||||
%import common (WORD, DIGIT, NUMBER, WS, ESCAPED_STRING) // imports from terminal library
|
||||
%ignore WS // Disregard spaces in text
|
||||
%ignore COMMENT
|
||||
decl: type_decl
|
||||
| channel_decl
|
||||
| free_decl
|
||||
| const_decl
|
||||
| fun_decl
|
||||
| letfun_decl
|
||||
| reduc_decl
|
||||
| fun_reduc_decl
|
||||
| equation_decl
|
||||
| pred_decl
|
||||
| table_decl
|
||||
| let_decl
|
||||
| set_settings_decl
|
||||
| event_decl
|
||||
| query_decl
|
||||
| axiom_decl
|
||||
| restriction_decl
|
||||
| lemma_decl
|
||||
| noninterf_decl
|
||||
| weaksecret_decl
|
||||
| not_decl
|
||||
| select_decl
|
||||
| noselect_decl
|
||||
| nounif_decl
|
||||
| elimtrue_decl
|
||||
| clauses_decl
|
||||
| module_decl
|
||||
#| param_decl
|
||||
#| proba_decl
|
||||
#| letproba_decl
|
||||
#| proof_decl
|
||||
#| def_decl
|
||||
#| expand_decl
|
||||
|
||||
type_decl: "type" IDENT "."
|
||||
#type_decl: "type" IDENT options{OPTIONS_TYPE} "."
|
||||
channel_decl: "channel" _non_empty_seq{IDENT} "."
|
||||
free_decl: "free" _non_empty_seq{IDENT} ":" typeid "."
|
||||
#free_decl: "free" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FREE_REDUC} "."
|
||||
const_decl: "const" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
fun_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
letfun_decl: "letfun" IDENT [ "(" [ typedecl ] ")" ] "=" pterm "."
|
||||
reduc_decl: "reduc" eqlist options{OPTIONS_FREE_REDUC} "."
|
||||
fun_reduc_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid "reduc" mayfailreduc options{OPTIONS_FUN_CONST} "."
|
||||
equation_decl: "equation" eqlist options{OPTIONS_EQUATION} "."
|
||||
pred_decl: "pred" IDENT [ "(" [ _maybe_empty_seq{typeid} ] ")" ] options{OPTIONS_PRED} "."
|
||||
table_decl: "table" IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
let_decl: "let" IDENT [ "(" [ typedecl ] ")" ] "=" process "."
|
||||
|
||||
set_settings_decl: set_settings_boolean_decl
|
||||
| _decl_pair{"ignoreTypes", ignore_types_values}
|
||||
| _decl_pair{"simplifyProcess", simplify_process_values}
|
||||
| _decl_pair{"preciseActions", precise_actions_values}
|
||||
| _decl_pair{"redundantHypElim", redundant_hyp_elim_values}
|
||||
| _decl_pair{"reconstructTrace", reconstruct_trace_values}
|
||||
| _decl_pair{"attacker", attacker_values}
|
||||
| _decl_pair{"keyCompromise", key_compromise_values}
|
||||
| _decl_pair{"predicatesImplementable", predicates_implementable}
|
||||
| _decl_pair{"saturationApplication", application_values}
|
||||
| _decl_pair{"verificationApplication", application_values}
|
||||
| _decl_pair{"maxDepth", max_values}
|
||||
| _decl_pair{"maxHyp", max_values}
|
||||
| _decl_pair{"selFun", sel_fun_values}
|
||||
| _decl_pair{"redundancyElim", redundancy_elim_values}
|
||||
| _decl_pair{"nounifIgnoreAFewTimes", nounif_ignore_a_few_times_values}
|
||||
| _decl_pair{"nounifIgnoreNtimes", nounif_ignore_ntimes_values}
|
||||
| _decl_pair{"traceDisplay", trace_display_values}
|
||||
| _decl_pair{"verboseClauses", verbose_clauses_values}
|
||||
| set_strategy
|
||||
| set_symb_order
|
||||
|
||||
_swap_strategy_seq{x}: x ("->" x)*
|
||||
set_strategy: "set" "swapping" "=" _swap_strategy_seq{TAG} "."
|
||||
_symb_ord_seq{x}: x (">" x)*
|
||||
set_symb_order: "set" "symbOrder" "=" _symb_ord_seq{FUNC} "."
|
||||
|
||||
event_decl: "event" IDENT ["(" _maybe_empty_seq{typeid} ")"] "."
|
||||
select_decl: "select" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
noselect_decl: "noselect" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
nounif_decl: "nounif" [ typedecl ";"] nounifdecl [ "/" INT ] [ "["_non_empty_seq{nounifoption} "]" ] "."
|
||||
|
||||
elimtrue_decl: "elimtrue" [ failtypedecl ";" ] term "."
|
||||
clauses_decl: "clauses" clauses "."
|
||||
|
||||
module_decl: "@module" " " IDENT
|
||||
|
||||
noninterf_decl: "noninterf" [ typedecl ";"] _maybe_empty_seq{nidecl} "."
|
||||
weaksecret_decl: "weaksecret" IDENT "."
|
||||
|
||||
nidecl: IDENT [ "among" "(" _non_empty_seq{term} ")" ]
|
||||
equality: term "=" term
|
||||
| "let" IDENT "=" term "in" equality
|
||||
mayfailequality: IDENT mayfailterm_seq "=" mayfailterm
|
||||
eqlist: [ "forall" typedecl ";" ] equality [ ";" eqlist ]
|
||||
clause: term
|
||||
| term "->" term
|
||||
| term "<->" term
|
||||
| term "<=>" term
|
||||
clauses: [ "forall" failtypedecl ";" ] clause [ ";" clauses ]
|
||||
mayfailreduc: [ "forall" failtypedecl ";" ] mayfailequality [ "otherwise" mayfailreduc ]
|
||||
NAT: DIGIT+
|
||||
start: decl* PROCESS process | decl* EQUIVALENCE process process
|
||||
process: ZERO
|
||||
| YIELD
|
||||
| IDENT [ "(" _maybe_empty_seq{pterm} ")" ]
|
||||
| bracketed_process
|
||||
| piped_process
|
||||
| replicated_process
|
||||
| replicated_process_bounds
|
||||
| sample_process
|
||||
| if_process
|
||||
| in_process
|
||||
| out_process
|
||||
| let_process
|
||||
| insert_process
|
||||
| get_process
|
||||
| event_process
|
||||
| phase
|
||||
| sync
|
||||
bracketed_process: "(" process ")"
|
||||
piped_process: process "|" process
|
||||
replicated_process: "!" process
|
||||
replicated_process_bounds: "!" IDENT "<=" IDENT process
|
||||
| "foreach" IDENT "<=" IDENT "do" process
|
||||
sample_process: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" process]
|
||||
| IDENT "<-R" typeid [";" process]
|
||||
let_process: "let" pattern "=" pterm ["in" process [ "else" process ]]
|
||||
| IDENT [":" typeid] "<-" pterm [";" process]
|
||||
| "let" typedecl "suchthat" pterm options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
if_process: "if" pterm "then" process [ "else" process ]
|
||||
in_process: "in" "(" pterm "," pattern ")" options{OPTIONS_PROCESS} [ ";" process ]
|
||||
get_process: "get" IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
out_process: "out" "(" pterm "," pterm ")" [ ";" process ]
|
||||
insert_process: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" [ ";" process ]
|
||||
event_process: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] [ ";" process ]
|
||||
term: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{term} ")"
|
||||
| IDENT "(" _maybe_empty_seq{term} ")"
|
||||
| term ( "+" | "-" ) NAT
|
||||
| NAT "+" term
|
||||
| term INFIX term
|
||||
| "not" "(" term ")"
|
||||
|
||||
phase: "phase" NAT [";" process]
|
||||
sync: "sync" NAT ["[" TAG "]"] [";" process]
|
||||
query: gterm ["public_vars" _non_empty_seq{IDENT}] [";" query]
|
||||
| "secret" IDENT ["public_vars" _non_empty_seq{IDENT}] options{OPTIONS_QUERY_SECRET} [";" query]
|
||||
| "putbegin" "event" ":" _non_empty_seq{IDENT} [";" query] // Opportunistically left a space between "event" and ":", ProVerif might not accept it with spaces.
|
||||
| "putbegin" "inj-event" ":" _non_empty_seq{IDENT} [";" query]
|
||||
nounifdecl: "let" IDENT "=" gformat "in" nounifdecl
|
||||
| IDENT ["(" _maybe_empty_seq{gformat} ")" ["phase" NAT]]
|
||||
gformat: IDENT
|
||||
| "*" IDENT
|
||||
| IDENT "(" _maybe_empty_seq{gformat} ")"
|
||||
| "choice" "[" gformat "," gformat "]"
|
||||
| "not" "(" _maybe_empty_seq{gformat} ")"
|
||||
| "new" IDENT [ "[" [ fbinding ] "]" ]
|
||||
| "let" IDENT "=" gformat "in" gformat
|
||||
fbinding: "!" NAT "=" gformat [";" fbinding]
|
||||
| IDENT "=" gformat [";" fbinding]
|
||||
nounifoption: "hypothesis"
|
||||
| "conclusion"
|
||||
| "ignoreAFewTimes"
|
||||
| "inductionOn" "=" IDENT
|
||||
| "inductionOn" "=" "{" _non_empty_seq{IDENT} "}"
|
||||
|
||||
query_annotation: [(REACHABLE|QUERY) ESCAPED_STRING]
|
||||
query_decl: query_annotation query_decl_core
|
||||
query_decl_core: "query" [ typedecl ";"] query options{OPTIONS_QUERY} "."
|
||||
not_decl: "not" [ typedecl ";"] gterm "."
|
||||
TAG: IDENT
|
||||
lemma: gterm [";" lemma]
|
||||
| gterm "for" "{" "public_vars" _non_empty_seq{IDENT} "}" [";" lemma]
|
||||
| gterm "for" "{" "secret" IDENT [ "public_vars" _non_empty_seq{IDENT}] "[real_or_random]" "}" [";" lemma]
|
||||
axiom_decl: "axiom" [ typedecl ";"] lemma options{OPTIONS_AXIOM} "."
|
||||
restriction_decl: "restriction" [ typedecl ";"] lemma options{OPTIONS_RESTRICTION} "."
|
||||
lemma_annotation: [LEMMA ESCAPED_STRING]
|
||||
lemma_decl: lemma_annotation lemma_decl_core
|
||||
lemma_decl_core: "lemma" [ typedecl ";"] lemma options{OPTIONS_LEMMA} "."
|
||||
gterm: ident_gterm
|
||||
| fun_gterm
|
||||
| choice_gterm
|
||||
| infix_gterm
|
||||
| arith_gterm
|
||||
| arith2_gterm
|
||||
| event_gterm
|
||||
| injevent_gterm
|
||||
| implies_gterm
|
||||
| paren_gterm
|
||||
| sample_gterm
|
||||
| let_gterm
|
||||
ident_gterm: IDENT
|
||||
fun_gterm: IDENT "(" _maybe_empty_seq{gterm} ")" ["phase" NAT] ["@" IDENT]
|
||||
choice_gterm: "choice" "[" gterm "," gterm "]"
|
||||
infix_gterm: gterm INFIX gterm
|
||||
arith_gterm: gterm ( "+" | "-" ) NAT
|
||||
arith2_gterm: NAT "+" gterm
|
||||
event_gterm: "event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
injevent_gterm: "inj-event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
implies_gterm: gterm "==>" gterm
|
||||
paren_gterm: "(" _maybe_empty_seq{gterm} ")"
|
||||
sample_gterm: "new" IDENT [ "[" [ gbinding ] "]" ]
|
||||
let_gterm: "let" IDENT "=" gterm "in" gterm
|
||||
|
||||
gbinding: "!" NAT "=" gterm [";" gbinding]
|
||||
| IDENT "=" gterm [";" gbinding]
|
||||
|
||||
pterm: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{pterm} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pterm} ")"
|
||||
| choice_pterm
|
||||
| pterm ("+" | "-") NAT
|
||||
| NAT "+" pterm
|
||||
| pterm INFIX pterm
|
||||
| not_pterm
|
||||
| sample_pterm
|
||||
| if_pterm
|
||||
| let_pterm
|
||||
| insert_pterm
|
||||
| get_pterm
|
||||
| event_pterm
|
||||
choice_pterm: "choice[" pterm "," pterm "]"
|
||||
if_pterm: "if" pterm "then" pterm [ "else" pterm ]
|
||||
not_pterm: "not" "(" pterm ")"
|
||||
let_pterm: "let" pattern "=" pterm "in" pterm [ "else" pterm ]
|
||||
| IDENT [":" typeid] "<-" pterm ";" pterm
|
||||
| "let" typedecl "suchthat" pterm "in" pterm [ "else" pterm ]
|
||||
sample_pterm: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" pterm]
|
||||
| IDENT "<-R" typeid [";" pterm]
|
||||
insert_pterm: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" ";" pterm
|
||||
event_pterm: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] ";" pterm
|
||||
get_pterm: "get" IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" pterm [ "else" pterm ] ]
|
||||
pattern: IDENT [":" typeid]
|
||||
| "_" [ ":" typeid ]
|
||||
| NAT
|
||||
| pattern "+" NAT
|
||||
| NAT "+" pattern
|
||||
| "(" _maybe_empty_seq{pattern} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pattern} ")"
|
||||
| "=" pterm
|
||||
mayfailterm: term
|
||||
| "fail"
|
||||
mayfailterm_seq: "(" _non_empty_seq{mayfailterm} ")"
|
||||
typedecl: _non_empty_seq{IDENT} ":" typeid [ "," typedecl ]
|
||||
failtypedecl: _non_empty_seq{IDENT} ":" typeid [ "or fail" ] [ "," failtypedecl ]
|
||||
47
marzipan/src/grammars/marzipan_awk.lark
Normal file
47
marzipan/src/grammars/marzipan_awk.lark
Normal file
@@ -0,0 +1,47 @@
|
||||
start: line*
|
||||
|
||||
line: directive_line NEWLINE
|
||||
| code_line NEWLINE
|
||||
| NEWLINE
|
||||
|
||||
directive_line: module_directive
|
||||
| alias_directive
|
||||
| long_alias_start
|
||||
| long_alias_end
|
||||
| query_directive
|
||||
| reachable_directive
|
||||
| lemma_directive
|
||||
|
||||
code_line: /[^\n]*/ // Anything not recognized as a directive
|
||||
|
||||
module_directive: MODULE NAME
|
||||
|
||||
alias_directive: ALIAS alias_pair (alias_pair)*
|
||||
|
||||
alias_pair: IDENT "=" TOKEN
|
||||
|
||||
long_alias_start: LONG_ALIAS IDENT
|
||||
long_alias_end: LONG_ALIAS_END
|
||||
|
||||
query_directive: QUERY quoted_string
|
||||
reachable_directive: REACHABLE quoted_string
|
||||
lemma_directive: LEMMA quoted_string
|
||||
|
||||
|
||||
quoted_string: ESCAPED_STRING
|
||||
|
||||
NAME: IDENT
|
||||
TOKEN: /[^ \t\n]+/
|
||||
IDENT: /[A-Za-z_][A-Za-z0-9_']*/
|
||||
MODULE: "@module"
|
||||
ALIAS: "@alias"
|
||||
LONG_ALIAS: "@long-alias"
|
||||
LONG_ALIAS_END: "@long-alias-end"
|
||||
QUERY: "@query"
|
||||
REACHABLE: "@reachable"
|
||||
LEMMA: "@lemma"
|
||||
NEWLINE: /\r?\n/
|
||||
|
||||
%import common.ESCAPED_STRING
|
||||
%import common.WS
|
||||
%ignore WS
|
||||
361
marzipan/src/grammars/proverif.lark
Normal file
361
marzipan/src/grammars/proverif.lark
Normal file
@@ -0,0 +1,361 @@
|
||||
PROCESS: "process"
|
||||
start: decl* PROCESS process
|
||||
YIELD: "yield"
|
||||
channel: CHANNEL
|
||||
CHANNEL: "channel"
|
||||
IDENT: /[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/
|
||||
ZERO: "0"
|
||||
INFIX: "||"
|
||||
| "&&"
|
||||
| "="
|
||||
| "<>"
|
||||
| "<="
|
||||
| ">="
|
||||
| "<"
|
||||
| ">"
|
||||
typeid: channel
|
||||
| IDENT
|
||||
_non_empty_seq{x}: x ("," x)*
|
||||
_maybe_empty_seq{x}: [ _non_empty_seq{x} ]
|
||||
|
||||
OPTIONS_FUN_CONST: "data" | "private" | "typeConverter"
|
||||
OPTIONS_FUN: OPTIONS_FUN_CONST
|
||||
OPTIONS_CONST: OPTIONS_FUN_CONST
|
||||
OPTIONS_FREE_REDUC: "private"
|
||||
OPTIONS_PRED: "memberOptim" | "block"
|
||||
OPTIONS_PROCESS: "precise"
|
||||
OPTIONS_QUERY_LEMMA_AXIOM: "noneSat" | "discardSat" | "instantiateSat" | "fullSat" | "noneVerif" | "discardVerif" | "instantiateVerif" | "fullVerif"
|
||||
OPTIONS_AXIOM: OPTIONS_QUERY_LEMMA_AXIOM
|
||||
OPTIONS_QUERY_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "induction" | "noInduction"
|
||||
OPTIONS_LEMMA: OPTIONS_QUERY_LEMMA_AXIOM | "maxSubset"
|
||||
OPTIONS_QUERY: OPTIONS_QUERY_LEMMA_AXIOM | "proveAll"
|
||||
OPTIONS_QUERY_SECRET: "reachability" | "pv_reachability" | "real_or_random" | "pv_real_or_random" | "/cv_[a-zA-Z0-9À-ÿ'_]*/"
|
||||
OPTIONS_RESTRICTION: "removeEvents" | "keepEvents" | "keep" # transl_option_lemma_query in pitsyntax.ml
|
||||
OPTIONS_EQUATION: "convergent" | "linear" # check_equations in pitsyntax.ml
|
||||
OPTIONS_TYPE: "fixed" | "bounded" # TODO(blipp): complete this. These are only for compatibility with CryptoVerif and are ignored
|
||||
options{idents}: [ "[" _non_empty_seq{idents} "]" ]
|
||||
process: ZERO
|
||||
| YIELD
|
||||
| IDENT [ "(" _maybe_empty_seq{pterm} ")" ]
|
||||
| bracketed_process
|
||||
| piped_process
|
||||
| replicated_process
|
||||
| replicated_process_bounds
|
||||
| sample_process
|
||||
| if_process
|
||||
| in_process
|
||||
| out_process
|
||||
| let_process
|
||||
| insert_process
|
||||
| get_process
|
||||
| event_process
|
||||
| phase
|
||||
| sync
|
||||
bracketed_process: "(" process ")"
|
||||
piped_process: process "|" process
|
||||
replicated_process: "!" process
|
||||
replicated_process_bounds: "!" IDENT "<=" IDENT process
|
||||
| "foreach" IDENT "<=" IDENT "do" process
|
||||
sample_process: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" process]
|
||||
| IDENT "<-R" typeid [";" process]
|
||||
let_process: "let" pattern "=" pterm ["in" process [ "else" process ]]
|
||||
| IDENT [":" typeid] "<-" pterm [";" process]
|
||||
| "let" typedecl "suchthat" pterm options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
if_process: "if" pterm "then" process [ "else" process ]
|
||||
in_process: "in" "(" pterm "," pattern ")" options{OPTIONS_PROCESS} [ ";" process ]
|
||||
get_process: IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" process [ "else" process ] ]
|
||||
out_process: "out" "(" pterm "," pterm ")" [ ";" process ]
|
||||
insert_process: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" [ ";" process ]
|
||||
event_process: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] [ ";" process ]
|
||||
term: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{term} ")"
|
||||
| IDENT "(" _maybe_empty_seq{term} ")"
|
||||
| term ( "+" | "-" ) NAT
|
||||
| NAT "+" term
|
||||
| term INFIX term
|
||||
| "not" "(" term ")"
|
||||
|
||||
query: gterm ["public_vars" _non_empty_seq{IDENT}] [";" query]
|
||||
| "secret" IDENT ["public_vars" _non_empty_seq{IDENT}] options{OPTIONS_QUERY_SECRET} [";" query]
|
||||
| "putbegin" "event" ":" _non_empty_seq{IDENT} [";" query] // Opportunistically left a space between "event" and ":", ProVerif might not accept it with spaces.
|
||||
| "putbegin" "inj-event" ":" _non_empty_seq{IDENT} [";" query]
|
||||
lemma: gterm [";" lemma]
|
||||
| gterm "for" "{" "public_vars" _non_empty_seq{IDENT} "}" [";" lemma]
|
||||
| gterm "for" "{" "secret" IDENT [ "public_vars" _non_empty_seq{IDENT}] "[real_or_random]" "}" [";" lemma]
|
||||
gterm: ident_gterm
|
||||
| fun_gterm
|
||||
| choice_gterm
|
||||
| infix_gterm
|
||||
| arith_gterm
|
||||
| arith2_gterm
|
||||
| event_gterm
|
||||
| injevent_gterm
|
||||
| implies_gterm
|
||||
| paren_gterm
|
||||
| sample_gterm
|
||||
| let_gterm
|
||||
ident_gterm: IDENT
|
||||
fun_gterm: IDENT "(" _maybe_empty_seq{gterm} ")" ["phase" NAT] ["@" IDENT]
|
||||
choice_gterm: "choice" "[" gterm "," gterm "]"
|
||||
infix_gterm: gterm INFIX gterm
|
||||
arith_gterm: gterm ( "+" | "-" ) NAT
|
||||
arith2_gterm: NAT "+" gterm
|
||||
event_gterm: "event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
injevent_gterm: "inj-event" "(" _maybe_empty_seq{gterm} ")" ["@" IDENT]
|
||||
implies_gterm: gterm "==>" gterm
|
||||
paren_gterm: "(" _maybe_empty_seq{gterm} ")"
|
||||
sample_gterm: "new" IDENT [ "[" [ gbinding ] "]" ]
|
||||
let_gterm: "let" IDENT "=" gterm "in" gterm
|
||||
|
||||
gbinding: "!" NAT "=" gterm [";" gbinding]
|
||||
| IDENT "=" gterm [";" gbinding]
|
||||
|
||||
nounifdecl: "let" IDENT "=" gformat "in" nounifdecl
|
||||
| IDENT ["(" _maybe_empty_seq{gformat} ")" ["phase" NAT]]
|
||||
gformat: IDENT
|
||||
| "*" IDENT
|
||||
| IDENT "(" _maybe_empty_seq{gformat} ")"
|
||||
| "choice" "[" gformat "," gformat "]"
|
||||
| "not" "(" _maybe_empty_seq{gformat} ")"
|
||||
| "new" IDENT [ "[" [ fbinding ] "]" ]
|
||||
| "let" IDENT "=" gformat "in" gformat
|
||||
fbinding: "!" NAT "=" gformat [";" fbinding]
|
||||
| IDENT "=" gformat [";" fbinding]
|
||||
nounifoption: "hypothesis"
|
||||
| "conclusion"
|
||||
| "ignoreAFewTimes"
|
||||
| "inductionOn" "=" IDENT
|
||||
| "inductionOn" "=" "{" _non_empty_seq{IDENT} "}"
|
||||
|
||||
pterm: IDENT
|
||||
| NAT
|
||||
| "(" _maybe_empty_seq{pterm} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pterm} ")"
|
||||
| choice_pterm
|
||||
| pterm ("+" | "-") NAT
|
||||
| NAT "+" pterm
|
||||
| pterm INFIX pterm
|
||||
| not_pterm
|
||||
| sample_pterm
|
||||
| if_pterm
|
||||
| let_pterm
|
||||
| insert_pterm
|
||||
| get_pterm
|
||||
| event_pterm
|
||||
choice_pterm: "choice[" pterm "," pterm "]"
|
||||
if_pterm: "if" pterm "then" pterm [ "else" pterm ]
|
||||
not_pterm: "not" "(" pterm ")"
|
||||
let_pterm: "let" pattern "=" pterm "in" pterm [ "else" pterm ]
|
||||
| IDENT [":" typeid] "<-" pterm ";" pterm
|
||||
| "let" typedecl "suchthat" pterm "in" pterm [ "else" pterm ]
|
||||
sample_pterm: "new" IDENT [ "[" _maybe_empty_seq{IDENT} "]" ] ":" typeid [";" pterm]
|
||||
| IDENT "<-R" typeid [";" pterm]
|
||||
insert_pterm: "insert" IDENT "(" _maybe_empty_seq{pterm} ")" ";" pterm
|
||||
event_pterm: "event" IDENT [ "(" _maybe_empty_seq{pterm} ")" ] ";" pterm
|
||||
get_pterm: IDENT "(" _maybe_empty_seq{pattern} ")" [ "suchthat" pterm ] options{OPTIONS_PROCESS} [ "in" pterm [ "else" pterm ] ]
|
||||
pattern: IDENT [":" typeid]
|
||||
| "_" [ ":" typeid ]
|
||||
| NAT
|
||||
| pattern "+" NAT
|
||||
| NAT "+" pattern
|
||||
| "(" _maybe_empty_seq{pattern} ")"
|
||||
| IDENT "(" _maybe_empty_seq{pattern} ")"
|
||||
| "=" pterm
|
||||
mayfailterm: term
|
||||
| "fail"
|
||||
mayfailterm_seq: "(" _non_empty_seq{mayfailterm} ")"
|
||||
typedecl: _non_empty_seq{IDENT} ":" typeid [ "," typedecl ]
|
||||
failtypedecl: _non_empty_seq{IDENT} ":" typeid [ "or fail" ] [ "," failtypedecl ]
|
||||
|
||||
decl: type_decl
|
||||
| channel_decl
|
||||
| free_decl
|
||||
| const_decl
|
||||
| fun_decl
|
||||
| letfun_decl
|
||||
| reduc_decl
|
||||
| fun_reduc_decl
|
||||
| equation_decl
|
||||
| pred_decl
|
||||
| table_decl
|
||||
| let_decl
|
||||
| set_settings_decl
|
||||
| event_decl
|
||||
| query_decl
|
||||
| axiom_decl
|
||||
| restriction_decl
|
||||
| lemma_decl
|
||||
| noninterf_decl
|
||||
| weaksecret_decl
|
||||
| not_decl
|
||||
| select_decl
|
||||
| noselect_decl
|
||||
| nounif_decl
|
||||
| elimtrue_decl
|
||||
| clauses_decl
|
||||
| module_decl
|
||||
#| param_decl
|
||||
#| proba_decl
|
||||
#| letproba_decl
|
||||
#| proof_decl
|
||||
#| def_decl
|
||||
#| expand_decl
|
||||
|
||||
type_decl: "type" IDENT options{OPTIONS_TYPE} "."
|
||||
channel_decl: "channel" _non_empty_seq{IDENT} "."
|
||||
free_decl: "free" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FREE_REDUC} "."
|
||||
const_decl: "const" _non_empty_seq{IDENT} ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
fun_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid options{OPTIONS_FUN_CONST} "."
|
||||
letfun_decl: "letfun" IDENT [ "(" [ typedecl ] ")" ] "=" pterm "."
|
||||
reduc_decl: "reduc" eqlist options{OPTIONS_FREE_REDUC} "."
|
||||
fun_reduc_decl: "fun" IDENT "(" _maybe_empty_seq{typeid} ")" ":" typeid "reduc" mayfailreduc options{OPTIONS_FUN_CONST} "."
|
||||
equation_decl: "equation" eqlist options{OPTIONS_EQUATION} "."
|
||||
pred_decl: "pred" IDENT [ "(" [ _maybe_empty_seq{typeid} ] ")" ] options{OPTIONS_PRED} "."
|
||||
table_decl: IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
let_decl: "let" IDENT [ "(" [ typedecl ] ")" ] "=" process "."
|
||||
|
||||
BOOL : "true" | "false"
|
||||
NONE: "none"
|
||||
FULL: "full"
|
||||
ALL: "all"
|
||||
FUNC: IDENT
|
||||
ignoretype_options: BOOL | ALL | NONE | "attacker"
|
||||
boolean_settings_names: "privateCommOnPublicTerms"
|
||||
| "rejectChoiceTrueFalse"
|
||||
| "rejectNoSimplif"
|
||||
| "allowDiffPatterns"
|
||||
| "inductionQueries"
|
||||
| "inductionLemmas"
|
||||
| "movenew"
|
||||
| "movelet"
|
||||
| "stopTerm"
|
||||
| "removeEventsForLemma"
|
||||
| "simpEqAll"
|
||||
| "eqInNames"
|
||||
| "preciseLetExpand"
|
||||
| "expandSimplifyIfCst"
|
||||
| "featureFuns"
|
||||
| "featureNames"
|
||||
| "featurePredicates"
|
||||
| "featureEvents"
|
||||
| "featureTables"
|
||||
| "featureDepth"
|
||||
| "featureWidth"
|
||||
| "simplifyDerivation"
|
||||
| "abbreviateDerivation"
|
||||
| "explainDerivation"
|
||||
| "unifyDerivation"
|
||||
| "reconstructDerivation"
|
||||
| "displayDerivation"
|
||||
| "traceBacktracking"
|
||||
| "interactiveSwapping"
|
||||
| "color"
|
||||
| "verboseLemmas"
|
||||
| "abbreviateClauses"
|
||||
| "removeUselessClausesBeforeDisplay"
|
||||
| "verboseEq"
|
||||
| "verboseDestructors"
|
||||
| "verboseTerm"
|
||||
| "verboseStatistics"
|
||||
| "verboseRules"
|
||||
| "verboseBase"
|
||||
| "verboseRedundant"
|
||||
| "verboseCompleted"
|
||||
| "verboseGoalReachable"
|
||||
|
||||
_decl_pair{name, value}: "set" name "=" value "."
|
||||
|
||||
set_settings_boolean_decl: _decl_pair{boolean_settings_names, BOOL}
|
||||
|
||||
ignore_types_values: BOOL | "all" | "none" | "attacker"
|
||||
simplify_process_values: BOOL | "interactive"
|
||||
precise_actions_values: BOOL | "trueWithoutArgsInNames"
|
||||
redundant_hyp_elim_values: BOOL | "beginOnly"
|
||||
reconstruct_trace_values: BOOL | "n"
|
||||
attacker_values: "active" | "passive"
|
||||
key_compromise_values: "none" | "approx" | "strict"
|
||||
predicates_implementable: "check" | "nocheck"
|
||||
application_values: "instantiate" | "full" | "none" | "discard"
|
||||
max_values: "none" | "n"
|
||||
sel_fun_values: "TermMaxsize" | "Term"| "NounifsetMaxsize" | "Nounifset"
|
||||
redundancy_elim_values: "best" | "simple" | "no"
|
||||
nounif_ignore_a_few_times_values: "none" | "auto" | "all"
|
||||
nounif_ignore_ntimes_values: "n"
|
||||
trace_display_values: "short" | "long" | "none"
|
||||
verbose_clauses_values: "none" | "explained" | "short"
|
||||
set_settings_decl: set_settings_boolean_decl
|
||||
| _decl_pair{"ignoreTypes", ignore_types_values}
|
||||
| _decl_pair{"simplifyProcess", simplify_process_values}
|
||||
| _decl_pair{"preciseActions", precise_actions_values}
|
||||
| _decl_pair{"redundantHypElim", redundant_hyp_elim_values}
|
||||
| _decl_pair{"reconstructTrace", reconstruct_trace_values}
|
||||
| _decl_pair{"attacker", attacker_values}
|
||||
| _decl_pair{"keyCompromise", key_compromise_values}
|
||||
| _decl_pair{"predicatesImplementable", predicates_implementable}
|
||||
| _decl_pair{"saturationApplication", application_values}
|
||||
| _decl_pair{"verificationApplication", application_values}
|
||||
| _decl_pair{"maxDepth", max_values}
|
||||
| _decl_pair{"maxHyp", max_values}
|
||||
| _decl_pair{"selFun", sel_fun_values}
|
||||
| _decl_pair{"redundancyElim", redundancy_elim_values}
|
||||
| _decl_pair{"nounifIgnoreAFewTimes", nounif_ignore_a_few_times_values}
|
||||
| _decl_pair{"nounifIgnoreNtimes", nounif_ignore_ntimes_values}
|
||||
| _decl_pair{"traceDisplay", trace_display_values}
|
||||
| _decl_pair{"verboseClauses", verbose_clauses_values}
|
||||
| set_strategy
|
||||
| set_symb_order
|
||||
|
||||
_swap_strategy_seq{x}: x ("->" x)*
|
||||
set_strategy: "set" "swapping" "=" _swap_strategy_seq{TAG} "."
|
||||
_symb_ord_seq{x}: x (">" x)*
|
||||
set_symb_order: "set" "symbOrder" "=" _symb_ord_seq{FUNC} "."
|
||||
|
||||
event_decl: "event" IDENT ["(" _maybe_empty_seq{typeid} ")"] "."
|
||||
query_decl: "query" [ typedecl ";"] query options{OPTIONS_QUERY} "."
|
||||
|
||||
axiom_decl: "axiom" [ typedecl ";"] lemma options{OPTIONS_AXIOM} "."
|
||||
restriction_decl: "restriction" [ typedecl ";"] lemma options{OPTIONS_RESTRICTION} "."
|
||||
lemma_decl: "lemma" [ typedecl ";"] lemma options{OPTIONS_LEMMA} "."
|
||||
|
||||
noninterf_decl: [ typedecl ";"] _maybe_empty_seq{nidecl} "."
|
||||
weaksecret_decl: "weaksecret" IDENT "."
|
||||
not_decl: "not" [ typedecl ";"] gterm "."
|
||||
|
||||
INT: NAT | "-" NAT
|
||||
select_decl: "select" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
noselect_decl: "noselect" [ typedecl ";"] nounifdecl [ "/" INT ] [ "[" _non_empty_seq{nounifoption} "]" ] "."
|
||||
nounif_decl: "nounif" [ typedecl ";"] nounifdecl [ "/" INT ] [ "["_non_empty_seq{nounifoption} "]" ] "."
|
||||
|
||||
elimtrue_decl: "elimtrue" [ failtypedecl ";" ] term "."
|
||||
clauses_decl: "clauses" clauses "."
|
||||
|
||||
module_decl: "@module" " " IDENT
|
||||
|
||||
# TODO: finish defining these (comes from Cryptoverif)
|
||||
#param_decl: "param" _non_empty_seq{IDENT} options "."
|
||||
#proba_decl: "proba" IDENT ["(...)"] options "."
|
||||
#letproba_decl: "letproba" IDENT ["(...)"] "= ..." "."
|
||||
#proof_decl: "proof" "{" proof "}"
|
||||
#def_decl: "def" IDENT "(" _maybe_empty_seq{typeid} ")" "{" decl* "}"
|
||||
#expand_decl: "expand" IDENT "(" _maybe_empty_seq{typeid} ")" "."
|
||||
|
||||
nidecl: IDENT [ "among" "(" _non_empty_seq{term} ")" ]
|
||||
equality: term "=" term
|
||||
| "let" IDENT "=" term "in" equality
|
||||
mayfailequality: IDENT mayfailterm_seq "=" mayfailterm
|
||||
eqlist: [ "forall" typedecl ";" ] equality [ ";" eqlist ]
|
||||
clause: term
|
||||
| term "->" term
|
||||
| term "<->" term
|
||||
| term "<=>" term
|
||||
clauses: [ "forall" failtypedecl ";" ] clause [ ";" clauses ]
|
||||
mayfailreduc: [ "forall" failtypedecl ";" ] mayfailequality [ "otherwise" mayfailreduc ]
|
||||
NAT: DIGIT+
|
||||
phase: "phase" NAT [";" process]
|
||||
TAG: IDENT
|
||||
sync: "sync" NAT ["[" TAG "]"] [";" process]
|
||||
COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
%import common (WORD, DIGIT, NUMBER, WS) // imports from terminal library
|
||||
%ignore WS // Disregard spaces in text
|
||||
%ignore COMMENT
|
||||
230
marzipan/src/parser.py
Normal file
230
marzipan/src/parser.py
Normal file
@@ -0,0 +1,230 @@
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
from lark import Discard, Lark, Transformer
|
||||
from lark.reconstruct import Reconstructor
|
||||
|
||||
|
||||
class TreeVisitor(Transformer):
|
||||
# def __default_token__(self, token):
|
||||
# if token.type == "PROCESS":
|
||||
# return ""
|
||||
# elif token.type == "IDENT":
|
||||
# return token
|
||||
# # return f"<ident>{token}</ident>"
|
||||
|
||||
# def __default__(self, data, children, meta):
|
||||
# #return "\n".join([c for c in children if c])
|
||||
# amazing_str = ""
|
||||
# for c in children:
|
||||
# if c:
|
||||
# amazing_str += c
|
||||
# return f"<{data}> {amazing_str} </{data}>"
|
||||
|
||||
# lemma_decl_core: original_stuff
|
||||
# lemma_annotation: [LEMMA ESCAPED_STRING]
|
||||
# lemma_decl: lemma_annotation lemma_decl_core
|
||||
# lemma_decl: original_stuff
|
||||
def lemma_annotation(self, children):
|
||||
return Discard
|
||||
|
||||
def query_annotation(self, children):
|
||||
return Discard
|
||||
|
||||
|
||||
# taken from Page 17 in the ProVerif manual
|
||||
# At the moment, we do not reject a ProVerif model that uses reserved words as identifier,
|
||||
# because this caused problems with the LARK grammar. We plan to check this in a later
|
||||
# processing step.
|
||||
reserved_words = [
|
||||
"among",
|
||||
"axiom",
|
||||
"channel",
|
||||
"choice",
|
||||
"clauses",
|
||||
"const",
|
||||
"def",
|
||||
"diff",
|
||||
"do",
|
||||
"elimtrue",
|
||||
"else",
|
||||
"equation",
|
||||
"equivalence", # no rule yet (this is CryptoVerif-specific)
|
||||
"event",
|
||||
"expand",
|
||||
"fail",
|
||||
"for",
|
||||
"forall",
|
||||
"foreach",
|
||||
"free",
|
||||
"fun",
|
||||
"get",
|
||||
"if",
|
||||
"implementation", # no rule yet (this is CryptoVerif-specific)
|
||||
"in",
|
||||
"inj-event",
|
||||
"insert",
|
||||
"lemma",
|
||||
"let",
|
||||
"letfun",
|
||||
"letproba",
|
||||
"new",
|
||||
"noninterf",
|
||||
"noselect",
|
||||
"not",
|
||||
"nounif",
|
||||
"or",
|
||||
"otherwise",
|
||||
"out",
|
||||
"param",
|
||||
"phase",
|
||||
"pred",
|
||||
"proba",
|
||||
"process",
|
||||
"proof",
|
||||
"public_vars",
|
||||
"putbegin",
|
||||
"query",
|
||||
"reduc",
|
||||
"restriction",
|
||||
"secret",
|
||||
"select",
|
||||
"set",
|
||||
"suchthat",
|
||||
"sync",
|
||||
"table",
|
||||
"then",
|
||||
"type",
|
||||
"weaksecret",
|
||||
"yield",
|
||||
]
|
||||
|
||||
ident_regex = (
|
||||
"/^" + "".join(f"(?!{w}$)" for w in reserved_words) + "[a-zA-Z][a-zA-Z0-9À-ÿ'_]*/"
|
||||
)
|
||||
|
||||
"""
|
||||
It might be desirable to move the grammar files around such that we only need
|
||||
to import _main.lark and import all other .larks files in _main.lark.
|
||||
This does not work at the moment due to cyclic dependencies between the grammar files,
|
||||
so we are loading all files as strings and simply concatenating for now...
|
||||
"""
|
||||
# gawk_file = "./src/grammars/marzipan_awk.lark"
|
||||
# gproverif = "./src/grammars/proverif.lark"
|
||||
# gmain = "./src/grammar/main.lark"
|
||||
# gfile = gmain
|
||||
# with open(gfile) as f:
|
||||
# parser = Lark(
|
||||
# f
|
||||
# #grammar=grammar,
|
||||
# # debug=True,
|
||||
# # lexer_callbacks={"COMMENT": comments.append},
|
||||
# )
|
||||
# files = sorted(Path("./src/grammar").glob("*.lark"))
|
||||
# gfiles = [f for f in files if not f.name.startswith("_")]
|
||||
# gfiles = ["common.lark", "decl.lark", "process.lark", "query.lark", "term.lark"]
|
||||
# grammar = "\n".join(parent_dir + f.read_text() for f in gfiles)
|
||||
|
||||
parent_dir = "./src/grammar/"
|
||||
|
||||
common_rules = Path(parent_dir + "common.lark").read_text()
|
||||
decl_rules = Path(parent_dir + "decl.lark").read_text()
|
||||
process_rules = Path(parent_dir + "process.lark").read_text()
|
||||
query_rules = Path(parent_dir + "query.lark").read_text()
|
||||
lemma_rules = Path(parent_dir + "lemma.lark").read_text()
|
||||
term_rules = Path(parent_dir + "term.lark").read_text()
|
||||
|
||||
# marzipan additives
|
||||
common_rules = (
|
||||
"""
|
||||
QUERY: "@query"
|
||||
REACHABLE: "@reachable"
|
||||
LEMMA: "@lemma"
|
||||
"""
|
||||
+ common_rules
|
||||
)
|
||||
|
||||
|
||||
# add @query and @reachable to query_decl, @lemma to lemma_decl
|
||||
def modify_decl_rule(
|
||||
grammar: str, old_rule_name: str, annot_rule_name: str, annot_rule: str
|
||||
) -> str:
|
||||
old_decl_renamed = f"{old_rule_name}_core"
|
||||
rename_target = f"{old_rule_name}\s*:"
|
||||
# rename *_decl -> *_decl_core
|
||||
grammar, count = re.subn(rename_target, f"{old_decl_renamed}:", grammar, count=1)
|
||||
if count == 0:
|
||||
raise RuntimeError("*_decl not found!")
|
||||
# lemma_annotation: [LEMMA ESCAPED_STRING]
|
||||
annot_rule = f"{annot_rule_name}: {annot_rule}"
|
||||
# lemma_decl: lemma_annotation lemma_decl_core
|
||||
wrapper = f"{old_rule_name}: {annot_rule_name} {old_decl_renamed}"
|
||||
old_decl_target = f"{old_decl_renamed}\s*:"
|
||||
# get index of *_decl_core rule
|
||||
match = re.search(old_decl_target, grammar, flags=re.MULTILINE)
|
||||
if not match:
|
||||
raise RuntimeError("*_decl_core: rule not found after rename")
|
||||
insert_pos = match.start()
|
||||
grammar = (
|
||||
grammar[:insert_pos] + annot_rule + "\n" + wrapper + "\n" + grammar[insert_pos:]
|
||||
)
|
||||
return grammar
|
||||
|
||||
|
||||
query_rules = modify_decl_rule(
|
||||
query_rules, "query_decl", "query_annotation", "[(REACHABLE|QUERY) ESCAPED_STRING]"
|
||||
)
|
||||
|
||||
lemma_rules = modify_decl_rule(
|
||||
lemma_rules, "lemma_decl", "lemma_annotation", "[LEMMA ESCAPED_STRING]"
|
||||
)
|
||||
|
||||
grammar = (
|
||||
common_rules + decl_rules + process_rules + query_rules + lemma_rules + term_rules
|
||||
)
|
||||
|
||||
with open("./src/grammars/awk_proverif.lark", "w") as f:
|
||||
f.write(grammar)
|
||||
|
||||
parser = Lark(grammar, maybe_placeholders=False)
|
||||
|
||||
# COMMENT: /\(\*(\*(?!\))|[^*])*\*\)/
|
||||
# COMMENT: "(*" /(\*(?!\))|[^*])*/ "*)"
|
||||
# comment: /\(\*(?:(?!\(\*|\*\)).|(?R))*\*\)/
|
||||
|
||||
# TODO Open ProVerif compatibility questions
|
||||
# TODO * does it allow leading zeros for NAT?
|
||||
# TODO * tag is not defined? is it ident?
|
||||
# TODO * are spaces between "event" and ":" allowed?
|
||||
# TODO * spaces between "nat" and "("? "choice" and "["?
|
||||
|
||||
|
||||
def parsertest(input):
|
||||
parsetree = parser.parse(input)
|
||||
# tree.pydot__tree_to_png(parsetree, name + ".png")
|
||||
return parsetree
|
||||
|
||||
|
||||
"""
|
||||
parse in i.pv file using new awk-marzipan grammar,
|
||||
eliminate aliases from i.pv using lark TreeVisitor,
|
||||
then return o.pv (awk_prep)
|
||||
"""
|
||||
|
||||
|
||||
def parse_main(ipv_path, opv_path):
|
||||
with open(ipv_path, "r") as f:
|
||||
content = f.read()
|
||||
# content += "\nprocess main"
|
||||
|
||||
forest = parsertest(content)
|
||||
with open("i.pv", "w") as ipvf:
|
||||
ipvf.write(forest.pretty())
|
||||
|
||||
tree = TreeVisitor().transform(forest)
|
||||
# TODO: tree -> o.pv
|
||||
new_json = Reconstructor(parser).reconstruct(tree)
|
||||
|
||||
with open(opv_path, "w") as opvf:
|
||||
# opvf.write(tree.pretty())
|
||||
opvf.write(new_json)
|
||||
168
marzipan/src/splitter.py
Normal file
168
marzipan/src/splitter.py
Normal file
@@ -0,0 +1,168 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
"""
|
||||
ChatGPT generated code for initial proverif grammar split
|
||||
"""
|
||||
|
||||
# ---------- CONFIG ----------
|
||||
OUT = Path("grammar")
|
||||
OUT.mkdir(exist_ok=True)
|
||||
|
||||
# Raw grammar text (paste full grammar here)
|
||||
GRAMMAR = read("grammars/proverif.lark")
|
||||
|
||||
# Rules grouped by prefix – expandable later if needed
|
||||
GROUPS = {
|
||||
"common.lark": [
|
||||
"PROCESS",
|
||||
"YIELD",
|
||||
"CHANNEL",
|
||||
"IDENT",
|
||||
"ZERO",
|
||||
"INFIX",
|
||||
"typeid",
|
||||
"_non_empty_seq",
|
||||
"_maybe_empty_seq",
|
||||
"OPTIONS_", # all options-related rules
|
||||
"BOOL",
|
||||
"NONE",
|
||||
"FULL",
|
||||
"ALL",
|
||||
"FUNC",
|
||||
"ignoretype_options",
|
||||
"boolean_settings_names",
|
||||
"INT",
|
||||
"COMMENT",
|
||||
],
|
||||
"process.lark": [
|
||||
"start",
|
||||
"process",
|
||||
"bracketed_process",
|
||||
"piped_process",
|
||||
"replicated_process",
|
||||
"replicated_process_bounds",
|
||||
"sample_process",
|
||||
"let_process",
|
||||
"if_process",
|
||||
"in_process",
|
||||
"out_process",
|
||||
"insert_process",
|
||||
"event_process",
|
||||
"term",
|
||||
"phase",
|
||||
"sync",
|
||||
],
|
||||
"term.lark": [
|
||||
"gterm",
|
||||
"ident_gterm",
|
||||
"fun_gterm",
|
||||
"choice_gterm",
|
||||
"infix_gterm",
|
||||
"arith_gterm",
|
||||
"arith2_gterm",
|
||||
"event_gterm",
|
||||
"injevent_gterm",
|
||||
"implies_gterm",
|
||||
"paren_gterm",
|
||||
"sample_gterm",
|
||||
"let_gterm",
|
||||
"gbinding",
|
||||
"pterm",
|
||||
"choice_pterm",
|
||||
"if_pterm",
|
||||
"not_pterm",
|
||||
"let_pterm",
|
||||
"sample_pterm",
|
||||
"insert_pterm",
|
||||
"event_pterm",
|
||||
"get_pterm",
|
||||
"pattern",
|
||||
"mayfailterm",
|
||||
"mayfailterm_seq",
|
||||
"typedecl",
|
||||
"failtypedecl",
|
||||
],
|
||||
"decl.lark": [
|
||||
"decl",
|
||||
"type_decl",
|
||||
"channel_decl",
|
||||
"free_decl",
|
||||
"const_decl",
|
||||
"fun_decl",
|
||||
"letfun_decl",
|
||||
"reduc_decl",
|
||||
"fun_reduc_decl",
|
||||
"equation_decl",
|
||||
"pred_decl",
|
||||
"table_decl",
|
||||
"let_decl",
|
||||
"set_settings_decl",
|
||||
"event_decl",
|
||||
"select_decl",
|
||||
"noselect_decl",
|
||||
"nounif_decl",
|
||||
"elimtrue_decl",
|
||||
"clauses_decl",
|
||||
"module_decl",
|
||||
"nidecl",
|
||||
"equality",
|
||||
"mayfailequality",
|
||||
"eqlist",
|
||||
"clause",
|
||||
"clauses",
|
||||
"mayfailreduc",
|
||||
],
|
||||
"query.lark": [
|
||||
"query",
|
||||
"lemma",
|
||||
"nounifdecl",
|
||||
"gformat",
|
||||
"fbinding",
|
||||
"nounifoption",
|
||||
"TAG",
|
||||
],
|
||||
}
|
||||
|
||||
# ---------- SPLITTING LOGIC ----------
|
||||
rules = GRAMMAR.strip().splitlines()
|
||||
buckets = {k: [] for k in GROUPS}
|
||||
|
||||
|
||||
def match(rule, prefixes):
|
||||
return any(rule.startswith(p) for p in prefixes)
|
||||
|
||||
|
||||
current_target = None
|
||||
|
||||
for line in rules:
|
||||
striped = line.strip()
|
||||
if ":" in striped and not striped.startswith("%"):
|
||||
rule_name = striped.split(":")[0].strip()
|
||||
for module, prefixes in GROUPS.items():
|
||||
if match(rule_name, prefixes):
|
||||
current_target = module
|
||||
break
|
||||
if current_target:
|
||||
buckets[current_target].append(line)
|
||||
|
||||
# main.lark will import everything else
|
||||
main_lark = """start: decl* PROCESS process
|
||||
%import .common.*
|
||||
%import .process.*
|
||||
%import .term.*
|
||||
%import .decl.*
|
||||
%import .query.*
|
||||
%ignore WS
|
||||
%ignore COMMENT
|
||||
"""
|
||||
|
||||
# ---------- OUTPUT ----------
|
||||
for filename, lines in buckets.items():
|
||||
path = OUT / filename
|
||||
with open(path, "w") as f:
|
||||
f.write("\n".join(lines) + "\n")
|
||||
|
||||
(OUT / "main.lark").write_text(main_lark)
|
||||
|
||||
print("Grammar split completed into:", OUT)
|
||||
130
marzipan/src/util.py
Normal file
130
marzipan/src/util.py
Normal file
@@ -0,0 +1,130 @@
|
||||
from typing import Callable, Any, Tuple, List, TypeVar
|
||||
from types import ModuleType as Module
|
||||
from importlib import import_module
|
||||
from dataclasses import dataclass
|
||||
|
||||
T = TypeVar('T')
|
||||
|
||||
def setup_exports() -> Tuple[List[str], Callable[[T], T]]:
|
||||
__all__ = []
|
||||
|
||||
"""
|
||||
Helper to provide an export() function with little boilerplate.
|
||||
|
||||
```
|
||||
from marzipan.util import setup_exports
|
||||
(__all__, export) = setup_exports()
|
||||
```
|
||||
"""
|
||||
def export(what: T) -> T:
|
||||
match what:
|
||||
case str():
|
||||
__all__.append(what)
|
||||
case object(__name__ = name):
|
||||
__all__.append(name)
|
||||
case _:
|
||||
raise TypeError(
|
||||
f"Unsupported export type `{what}`: Export is neither `str` nor has it an attribute named `__name__`.")
|
||||
return what
|
||||
|
||||
return (__all__, export)
|
||||
|
||||
(__all__, export) = setup_exports()
|
||||
export(setup_exports)
|
||||
|
||||
@export
|
||||
def rename(name: str) -> Callable[[T], T]:
|
||||
def rename_impl(v: T) -> T:
|
||||
v.__name__ = name
|
||||
return v
|
||||
return rename_impl
|
||||
|
||||
@export
|
||||
def attempt(fn):
|
||||
# TODO: Documentation tests
|
||||
"""
|
||||
Call a function returning a tuple of (result, exception).
|
||||
|
||||
The following example uses safe_call to implement a checked_divide
|
||||
function that returns None if the division by zero is caught.
|
||||
|
||||
```python
|
||||
try_divide = attempt(lambda a, b: a/b)
|
||||
|
||||
def checked_divide(a, b):
|
||||
match try_divide(a, b):
|
||||
case (result, None):
|
||||
return result
|
||||
case (None, ZeroDivisionError()):
|
||||
return None
|
||||
case _:
|
||||
raise RuntimeError("Unreachable")
|
||||
|
||||
assert(checked_divide(1, 0) == None)
|
||||
assert(checked_divide(0, 1) == 0)
|
||||
assert(checked_divide(1, 1) == 1)
|
||||
```
|
||||
"""
|
||||
def retfn(*args, **kwargs):
|
||||
try:
|
||||
return (fn(*args, **kwargs), None)
|
||||
except Exception as e:
|
||||
return (None, e)
|
||||
retfn.__name__ = f"try_{fn.__name__}"
|
||||
return retfn
|
||||
|
||||
@export
|
||||
def scoped(fn: Callable[[], Any]) -> Any:
|
||||
"""
|
||||
Scoped variable assignment.
|
||||
|
||||
Just an alias for `call`. Use as a decorator to immediately call a function,
|
||||
assigning the return value to the function name.
|
||||
"""
|
||||
return fn()
|
||||
|
||||
@export
|
||||
def try_import(name : str) -> Tuple[Module | None, Exception | None]:
|
||||
return attempt(import_module)(name)
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Pkgs:
|
||||
__mod__: Module | None
|
||||
__prefix__: str | None
|
||||
|
||||
def __get__(self, k: str):
|
||||
return getattr(self, k)
|
||||
|
||||
def __getattribute__(self, k: str):
|
||||
match k:
|
||||
case "__mod__" | "__prefix__" | "__class__":
|
||||
# Access the underlying module value
|
||||
return super().__getattribute__(k)
|
||||
|
||||
match self:
|
||||
case Pkgs(None, None):
|
||||
# Import package from root
|
||||
return Pkgs(import_module(k), k)
|
||||
|
||||
# Try importing a subpackage
|
||||
name = f"{self.__prefix__}.{k}"
|
||||
match try_import(name):
|
||||
case (child, None):
|
||||
# Imported subpackage
|
||||
return Pkgs(child, name)
|
||||
case (_, ModuleNotFoundError()):
|
||||
# No such module; access module property instead
|
||||
return getattr(self.__mod__, k)
|
||||
case (_, err):
|
||||
# Unknown error, pass error on
|
||||
raise err
|
||||
|
||||
@scoped
|
||||
@export
|
||||
def pkgs() -> Pkgs:
|
||||
"""
|
||||
Global package scope.
|
||||
|
||||
`pkgs.marzipan` imports the package `marzipan`
|
||||
"""
|
||||
return Pkgs(None, None)
|
||||
265
marzipan/test-gpt-oss-2.py
Normal file
265
marzipan/test-gpt-oss-2.py
Normal file
@@ -0,0 +1,265 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Below is a **more “Pythonic”** rewrite of the original AWK‑to‑Python translator.
|
||||
# The logic is exactly the same – the same error messages, line numbers and exit
|
||||
# codes – but the code is organized into small, reusable functions, uses
|
||||
# `dataclasses`, type hints, `Path.read_text()`, `re.sub()` and other idiomatic
|
||||
# constructs. It is also easier to read and to extend.
|
||||
|
||||
|
||||
"""
|
||||
py_awk_translator.py
|
||||
|
||||
A line‑by‑line pre‑processor that implements the same behaviour as the
|
||||
original AWK script you posted (handling @module, @alias, @long‑alias,
|
||||
private‑variable expansion, @query/@reachable/@lemma checks and token‑wise
|
||||
alias substitution).
|
||||
|
||||
Usage
|
||||
|
||||
python3 py_awk_translator.py file1.pv file2.pv
|
||||
# or
|
||||
cat file.pv | python3 py_awk_translator.py
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Dict, Iterable
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Helper utilities
|
||||
# ----------------------------------------------------------------------
|
||||
TOKEN_RE = re.compile(r"[0-9A-Za-z_']")
|
||||
|
||||
def is_token_char(ch: str) -> bool:
|
||||
"""Return True if *ch* can be part of an identifier token."""
|
||||
return bool(TOKEN_RE.fullmatch(ch))
|
||||
|
||||
def die(msg: str, fname: str, lineno: int) -> None:
|
||||
"""Print an error to stderr and exit with status 1 (exactly like AWK)."""
|
||||
sys.stderr.write(f"{fname}:{lineno}: {msg}\n")
|
||||
sys.exit(1)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Core translator – holds the mutable state that the AWK script kept in
|
||||
# global variables.
|
||||
# ----------------------------------------------------------------------
|
||||
@dataclass
|
||||
class Translator:
|
||||
"""Collects state while processing a file line‑by‑line."""
|
||||
|
||||
# final output buffer
|
||||
out: list[str] = field(default_factory=list)
|
||||
|
||||
# current @module name (used when expanding "~")
|
||||
module: str = ""
|
||||
|
||||
# simple one‑line aliases: name → replacement text
|
||||
aliases: Dict[str, str] = field(default_factory=dict)
|
||||
|
||||
# multi‑line alias handling
|
||||
long_name: str = ""
|
||||
long_value: str = ""
|
||||
|
||||
# error flag – mirrors the AWK variable `err`
|
||||
err: int = 0
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public entry point for a single line
|
||||
# ------------------------------------------------------------------
|
||||
def process(self, raw: str, fname: str, lineno: int) -> None:
|
||||
"""Apply all transformation rules to *raw* and store the result."""
|
||||
line = raw.rstrip("\n") # keep a copy for error messages
|
||||
original = line # keep the untouched line for later
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 1️⃣ @module
|
||||
# --------------------------------------------------------------
|
||||
if line.startswith("@module"):
|
||||
parts = line.split(maxsplit=1)
|
||||
self.module = parts[1] if len(parts) > 1 else ""
|
||||
self.aliases.clear()
|
||||
line = ""
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 2️⃣ @alias
|
||||
# --------------------------------------------------------------
|
||||
elif line.startswith("@alias"):
|
||||
for token in line.split()[1:]:
|
||||
if "=" in token:
|
||||
name, value = token.split("=", 1)
|
||||
self.aliases[name] = value
|
||||
line = ""
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 3️⃣ @long-alias‑end
|
||||
# --------------------------------------------------------------
|
||||
elif line.startswith("@long-alias-end"):
|
||||
if not self.long_name:
|
||||
die("Long alias not started", fname, lineno)
|
||||
# collapse multiple spaces → single space, strip trailing space
|
||||
self.long_value = re.sub(r" +", " ", self.long_value).strip()
|
||||
self.aliases[self.long_name] = self.long_value
|
||||
self.long_name = self.long_value = ""
|
||||
line = ""
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 4️⃣ @long-alias (start)
|
||||
# --------------------------------------------------------------
|
||||
elif line.startswith("@long-alias"):
|
||||
parts = line.split(maxsplit=1)
|
||||
self.long_name = parts[1] if len(parts) > 1 else ""
|
||||
self.long_value = ""
|
||||
line = ""
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 5️⃣ PRIVATE__ detection (illegal use of "~")
|
||||
# --------------------------------------------------------------
|
||||
elif "PRIVATE__" in line:
|
||||
die(
|
||||
"Used private variable without ~:\n\n"
|
||||
f" {lineno} > {original}",
|
||||
fname,
|
||||
lineno,
|
||||
)
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 6️⃣ @query / @reachable / @lemma validation
|
||||
# --------------------------------------------------------------
|
||||
elif re.search(r"@(query|reachable|lemma)", line):
|
||||
if not re.search(r'@(query|reachable|lemma)\s+"[^"]*"', line):
|
||||
die(
|
||||
"@query or @reachable statement without parameter:\n\n"
|
||||
f" {lineno} > {original}",
|
||||
fname,
|
||||
lineno,
|
||||
)
|
||||
# replace the quoted part with blanks (preserve line length)
|
||||
m = re.search(r'@(query|reachable|lemma)\s+"[^"]*"', line)
|
||||
start, end = m.span()
|
||||
line = line[:start] + " " * (end - start) + line[end:]
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 7️⃣ Expand "~" to the private‑variable prefix
|
||||
# --------------------------------------------------------------
|
||||
if "~" in line:
|
||||
line = line.replace("~", f"PRIVATE__{self.module}__")
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 8️⃣ Token‑wise alias substitution (the long AWK loop)
|
||||
# --------------------------------------------------------------
|
||||
line = self._expand_aliases(line)
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 9️⃣ Accumulate a multi‑line alias, if we are inside one
|
||||
# --------------------------------------------------------------
|
||||
if self.long_name:
|
||||
self.long_value += line + " "
|
||||
line = "" # the line itself must not appear in output
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# 🔟 Store the (possibly empty) line for final output
|
||||
# --------------------------------------------------------------
|
||||
self.out.append(line + "\n")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Helper that implements the token‑wise alias replacement
|
||||
# ------------------------------------------------------------------
|
||||
def _expand_aliases(self, text: str) -> str:
|
||||
"""Replace every whole‑token alias in *text* with its value."""
|
||||
i = 0
|
||||
result = ""
|
||||
|
||||
while i < len(text):
|
||||
# a = previous char, c = current char
|
||||
a = text[i - 1] if i > 0 else ""
|
||||
c = text[i]
|
||||
|
||||
# If we are already inside a token, just move forward
|
||||
if i > 0 and is_token_char(a):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# If the current char does not start a token, skip it
|
||||
if not is_token_char(c):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# At a token boundary – try to match any alias
|
||||
# ----------------------------------------------------------
|
||||
matched = False
|
||||
for name, value in self.aliases.items():
|
||||
if text.startswith(name, i):
|
||||
after = text[i + len(name) : i + len(name) + 1]
|
||||
if is_token_char(after): # name is only a prefix
|
||||
continue
|
||||
# Alias matches – replace it
|
||||
result += text[:i] + value
|
||||
text = text[i + len(name) :] # continue scanning the suffix
|
||||
i = 0
|
||||
matched = True
|
||||
break
|
||||
|
||||
if not matched:
|
||||
i += 1
|
||||
|
||||
return result + text
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Finalisation
|
||||
# ------------------------------------------------------------------
|
||||
def finish(self) -> None:
|
||||
"""Write the accumulated output to stdout (unless an error occurred)."""
|
||||
if self.err == 0:
|
||||
sys.stdout.write("".join(self.out))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Command‑line driver
|
||||
# ----------------------------------------------------------------------
|
||||
def _process_path(path: Path, translator: Translator) -> None:
|
||||
"""Read *path* line‑by‑line and feed it to *translator*."""
|
||||
for lineno, raw in enumerate(path.read_text(encoding="utf-8").splitlines(True), start=1):
|
||||
translator.process(raw, str(path), lineno)
|
||||
|
||||
def main() -> None:
|
||||
translator = Translator()
|
||||
|
||||
# No file arguments → read from stdin (named "<stdin>")
|
||||
if len(sys.argv) == 1:
|
||||
# stdin may contain multiple lines; we treat it as a single “virtual”
|
||||
# file so that line numbers are still correct.
|
||||
for lineno, raw in enumerate(sys.stdin, start=1):
|
||||
translator.process(raw, "<stdin>", lineno)
|
||||
else:
|
||||
for name in sys.argv[1:]:
|
||||
p = Path(name)
|
||||
if not p.is_file():
|
||||
sys.stderr.write(f"File not found: {name}\n")
|
||||
sys.exit(1)
|
||||
_process_path(p, translator)
|
||||
|
||||
translator.finish()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
## What makes this version more Pythonic?
|
||||
|
||||
# | Aspect | Original style | Refactored style |
|
||||
# |--------|----------------|------------------|
|
||||
# | **State handling** | Global variables (`buf`, `module`, …) | `@dataclass Translator` encapsulates all mutable state |
|
||||
# | **Regular‑expression reuse** | Re‑compiled on every call (`match`, `gsub`) | Compiled once (`TOKEN_RE`) and reused |
|
||||
# | **String manipulation** | Manual `substr`, concatenation in loops | Slicing, `str.replace`, `re.sub` for clarity |
|
||||
# | **Loop logic** | `for (i=1; i<length($0); i+=1)` with many manual index tricks | A single `while` loop with early‑continue guards; the inner alias‑search is a clean `for name, value in self.aliases.items()` |
|
||||
# | **Error handling** | `print(... > "/dev/stderr")` and `exit(1)` | Dedicated `die()` helper that writes to `stderr` and exits |
|
||||
# | **File I/O** | Manual `while (getline ...)` in AWK → `for` over `sys.stdin` / `Path.read_text()` | Uses `Path.read_text()` and `enumerate` for line numbers |
|
||||
# | **Readability** | Mixed AWK‑style comments, one‑liner `if` statements | Docstrings, section comments, type hints, and small helper methods |
|
||||
# | **Extensibility** | Adding a new rule required editing a monolithic block | New rules can be added as separate `elif` blocks or new methods without touching the core loop |
|
||||
|
||||
# The script can be saved as `py_awk_translator.py`, made executable (`chmod +x py_awk_translator.py`), and used exactly like the original AWK program while being easier to maintain and understand.
|
||||
292
marzipan/test-gpt-oss.py
Normal file
292
marzipan/test-gpt-oss.py
Normal file
@@ -0,0 +1,292 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# **Python 3 translation of the AWK script**
|
||||
|
||||
# Below is a drop‑in replacement that can be used the same way as the original
|
||||
# `awk` program (give it one or more file names, or let it read from *stdin*).
|
||||
# All the logic of the AWK version is kept – the only difference is that the
|
||||
# code is now ordinary, readable Python 3.
|
||||
|
||||
|
||||
"""
|
||||
translate_awk_to_py.py
|
||||
|
||||
A line‑by‑line translator for the “@module / @alias / @long‑alias …”
|
||||
pre‑processor that was originally written in AWK. The behaviour is
|
||||
identical to the AWK script you posted, including the exact error
|
||||
messages and exit codes.
|
||||
|
||||
Usage
|
||||
|
||||
python3 translate_awk_to_py.py file1.pv file2.pv
|
||||
# or
|
||||
cat file.pv | python3 translate_awk_to_py.py
|
||||
|
||||
The script prints the transformed source to *stdout* and writes any
|
||||
diagnostic messages to *stderr* (exactly like the AWK version).
|
||||
"""
|
||||
|
||||
import sys
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Helper functions
|
||||
# ----------------------------------------------------------------------
|
||||
def istok(ch: str) -> bool:
|
||||
"""Return True if *ch* is a token character (alnum, '_' or ''')."""
|
||||
return bool(re.match(r"[0-9a-zA-Z_']", ch))
|
||||
|
||||
def error(msg: str, fname: str, lineno: int) -> None:
|
||||
"""Print an error message to stderr and exit with status 1."""
|
||||
sys.stderr.write(f"{fname}:{lineno}: {msg}\n")
|
||||
sys.exit(1)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Main processing class (keeps the same global state as the AWK script)
|
||||
# ----------------------------------------------------------------------
|
||||
class Translator:
|
||||
def __init__(self):
|
||||
self.buf = "" # final output buffer
|
||||
self.module = "" # current @module name
|
||||
self.err = 0 # error flag (mirrors AWK's)
|
||||
self.long_alias_name = "" # name of a multi‑line alias
|
||||
self.long_alias_value = "" # accumulated value of that alias
|
||||
self.aliases: dict[str, str] = {} # simple one‑line aliases
|
||||
|
||||
# ----------------------------------| AWK rule | Python implementation |
|
||||
# |----------|-----------------------|
|
||||
# | `BEGIN` block – initialise variables | `Translator.__init__` |
|
||||
# | `@module` line – set `module`, clear `aliases` | first `if` in `process_line` |
|
||||
# | `@alias` line – split `name=value` pairs into `aliases` | second `elif` |
|
||||
# | `@long-alias` / `@long-alias-end` handling | third/fourth `elif` blocks + the `if self.long_alias_name` section |
|
||||
# | Detection of illegal `PRIVATE__` usage | `elif "PRIVATE__" in orig_line` (the same string that the AWK script would have produced after the `~` replacement) |
|
||||
# | Validation of `@query|@reachable|@lemma` statements | `elif re.search(r"@(query|reachable|lemma)", …)` |
|
||||
# | Replacement of `~` with `PRIVATE__<module>__` | `line.replace("~", …)` |
|
||||
# | Token‑wise alias substitution (the long `for (i=1; …)` loop) | the `while i < len(line): …` loop that restarts from the beginning after each successful replacement |
|
||||
# | Accumulating the final output in `buf` | `self.buf += line + "\n"` |
|
||||
# | `END` block – print buffer if no error | `Translator.finish()` |
|
||||
|
||||
# The script can be saved as `translate_awk_to_py.py`, made executable (`chmod +x translate_awk_to_py.py`) and used exactly like the original AWK program. All error messages, line numbers and exit codes are identical, so any surrounding tooling that expects the AWK behaviour will continue to work.--------------------------------
|
||||
# Line‑by‑line processing (mirrors the order of the AWK rules)
|
||||
# ------------------------------------------------------------------
|
||||
def process_line(self, line: str, fname: str, lineno: int) -> None:
|
||||
"""Transform *line* according to all the rules."""
|
||||
# keep the original line for error reporting
|
||||
orig_line = line.rstrip("\n")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 1) @module
|
||||
# ------------------------------------------------------------------
|
||||
if orig_line.startswith("@module"):
|
||||
parts = orig_line.split()
|
||||
if len(parts) >= 2:
|
||||
self.module = parts[1]
|
||||
else:
|
||||
self.module = ""
|
||||
self.aliases.clear()
|
||||
line = "" # AWK does: $0 = ""
|
||||
# fall through – nothing else on this line matters
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 2) @alias
|
||||
# ------------------------------------------------------------------
|
||||
elif orig_line.startswith("@alias"):
|
||||
# everything after the keyword is a list of name=value pairs
|
||||
for token in orig_line.split()[1:]:
|
||||
if "=" in token:
|
||||
name, value = token.split("=", 1)
|
||||
self.aliases[name] = value
|
||||
line = ""
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 3) @long-alias-end
|
||||
# ------------------------------------------------------------------
|
||||
elif orig_line.startswith("@long-alias-end"):
|
||||
if not self.long_alias_name:
|
||||
error("Long alias not started", fname, lineno)
|
||||
# compress multiple spaces to a single space
|
||||
self.long_alias_value = re.sub(r" +", " ", self.long_alias_value)
|
||||
self.aliases[self.long_alias_name] = self.long_alias_value.strip()
|
||||
# reset the temporary variables
|
||||
self.long_alias_name = ""
|
||||
self.long_alias_value = ""
|
||||
line = ""
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 4) @long-alias (start of a multi‑line alias)
|
||||
# ------------------------------------------------------------------
|
||||
elif orig_line.startswith("@long-alias"):
|
||||
parts = orig_line.split()
|
||||
if len(parts) >= 2:
|
||||
self.long_alias_name = parts[1]
|
||||
self.long_alias_value = ""
|
||||
else:
|
||||
self.long_alias_name = ""
|
||||
self.long_alias_value = ""
|
||||
line = ""
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 5) PRIVATE__ detection (illegal use of "~")
|
||||
# ------------------------------------------------------------------
|
||||
elif "PRIVATE__" in orig_line:
|
||||
# The AWK version looks for the literal string PRIVATE__ (which
|
||||
# appears only after the "~" replacement). We keep the same
|
||||
# behaviour.
|
||||
error(
|
||||
"Used private variable without ~:\n\n"
|
||||
f" {lineno} > {orig_line}",
|
||||
fname,
|
||||
lineno,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 6) @query / @reachable / @lemma validation
|
||||
# ------------------------------------------------------------------
|
||||
elif re.search(r"@(query|reachable|lemma)", orig_line):
|
||||
# Must contain a quoted string after the keyword
|
||||
if not re.search(r'@(query|reachable|lemma)\s+"[^"]*"', orig_line):
|
||||
error(
|
||||
"@query or @reachable statement without parameter:\n\n"
|
||||
f" {lineno} > {orig_line}",
|
||||
fname,
|
||||
lineno,
|
||||
)
|
||||
# Replace the quoted part with spaces (preserve line length)
|
||||
m = re.search(r'@(query|reachable|lemma)\s+"[^"]*"', orig_line)
|
||||
start, end = m.start(), m.end()
|
||||
pre = orig_line[:start]
|
||||
mat = orig_line[start:end]
|
||||
post = orig_line[end:]
|
||||
mat_spaced = " " * len(mat)
|
||||
line = pre + mat_spaced + post
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 7) Replace "~" with the private‑variable prefix
|
||||
# ------------------------------------------------------------------
|
||||
else:
|
||||
# No special rule matched yet – we keep the line as‑is for now.
|
||||
line = orig_line
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 8) Insert the private‑variable prefix (if any "~" is present)
|
||||
# ------------------------------------------------------------------
|
||||
if "~" in line:
|
||||
line = line.replace("~", f"PRIVATE__{self.module}__")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 9) Alias substitution (token‑wise, exactly like the AWK loop)
|
||||
# ------------------------------------------------------------------
|
||||
# The algorithm walks through the line character by character,
|
||||
# looking for the start of a token. When a token matches a key in
|
||||
# *self.aliases* it is replaced by the stored value and the scan
|
||||
# restarts from the beginning of the (now shorter) line.
|
||||
i = 0
|
||||
minibuf = ""
|
||||
while i < len(line):
|
||||
# a = previous character, c = current character
|
||||
a = line[i - 1] if i > 0 else ""
|
||||
c = line[i]
|
||||
|
||||
# If we are already inside a token, just move on
|
||||
if i > 0 and istok(a):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# If the current character does NOT start a token, skip it
|
||||
if not istok(c):
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# --------------------------------------------------------------
|
||||
# We are at a token boundary – try to match any alias
|
||||
# --------------------------------------------------------------
|
||||
matched = False
|
||||
for alias, value in self.aliases.items():
|
||||
klen = len(alias)
|
||||
token = line[i : i + klen]
|
||||
after = line[i + klen : i + klen + 1] # char after the token
|
||||
|
||||
if token != alias:
|
||||
continue
|
||||
if istok(after): # alias is only a prefix of a longer token
|
||||
continue
|
||||
|
||||
# ---- alias matches -------------------------------------------------
|
||||
matched = True
|
||||
prefix = line[:i] # everything before the token
|
||||
suffix = line[i + klen :] # everything after the token
|
||||
minibuf += prefix + value
|
||||
line = suffix # continue scanning the suffix
|
||||
i = 0 # restart from the beginning
|
||||
break
|
||||
|
||||
if not matched:
|
||||
# No alias matched – keep the current character and move on
|
||||
i += 1
|
||||
|
||||
# Append whatever is left of the line after the last replacement
|
||||
line = minibuf + line
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 10) If we are inside a multi‑line alias, accumulate the line
|
||||
# ------------------------------------------------------------------
|
||||
if self.long_alias_name:
|
||||
self.long_alias_value += line + " "
|
||||
line = "" # the line itself must not appear in the output
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 11) Append the (possibly empty) line to the global buffer
|
||||
# ------------------------------------------------------------------
|
||||
self.buf += line + "\n"
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Final output
|
||||
# ------------------------------------------------------------------
|
||||
def finish(self) -> None:
|
||||
"""Print the accumulated buffer if no error occurred."""
|
||||
if self.err == 0:
|
||||
sys.stdout.write(self.buf)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ----------------------------------------------------------------------
|
||||
def main() -> None:
|
||||
translator = Translator()
|
||||
|
||||
# If no file name is given we read from stdin (named "<stdin>")
|
||||
if len(sys.argv) == 1:
|
||||
translator.process_line(sys.stdin.read(), "<stdin>", 1)
|
||||
else:
|
||||
for fname in sys.argv[1:]:
|
||||
path = Path(fname)
|
||||
try:
|
||||
with path.open(encoding="utf-8") as f:
|
||||
for lineno, raw in enumerate(f, start=1):
|
||||
translator.process_line(raw, str(path), lineno)
|
||||
except FileNotFoundError:
|
||||
sys.stderr.write(f"File not found: {fname}\n")
|
||||
sys.exit(1)
|
||||
|
||||
translator.finish()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
|
||||
### How the Python version mirrors the AWK script
|
||||
|
||||
# | AWK rule | Python implementation |
|
||||
# |----------|-----------------------|
|
||||
# | `BEGIN` block – initialise variables | `Translator.__init__` |
|
||||
# | `@module` line – set `module`, clear `aliases` | first `if` in `process_line` |
|
||||
# | `@alias` line – split `name=value` pairs into `aliases` | second `elif` |
|
||||
# | `@long-alias` / `@long-alias-end` handling | third/fourth `elif` blocks + the `if self.long_alias_name` section |
|
||||
# | Detection of illegal `PRIVATE__` usage | `elif "PRIVATE__" in orig_line` (the same string that the AWK script would have produced after the `~` replacement) |
|
||||
# | Validation of `@query|@reachable|@lemma` statements | `elif re.search(r"@(query|reachable|lemma)", …)` |
|
||||
# | Replacement of `~` with `PRIVATE__<module>__` | `line.replace("~", …)` |
|
||||
# | Token‑wise alias substitution (the long `for (i=1; …)` loop) | the `while i < len(line): …` loop that restarts from the beginning after each successful replacement |
|
||||
# | Accumulating the final output in `buf` | `self.buf += line + "\n"` |
|
||||
# | `END` block – print buffer if no error | `Translator.finish()` |
|
||||
|
||||
# The script can be saved as `translate_awk_to_py.py`, made executable (`chmod +x translate_awk_to_py.py`) and used exactly like the original AWK program. All error messages, line numbers and exit codes are identical, so any surrounding tooling that expects the AWK behaviour will continue to work.
|
||||
23
marzipan/test_parser.sh
Executable file
23
marzipan/test_parser.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env bash
|
||||
shopt -s nullglob globstar
|
||||
|
||||
proverif_repo=$1
|
||||
|
||||
# Prerequisites:
|
||||
# * built ProVerif
|
||||
# * ran ./test (and aborted it) such that the preparation scripts have been run
|
||||
|
||||
# Test pitype files
|
||||
for f in $proverif_repo/examples/pitype/**/*.pv; do
|
||||
[[ $f == *.m4.pv ]] && continue
|
||||
echo "$f"
|
||||
nix run .# -- parse "$f"
|
||||
done
|
||||
|
||||
|
||||
# Test cryptoverif files
|
||||
for f in $proverif_repo/examples/cryptoverif/**/*.pcv; do
|
||||
[[ $f == *.m4.pcv ]] && continue
|
||||
echo "$f"
|
||||
nix run .# -- parse "$f"
|
||||
done
|
||||
@@ -364,7 +364,7 @@ impl Rosenpass {
|
||||
// check the secret-key file is a valid key
|
||||
ensure!(
|
||||
SSk::load(&keypair.secret_key).is_ok(),
|
||||
"could not load public-key file {:?}: invalid key",
|
||||
"could not load secret-key file {:?}: invalid key",
|
||||
keypair.secret_key
|
||||
);
|
||||
}
|
||||
|
||||
25
supply-chain-CI.md
Normal file
25
supply-chain-CI.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Continuous Integration for supply chain protection
|
||||
|
||||
This repository's CI uses non-standard mechanisms to harmonize the usage of `dependabot` together with [`cargo vet`](https://mozilla.github.io/cargo-vet/). Since cargo-vet audits for new versions of crates are rarely immediately available once dependabots bumps the version,
|
||||
the exemptions for `cargo vet` have to be regenerated for each push request opened by dependabot. To make this work, some setup is neccessary to setup the CI. The required steps are as follows:
|
||||
|
||||
1. Create a mew user on github. For the purpose of these instructions, we will assume that its mail address is `ci@example.com` and that its username is `ci-bot`. Protect this user account as you would any other user account that you intend to gve write permissions to. For example, setup MFA or protect the email address of the user. Make sure to verify your e-mail.
|
||||
2. Add `ci-bot` as a member of your organizaton with write access to the repository.
|
||||
3. In your organization, go to "Settings" -> "Personal Access tokens" -> "Settings". There select "Allow access via fine-grained personal access tokens" and save. Depending on your preferences either choose "Require administrator approval" or "Do not require administrator approval".
|
||||
4. Create a new personal access token as `ci-bot` for the rosenpass repository. That is, in the settings for `ci-bot`, select "Developer settings" -> "Personal Access tokens" -> "Fine-grained tokens". Then click on "Generate new token". Enter a name of your choosing and choose an expiration date that you feel comfortable with. A shorter expiration period will requrie more manual management by you but is more secure than a longer one. Select your organization as the resource owner and select the rosenpass repository as the repository. Under "Repository permissions", grant "Read and write"-access to the "Contens" premission for the token. Grant no other permissions to the token, except for the read-only access to the "Metadata" permission, which is mandatory. Then generate the token and copy it for the next steps.
|
||||
5. If you chose "Require administrator approval" in step 3, approve the fine grained access token by, as a organization administrator, going to "Settings" -> "Personal Access tokens" -> "Pending requests" and grant the request.
|
||||
6. Now, with your account that has administrative permissions for the repository, open the settings page for the repository and select "Secrets and variables" -> "Actions" and click "New repository secret". In the name field enter "CI_BOT_PAT". This name is mandatory, since it is explicitly referenced in the supply-chain workflow. Below, enter the token that was generated in step 4.
|
||||
7. Analogously to step 6, open the settings page for the repository and select "Secrets and variables" -> "Dependabot" and click "New repository secret". In the name field enter "CI_BOT_PAT". This name is mandatory, since it is explicitly referenced in the supply-chain workflow. Below, enter the token that was generated in step 4.
|
||||
|
||||
## What this does
|
||||
|
||||
For the `cargo vet` check in the CI for dependabot, the `cargo vet`-exemptions have to automatically be regenerated, because otherwise this CI job will always fail for dependabot PRs. After the exemptions have been regenerated, they need to be commited and pushed to the PR. This invalidates the CI run that pushed the commit so that it does not show up in the PR anymore but does not trigger a new CI run. This is a [protection by Github](https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) to prevent infinite loops. However, in this case it prevents us from having a proper CI run for dependabot PRs. The solution to this is to execute `push` operation with a personal access token.
|
||||
|
||||
## Preventing infinite loops
|
||||
|
||||
The CI is configured to avoid infinite loops by only regenerating and pushing the `cargo vet` exemptions if the CI run happens with respect to a PR opened by dependabot and not for any other pushed or pull requests. In addition one of the following conditions has to be met:
|
||||
|
||||
- The last commit was performed by dependabot
|
||||
- The last commit message ends in `--regenerate-exemptions`
|
||||
|
||||
Summarizing, the exemptions are only regenerated in the context of pull requests opened by dependabot and, the last commit was was performed by dependabot or the last commit message ends in `--regenerate-exemptions`.
|
||||
@@ -142,7 +142,7 @@ version = "0.7.4"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.clap_mangen]]
|
||||
version = "0.2.24"
|
||||
version = "0.2.29"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.cmake]]
|
||||
@@ -257,10 +257,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.10.2"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.fastrand]]
|
||||
version = "2.3.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.findshlibs]]
|
||||
version = "0.10.2"
|
||||
criteria = "safe-to-run"
|
||||
@@ -285,10 +281,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.2.15"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.gimli]]
|
||||
version = "0.31.1"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.hash32]]
|
||||
version = "0.2.1"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -341,6 +333,10 @@ criteria = "safe-to-deploy"
|
||||
version = "2.1.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.io-uring]]
|
||||
version = "0.7.9"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.ipc-channel]]
|
||||
version = "0.18.3"
|
||||
criteria = "safe-to-run"
|
||||
@@ -370,7 +366,7 @@ version = "1.3.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.libc]]
|
||||
version = "0.2.169"
|
||||
version = "0.2.174"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.libcrux]]
|
||||
@@ -529,10 +525,6 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.15"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.pin-project-lite]]
|
||||
version = "0.2.16"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.pkg-config]]
|
||||
version = "0.3.31"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -581,14 +573,6 @@ criteria = "safe-to-deploy"
|
||||
version = "0.9.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.rand_chacha]]
|
||||
version = "0.9.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.rand_core]]
|
||||
version = "0.9.3"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.redox_syscall]]
|
||||
version = "0.5.9"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -658,7 +642,7 @@ version = "0.4.9"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.socket2]]
|
||||
version = "0.5.8"
|
||||
version = "0.6.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.spin]]
|
||||
@@ -702,7 +686,7 @@ version = "2.0.11"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.tokio]]
|
||||
version = "1.44.2"
|
||||
version = "1.47.0"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.tokio-macros]]
|
||||
@@ -733,10 +717,6 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.17"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.utf8parse]]
|
||||
version = "0.2.2"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.uuid]]
|
||||
version = "1.14.0"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -847,7 +827,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows-targets]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows-targets]]
|
||||
version = "0.52.6"
|
||||
@@ -859,7 +839,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_aarch64_gnullvm]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_aarch64_gnullvm]]
|
||||
version = "0.52.6"
|
||||
@@ -871,7 +851,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_aarch64_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_aarch64_msvc]]
|
||||
version = "0.52.6"
|
||||
@@ -883,7 +863,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_i686_gnu]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_i686_gnu]]
|
||||
version = "0.52.6"
|
||||
@@ -899,7 +879,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_i686_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_i686_msvc]]
|
||||
version = "0.52.6"
|
||||
@@ -911,7 +891,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_gnu]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_gnu]]
|
||||
version = "0.52.6"
|
||||
@@ -923,7 +903,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_gnullvm]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_gnullvm]]
|
||||
version = "0.52.6"
|
||||
@@ -935,7 +915,7 @@ criteria = "safe-to-deploy"
|
||||
|
||||
[[exemptions.windows_x86_64_msvc]]
|
||||
version = "0.48.5"
|
||||
criteria = "safe-to-deploy"
|
||||
criteria = "safe-to-run"
|
||||
|
||||
[[exemptions.windows_x86_64_msvc]]
|
||||
version = "0.52.6"
|
||||
|
||||
@@ -35,7 +35,7 @@ who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
user-id = 73222 # wasmtime-publish
|
||||
start = "2023-01-01"
|
||||
end = "2025-05-08"
|
||||
end = "2026-06-03"
|
||||
notes = """
|
||||
The Bytecode Alliance uses the `wasmtime-publish` crates.io account to automate
|
||||
publication of this crate from CI. This repository requires all PRs are reviewed
|
||||
@@ -144,6 +144,21 @@ who = "Dan Gohman <dev@sunfishcode.online>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.3.9 -> 0.3.10"
|
||||
|
||||
[[audits.bytecode-alliance.audits.fastrand]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.0.0 -> 2.0.1"
|
||||
notes = """
|
||||
This update had a few doc updates but no otherwise-substantial source code
|
||||
updates.
|
||||
"""
|
||||
|
||||
[[audits.bytecode-alliance.audits.fastrand]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.1.1 -> 2.3.0"
|
||||
notes = "Minor refactoring, nothing new."
|
||||
|
||||
[[audits.bytecode-alliance.audits.futures]]
|
||||
who = "Joel Dice <joel.dice@gmail.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -190,6 +205,18 @@ who = "Pat Hickey <pat@moreproductive.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.3.28 -> 0.3.31"
|
||||
|
||||
[[audits.bytecode-alliance.audits.gimli]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.29.0 -> 0.31.0"
|
||||
notes = "Various updates here and there, nothing too major, what you'd expect from a DWARF parsing crate."
|
||||
|
||||
[[audits.bytecode-alliance.audits.gimli]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.31.0 -> 0.31.1"
|
||||
notes = "No fundmanetally new `unsafe` code, some small refactoring of existing code. Lots of changes in tests, not as many changes in the rest of the crate. More dwarf!"
|
||||
|
||||
[[audits.bytecode-alliance.audits.heck]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -249,6 +276,12 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.0"
|
||||
notes = "I am the author of this crate."
|
||||
|
||||
[[audits.bytecode-alliance.audits.pin-project-lite]]
|
||||
who = "Alex Crichton <alex@alexcrichton.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.13 -> 0.2.14"
|
||||
notes = "No substantive changes in this update"
|
||||
|
||||
[[audits.bytecode-alliance.audits.pin-utils]]
|
||||
who = "Pat Hickey <phickey@fastly.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -301,6 +334,12 @@ criteria = "safe-to-deploy"
|
||||
version = "1.0.40"
|
||||
notes = "Found no unsafe or ambient capabilities used"
|
||||
|
||||
[[audits.embark-studios.audits.utf8parse]]
|
||||
who = "Johan Andersson <opensource@embark-studios.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.2.1"
|
||||
notes = "Single unsafe usage that looks sound, no ambient capabilities"
|
||||
|
||||
[[audits.fermyon.audits.oorandom]]
|
||||
who = "Radu Matei <radu.matei@fermyon.com>"
|
||||
criteria = "safe-to-run"
|
||||
@@ -411,6 +450,16 @@ delta = "1.0.1 -> 1.0.2"
|
||||
notes = "No changes to any .rs files or Rust code."
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.fastrand]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "1.9.0"
|
||||
notes = """
|
||||
`does-not-implement-crypto` is certified because this crate explicitly says
|
||||
that the RNG here is not cryptographically secure.
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromiumos/third_party/rust_crates/+/refs/heads/main/cargo-vet/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.glob]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -554,6 +603,20 @@ version = "0.1.46"
|
||||
notes = "Contains no unsafe"
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.pin-project-lite]]
|
||||
who = "David Koloski <dkoloski@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.2.9"
|
||||
notes = "Reviewed on https://fxrev.dev/824504"
|
||||
aggregated-from = "https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/third_party/rust_crates/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.pin-project-lite]]
|
||||
who = "David Koloski <dkoloski@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.9 -> 0.2.13"
|
||||
notes = "Audited at https://fxrev.dev/946396"
|
||||
aggregated-from = "https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/third_party/rust_crates/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.proc-macro-error-attr]]
|
||||
who = "George Burgess IV <gbiv@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -708,6 +771,24 @@ For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.rand_chacha]]
|
||||
who = "Lukasz Anforowicz <lukasza@chromium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.3.1"
|
||||
notes = """
|
||||
For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.rand_core]]
|
||||
who = "Lukasz Anforowicz <lukasza@chromium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.6.4"
|
||||
notes = """
|
||||
For more detailed unsafe review notes please see https://crrev.com/c/6362797
|
||||
"""
|
||||
aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
|
||||
|
||||
[[audits.google.audits.regex-syntax]]
|
||||
who = "Manish Goregaokar <manishearth@google.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1158,12 +1239,12 @@ version = "0.3.0"
|
||||
[[audits.isrg.audits.rand_chacha]]
|
||||
who = "David Cook <dcook@divviup.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.3.1"
|
||||
delta = "0.3.1 -> 0.9.0"
|
||||
|
||||
[[audits.isrg.audits.rand_core]]
|
||||
who = "David Cook <dcook@divviup.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.6.3"
|
||||
delta = "0.6.4 -> 0.9.3"
|
||||
|
||||
[[audits.isrg.audits.rayon]]
|
||||
who = "Brandon Pitman <bran@bran.land>"
|
||||
@@ -1379,6 +1460,25 @@ criteria = "safe-to-deploy"
|
||||
delta = "0.3.1 -> 0.3.3"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "1.9.0 -> 2.0.0"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.0.1 -> 2.1.0"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fastrand]]
|
||||
who = "Chris Martin <cmartin@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "2.1.0 -> 2.1.1"
|
||||
notes = "Fairly trivial changes, no chance of security regression."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.fnv]]
|
||||
who = "Bobby Holley <bobbyholley@gmail.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1409,6 +1509,23 @@ documentation.
|
||||
"""
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.gimli]]
|
||||
who = "Alex Franchuk <afranchuk@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.30.0"
|
||||
notes = """
|
||||
Unsafe code blocks are sound. Minimal dependencies used. No use of
|
||||
side-effectful std functions.
|
||||
"""
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.gimli]]
|
||||
who = "Chris Martin <cmartin@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.30.0 -> 0.29.0"
|
||||
notes = "No unsafe code, mostly algorithms and parsing. Very unlikely to cause security issues."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.hex]]
|
||||
who = "Simon Friedberger <simon@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
@@ -1428,11 +1545,15 @@ delta = "1.0.0 -> 0.1.2"
|
||||
notes = "Small refactor of some simple iterator logic, no unsafe code or capabilities."
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.rand_core]]
|
||||
who = "Mike Hommey <mh+mozilla@glandium.org>"
|
||||
[[audits.mozilla.audits.pin-project-lite]]
|
||||
who = "Nika Layzell <nika@thelayzells.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.6.3 -> 0.6.4"
|
||||
aggregated-from = "https://hg.mozilla.org/mozilla-central/raw-file/tip/supply-chain/audits.toml"
|
||||
delta = "0.2.14 -> 0.2.16"
|
||||
notes = """
|
||||
Only functional change is to work around a bug in the negative_impls feature
|
||||
(https://github.com/taiki-e/pin-project/issues/340#issuecomment-2432146009)
|
||||
"""
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/cargo-vet/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.rayon]]
|
||||
who = "Josh Stone <jistone@redhat.com>"
|
||||
@@ -1491,6 +1612,12 @@ criteria = "safe-to-deploy"
|
||||
delta = "1.0.43 -> 1.0.69"
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/glean/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.utf8parse]]
|
||||
who = "Nika Layzell <nika@thelayzells.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
delta = "0.2.1 -> 0.2.2"
|
||||
aggregated-from = "https://raw.githubusercontent.com/mozilla/cargo-vet/main/supply-chain/audits.toml"
|
||||
|
||||
[[audits.mozilla.audits.zeroize]]
|
||||
who = "Benjamin Beurdouche <beurdouche@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
|
||||
Reference in New Issue
Block a user