Compare commits


2 Commits

Author:  Karolin Varner
SHA1:    d58aa363cd
Message: fix: Add test for rosenpass_constant_time::compare being little endian
Date:    2024-03-10 19:25:00 +01:00

Author:  Paul Spooren
SHA1:    1b233bc600
Message: ci: enable cargo bench again
         It only takes a few seconds to run, enable it.
         Signed-off-by: Paul Spooren <mail@aparcar.org>
Date:    2024-03-07 12:21:55 +01:00
183 changed files with 3957 additions and 24046 deletions

View File

@@ -1,14 +0,0 @@
public_key = "rp-a-public-key"
secret_key = "rp-a-secret-key"
listen = ["127.0.0.1:9999"]
verbosity = "Verbose"
[api]
listen_path = []
listen_fd = []
stream_fd = []
[[peers]]
public_key = "rp-b-public-key"
endpoint = "127.0.0.1:9998"
key_out = "rp-b-key-out.txt"

View File

@@ -1,14 +0,0 @@
public_key = "rp-b-public-key"
secret_key = "rp-b-secret-key"
listen = ["127.0.0.1:9998"]
verbosity = "Verbose"
[api]
listen_path = []
listen_fd = []
stream_fd = []
[[peers]]
public_key = "rp-a-public-key"
endpoint = "127.0.0.1:9999"
key_out = "rp-a-key-out.txt"

View File

@@ -1,48 +0,0 @@
#!/bin/bash
iterations="$1"
sleep_time="$2"
config_a="$3"
config_b="$4"
PWD="$(pwd)"
EXEC="$PWD/target/release/rosenpass"
i=0
while [ "$i" -ne "$iterations" ]; do
  echo "=> Iteration $i"
  # flush the PSK files
  echo "A" >rp-a-key-out.txt
  echo "B" >rp-b-key-out.txt
  # start the two instances
  echo "Starting instance A"
  "$EXEC" exchange-config "$config_a" &
  PID_A=$!
  sleep "$sleep_time"
  echo "Starting instance B"
  "$EXEC" exchange-config "$config_b" &
  PID_B=$!
  # give the key exchange some time to complete
  sleep 3
  # kill the instances
  kill $PID_A
  kill $PID_B
  # compare the keys
  if cmp -s rp-a-key-out.txt rp-b-key-out.txt; then
    echo "Keys match"
  else
    echo "::warning title=Key Exchange Race Condition::The key exchange resulted in different keys. Delay was ${sleep_time}s."
    # TODO: set this to 1 when the race condition is fixed
    exit 0
  fi
  # give the instances some time to shut down
  sleep 2
  i=$((i + 1))
done
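A minimal usage sketch for this script: the positional arguments (iterations, sleep time, config A, config B) correspond to the variables read at the top, and the concrete paths follow the boot-race job of the Regressions workflow further down in this compare.

```bash
# Build the binary the script expects at target/release/rosenpass
cargo build --bin rosenpass --release

# Generate keys for the two peer configurations (paths as used by the boot-race CI job)
cargo run --release --bin rosenpass gen-keys .ci/boot_race/a.toml
cargo run --release --bin rosenpass gen-keys .ci/boot_race/b.toml

# Run 5 iterations with a 2-second delay between starting instance A and instance B
.ci/boot_race/run.sh 5 2 .ci/boot_race/a.toml .ci/boot_race/b.toml
```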

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env nu
use std log
use log *
# cd to git root
cd (git rev-parse --show-toplevel)
@@ -116,7 +116,6 @@ for system in ($targets | columns) {
} }
| filter {|it| $it.needed}
| each {|it| job-id $system $it.name}
| sort
)
mut new_job = {

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
iterations="$1"
sleep_time="$2"
PWD="$(pwd)"
EXEC="$PWD/target/release/rosenpass"
LOGS="$PWD/output/logs"
mkdir -p "$LOGS"
run_command() {
  local file=$1
  local log_file="$2"
  ("$EXEC" exchange-config "$file" 2>&1 | tee -a "$log_file") &
  echo $!
}
pids=()
(cd output/dut && run_command "configs/dut-$iterations.toml" "$LOGS/dut.log")
for (( x=0; x<iterations; x++ )); do
  (cd output/ate && run_command "configs/ate-$x.toml" "$LOGS/ate-$x.log") & pids+=($!)
done
sleep "$sleep_time"
lsof -i :9999 | awk 'NR!=1 {print $2}' | xargs kill
for (( x=0; x<iterations; x++ )); do
  port=$((x + 50000))
  lsof -i :$port | awk 'NR!=1 {print $2}' | xargs kill
done
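A minimal usage sketch for this script, following the multi-peer job of the Regressions workflow further down in this compare; the two positional arguments are the number of ATE peer instances to start and the time in seconds to let them run before the listeners are killed.

```bash
# Build the binary the script expects at target/release/rosenpass
cargo build --bin rosenpass --release

# Generate the DUT and ATE peer configurations consumed from output/*/configs/
python misc/generate_configs.py

# Start the DUT plus 100 ATE instances, then tear everything down after 20 seconds
.ci/run-regression.sh 100 20

# The CI job then checks that one key file per ATE instance was written
[ $(ls -1 output/ate/out | wc -l) -eq 100 ]
```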

View File

@@ -1 +0,0 @@
FROM ghcr.io/xtruder/nix-devcontainer:v1

View File

@@ -1,33 +0,0 @@
// For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at
// https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-existing-dockerfile
{
  "name": "devcontainer-project",
  "dockerFile": "Dockerfile",
  "context": "${localWorkspaceFolder}",
  "build": {
    "args": {
      "USER_UID": "${localEnv:USER_UID}",
      "USER_GID": "${localEnv:USER_GID}"
    }
  },
  // run arguments passed to docker
  "runArgs": ["--security-opt", "label=disable"],
  // disable command overriding and updating remote user ID
  "overrideCommand": false,
  "userEnvProbe": "loginShell",
  "updateRemoteUserUID": false,
  // build development environment on creation, make sure you already have shell.nix
  "onCreateCommand": "nix develop",
  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  "forwardPorts": [],
  "customizations": {
    "vscode": {
      "extensions": ["rust-lang.rust-analyzer", "tamasfe.even-better-toml"]
    }
  }
}

14
.github/codecov.yml vendored
View File

@@ -1,14 +0,0 @@
codecov:
  branch: main
coverage:
  status:
    project:
      default:
        # basic
        target: auto #default
        threshold: 5
        base: auto
        if_ci_failed: error #success, failure, error, ignore
        informational: false
        only_pulls: true
    patch: off

View File

@@ -1,10 +0,0 @@
version: 2
updates:
  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "daily"
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"

View File

@@ -1,63 +0,0 @@
name: Dependent Issues
on:
  issues:
    types:
      - opened
      - edited
      - closed
      - reopened
  pull_request_target:
    types:
      - opened
      - edited
      - closed
      - reopened
      # Makes sure we always add status check for PRs. Useful only if
      # this action is required to pass before merging. Otherwise, it
      # can be removed.
      - synchronize
  # Schedule a daily check. Useful if you reference cross-repository
  # issues or pull requests. Otherwise, it can be removed.
  schedule:
    - cron: "0 0 * * *"
jobs:
  check:
    permissions:
      issues: write
      pull-requests: write
      statuses: write
    runs-on: ubuntu-latest
    steps:
      - uses: z0al/dependent-issues@v1
        env:
          # (Required) The token to use to make API calls to GitHub.
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # (Optional) The token to use to make API calls to GitHub for remote repos.
          GITHUB_READ_TOKEN: ${{ secrets.GITHUB_READ_TOKEN }}
        with:
          # (Optional) The label to use to mark dependent issues
          label: dependent
          # (Optional) Enable checking for dependencies in issues.
          # Enable by setting the value to "on". Default "off"
          check_issues: off
          # (Optional) Ignore dependabot PRs.
          # Enable by setting the value to "on". Default "off"
          ignore_dependabot: off
          # (Optional) A comma-separated list of keywords. Default
          # "depends on, blocked by"
          keywords: depends on, blocked by
          # (Optional) A custom comment body. It supports `{{ dependencies }}` token.
          comment: >
            This PR/issue depends on:
            {{ dependencies }}
            By **[Dependent Issues](https://github.com/z0al/dependent-issues)** (🤖). Happy coding!

View File

@@ -13,10 +13,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Clone rosenpass-website repository
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
repository: rosenpass/rosenpass-website
ref: main

View File

@@ -6,11 +6,6 @@ on:
push:
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
i686-linux---default:
name: Build i686-linux.default
@@ -19,11 +14,11 @@ jobs:
needs:
- i686-linux---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -35,11 +30,11 @@ jobs:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -52,11 +47,11 @@ jobs:
needs:
- i686-linux---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -67,11 +62,11 @@ jobs:
runs-on:
- ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -84,11 +79,11 @@ jobs:
needs:
- x86_64-darwin---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -100,14 +95,13 @@ jobs:
- macos-13
needs:
- x86_64-darwin---rosenpass
- x86_64-darwin---rp
- x86_64-darwin---rosenpass-oci-image
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -119,32 +113,16 @@ jobs:
- macos-13
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.x86_64-darwin.rosenpass --print-build-logs
x86_64-darwin---rp:
name: Build x86_64-darwin.rp
runs-on:
- macos-13
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.x86_64-darwin.rp --print-build-logs
x86_64-darwin---rosenpass-oci-image:
name: Build x86_64-darwin.rosenpass-oci-image
runs-on:
@@ -152,11 +130,11 @@ jobs:
needs:
- x86_64-darwin---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -167,11 +145,11 @@ jobs:
runs-on:
- macos-13
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -184,11 +162,11 @@ jobs:
needs:
- x86_64-linux---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -201,11 +179,11 @@ jobs:
needs:
- x86_64-linux---proverif-patched
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -217,11 +195,11 @@ jobs:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -232,55 +210,53 @@ jobs:
runs-on:
- ubuntu-latest
needs:
- x86_64-linux---rosenpass-static
- x86_64-linux---rosenpass-static-oci-image
- x86_64-linux---rp-static
- x86_64-linux---rosenpass-static
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.x86_64-linux.release-package --print-build-logs
# aarch64-linux---release-package:
# name: Build aarch64-linux.release-package
# runs-on:
# - ubuntu-latest
# needs:
# - aarch64-linux---rosenpass-oci-image
# - aarch64-linux---rosenpass
# - aarch64-linux---rp
# steps:
# - run: |
# DEBIAN_FRONTEND=noninteractive
# sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
# - uses: actions/checkout@v4
# - uses: cachix/install-nix-action@v30
# with:
# nix_path: nixpkgs=channel:nixos-unstable
# extra_nix_config: |
# system = aarch64-linux
# - uses: cachix/cachix-action@v15
# with:
# name: rosenpass
# authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
# - name: Build
# run: nix build .#packages.aarch64-linux.release-package --print-build-logs
aarch64-linux---release-package:
name: Build aarch64-linux.release-package
runs-on:
- ubuntu-latest
needs:
- aarch64-linux---rosenpass-oci-image
- aarch64-linux---rosenpass
steps:
- run: |
DEBIAN_FRONTEND=noninteractive
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
extra_nix_config: |
system = aarch64-linux
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.aarch64-linux.release-package --print-build-logs
x86_64-linux---rosenpass:
name: Build x86_64-linux.rosenpass
runs-on:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -295,39 +271,18 @@ jobs:
- run: |
DEBIAN_FRONTEND=noninteractive
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
extra_nix_config: |
system = aarch64-linux
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.aarch64-linux.rosenpass --print-build-logs
aarch64-linux---rp:
name: Build aarch64-linux.rp
runs-on:
- ubuntu-latest
needs: []
steps:
- run: |
DEBIAN_FRONTEND=noninteractive
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
with:
nix_path: nixpkgs=channel:nixos-unstable
extra_nix_config: |
system = aarch64-linux
- uses: cachix/cachix-action@v15
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.aarch64-linux.rp --print-build-logs
x86_64-linux---rosenpass-oci-image:
name: Build x86_64-linux.rosenpass-oci-image
runs-on:
@@ -335,11 +290,11 @@ jobs:
needs:
- x86_64-linux---rosenpass
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -355,13 +310,13 @@ jobs:
- run: |
DEBIAN_FRONTEND=noninteractive
sudo apt-get update -q -y && sudo apt-get install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
extra_nix_config: |
system = aarch64-linux
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -373,32 +328,16 @@ jobs:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.x86_64-linux.rosenpass-static --print-build-logs
x86_64-linux---rp-static:
name: Build x86_64-linux.rp-static
runs-on:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build
run: nix build .#packages.x86_64-linux.rp-static --print-build-logs
x86_64-linux---rosenpass-static-oci-image:
name: Build x86_64-linux.rosenpass-static-oci-image
runs-on:
@@ -406,11 +345,11 @@ jobs:
needs:
- x86_64-linux---rosenpass-static
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -422,11 +361,11 @@ jobs:
- ubuntu-latest
needs: []
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -437,11 +376,11 @@ jobs:
runs-on:
- ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -452,11 +391,11 @@ jobs:
runs-on: ubuntu-latest
if: ${{ github.ref == 'refs/heads/main' }}
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -465,7 +404,7 @@ jobs:
- name: Build
run: nix build .#packages.x86_64-linux.whitepaper --print-build-logs
- name: Deploy PDF artifacts
uses: peaceiris/actions-gh-pages@v4
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: result/

View File

@@ -4,10 +4,6 @@ on:
push:
branches: [main]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
checks: write
contents: read
@@ -16,8 +12,8 @@ jobs:
prettier:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actionsx/prettier@v3
- uses: actions/checkout@v3
- uses: actionsx/prettier@v2
with:
args: --check .
@@ -25,7 +21,7 @@ jobs:
name: Shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
@@ -33,15 +29,15 @@ jobs:
name: Rust Format
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Run Rust Formatting Script
run: bash format_rust_code.sh --mode check
cargo-bench:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -50,25 +46,17 @@ jobs:
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install libsodium
run: sudo apt-get install -y libsodium-dev
# liboqs requires quite a lot of stack memory, thus we adjust
# the default stack size picked for new threads (which is used
# by `cargo test`) to be _big enough_. Setting it to 8 MiB
- run: RUST_MIN_STACK=8388608 cargo bench --workspace --exclude rosenpass-fuzzing
mandoc:
name: mandoc
runs-on: ubuntu-latest
steps:
- name: Install mandoc
run: sudo apt-get install -y mandoc
- uses: actions/checkout@v4
- name: Check rp.1
run: doc/check.sh doc/rp.1
- run: RUST_MIN_STACK=8388608 cargo bench --workspace
cargo-audit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions-rs/audit-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
@@ -76,8 +64,8 @@ jobs:
cargo-clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -87,6 +75,8 @@ jobs:
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- run: rustup component add clippy
- name: Install libsodium
run: sudo apt-get install -y libsodium-dev
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
@@ -95,8 +85,8 @@ jobs:
cargo-doc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -106,21 +96,18 @@ jobs:
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- run: rustup component add clippy
- name: Install libsodium
run: sudo apt-get install -y libsodium-dev
# `--no-deps` used as a workaround for a rust compiler bug. See:
# - https://github.com/rosenpass/rosenpass/issues/62
# - https://github.com/rust-lang/rust/issues/108378
- run: RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --document-private-items
cargo-test:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-13]
# - ubuntu is x86-64
# - macos-13 is also x86-64 architecture
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -129,6 +116,8 @@ jobs:
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install libsodium
run: sudo apt-get install -y libsodium-dev
# liboqs requires quite a lot of stack memory, thus we adjust
# the default stack size picked for new threads (which is used
# by `cargo test`) to be _big enough_. Setting it to 8 MiB
@@ -138,8 +127,8 @@ jobs:
runs-on:
- ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -148,10 +137,10 @@ jobs:
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- uses: cachix/install-nix-action@v30
- uses: cachix/install-nix-action@v21
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
@@ -160,8 +149,8 @@ jobs:
cargo-fuzz:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
- uses: actions/checkout@v3
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
@@ -170,6 +159,8 @@ jobs:
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install libsodium
run: sudo apt-get install -y libsodium-dev
- name: Install nightly toolchain
run: |
rustup toolchain install nightly
@@ -183,30 +174,5 @@ jobs:
cargo fuzz run fuzz_handle_msg -- -max_total_time=5
ulimit -s 8192000 && RUST_MIN_STACK=33554432000 && cargo fuzz run fuzz_kyber_encaps -- -max_total_time=5
cargo fuzz run fuzz_mceliece_encaps -- -max_total_time=5
cargo fuzz run fuzz_box_secret_alloc_malloc -- -max_total_time=5
cargo fuzz run fuzz_box_secret_alloc_memfdsec -- -max_total_time=5
cargo fuzz run fuzz_box_secret_alloc_memfdsec_mallocfb -- -max_total_time=5
cargo fuzz run fuzz_vec_secret_alloc_malloc -- -max_total_time=5
cargo fuzz run fuzz_vec_secret_alloc_memfdsec -- -max_total_time=5
cargo fuzz run fuzz_vec_secret_alloc_memfdsec_mallocfb -- -max_total_time=5
codecov:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: rustup default nightly
- run: rustup component add llvm-tools-preview
- run: |
cargo install cargo-llvm-cov || true
cargo install grcov || true
./coverage_report.sh
# If using tarapulin
#- run: cargo install cargo-tarpaulin
#- run: cargo tarpaulin --out Xml
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
files: ./target/grcov/lcov
verbose: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
cargo fuzz run fuzz_box_secret_alloc -- -max_total_time=5
cargo fuzz run fuzz_vec_secret_alloc -- -max_total_time=5

View File

@@ -1,37 +0,0 @@
name: Regressions
on:
  pull_request:
  push:
    branches: [main]
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
permissions:
  checks: write
  contents: read
jobs:
  multi-peer:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: cargo build --bin rosenpass --release
      - run: python misc/generate_configs.py
      - run: chmod +x .ci/run-regression.sh
      - run: .ci/run-regression.sh 100 20
      - run: |
          [ $(ls -1 output/ate/out | wc -l) -eq 100 ]
  boot-race:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: cargo build --bin rosenpass --release
      - run: chmod +x .ci/boot_race/run.sh
      - run: cargo run --release --bin rosenpass gen-keys .ci/boot_race/a.toml
      - run: cargo run --release --bin rosenpass gen-keys .ci/boot_race/b.toml
      - run: .ci/boot_race/run.sh 5 2 .ci/boot_race/a.toml .ci/boot_race/b.toml
      - run: .ci/boot_race/run.sh 5 1 .ci/boot_race/a.toml .ci/boot_race/b.toml
      - run: .ci/boot_race/run.sh 5 0 .ci/boot_race/a.toml .ci/boot_race/b.toml

View File

@@ -11,18 +11,18 @@ jobs:
runs-on:
- ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build release
run: nix build .#release-package --print-build-logs
- name: Release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
draft: ${{ contains(github.ref_name, 'rc') }}
prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
@@ -32,18 +32,18 @@ jobs:
runs-on:
- macos-13
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build release
run: nix build .#release-package --print-build-logs
- name: Release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
draft: ${{ contains(github.ref_name, 'rc') }}
prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}
@@ -53,18 +53,18 @@ jobs:
runs-on:
- ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v30
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
- uses: cachix/cachix-action@v12
with:
name: rosenpass
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- name: Build release
run: nix build .#release-package --print-build-logs
- name: Release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
draft: ${{ contains(github.ref_name, 'rc') }}
prerelease: ${{ contains(github.ref_name, 'alpha') || contains(github.ref_name, 'beta') }}

5
.gitignore vendored
View File

@@ -20,8 +20,3 @@ _markdown_*
**/result
**/result-*
.direnv
# Visual studio code
.vscode
/output

View File

@@ -1 +0,0 @@
Clara Engler <cve@cve.cx> <me@emilengler.com>

View File

@@ -1,5 +1,4 @@
.direnv/
flake.lock
papers/whitepaper.md
src/usage.md
target/
src/usage.md

View File

@@ -1,41 +0,0 @@
# Contributing to Rosenpass
## Common operations
### Apply code formatting
Format rust code:
```bash
cargo fmt
```
Format rust code in markdown files:
```bash
./format_rust_code.sh --mode fix
```
### Spawn a development environment with nix
```bash
nix develop .#fullEnv
```
You need to [install this nix package manager](https://wiki.archlinux.org/title/Nix) first.
### Run our test
Make sure to increase the stack size available; some of our cryptography operations require a lot of stack memory.
```bash
RUST_MIN_STACK=8388608 cargo test --workspace --all-features
```
### Generate coverage reports
Keep in mind that many of Rosenpass' tests are doctests, so to get an accurate read on our code coverage, you have to include doctests:
```bash
./coverage_report.sh
```

1692
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -11,11 +11,11 @@ members = [
"to",
"fuzz",
"secret-memory",
"rp",
"wireguard-broker",
]
default-members = ["rosenpass", "rp", "wireguard-broker"]
default-members = [
"rosenpass"
]
[workspace.metadata.release]
# ensure that adding `--package` as argument to `cargo release` still creates version tags in the form of `vx.y.z`
@@ -30,64 +30,32 @@ rosenpass-ciphers = { path = "ciphers" }
rosenpass-to = { path = "to" }
rosenpass-secret-memory = { path = "secret-memory" }
rosenpass-oqs = { path = "oqs" }
rosenpass-wireguard-broker = { path = "wireguard-broker" }
criterion = "0.4.0"
test_bin = "0.4.0"
libfuzzer-sys = "0.4"
stacker = "0.1.15"
doc-comment = "0.3.3"
base64ct = { version = "1.6.0", default-features = false }
zeroize = "1.8.1"
memoffset = "0.9.1"
thiserror = "1.0.69"
paste = "1.0.15"
env_logger = "0.10.2"
base64 = "0.21.5"
zeroize = "1.7.0"
memoffset = "0.9.0"
thiserror = "1.0.50"
paste = "1.0.14"
env_logger = "0.10.1"
toml = "0.7.8"
static_assertions = "1.1.0"
allocator-api2 = "0.2.14"
memsec = { git = "https://github.com/rosenpass/memsec.git", rev = "aceb9baee8aec6844125bd6612f92e9a281373df", features = [
"alloc_ext",
] }
allocator-api2-tests = "0.2.14"
memsec = "0.6.3"
rand = "0.8.5"
typenum = "1.17.0"
log = { version = "0.4.22" }
clap = { version = "4.5.23", features = ["derive"] }
clap_mangen = "0.2.24"
clap_complete = "4.5.38"
serde = { version = "1.0.216", features = ["derive"] }
arbitrary = { version = "1.4.1", features = ["derive"] }
anyhow = { version = "1.0.94", features = ["backtrace", "std"] }
mio = { version = "1.0.3", features = ["net", "os-poll"] }
oqs-sys = { version = "0.9.1", default-features = false, features = [
'classic_mceliece',
'kyber',
] }
log = { version = "0.4.20" }
clap = { version = "4.4.10", features = ["derive"] }
serde = { version = "1.0.193", features = ["derive"] }
arbitrary = { version = "1.3.2", features = ["derive"] }
anyhow = { version = "1.0.75", features = ["backtrace", "std"] }
mio = { version = "0.8.9", features = ["net", "os-poll"] }
oqs-sys = { version = "0.8", default-features = false, features = ['classic_mceliece', 'kyber'] }
blake2 = "0.10.6"
chacha20poly1305 = { version = "0.10.1", default-features = false, features = [
"std",
"heapless",
] }
zerocopy = { version = "0.7.35", features = ["derive"] }
chacha20poly1305 = { version = "0.10.1", default-features = false, features = [ "std", "heapless" ] }
zerocopy = { version = "0.7.32", features = ["derive"] }
home = "0.5.9"
derive_builder = "0.20.1"
tokio = { version = "1.42", features = ["macros", "rt-multi-thread"] }
postcard = { version = "1.1.1", features = ["alloc"] }
libcrux = { version = "0.0.2-pre.2" }
hex-literal = { version = "0.4.1" }
hex = { version = "0.4.3" }
heck = { version = "0.5.0" }
libc = { version = "0.2" }
uds = { git = "https://github.com/rosenpass/uds" }
signal-hook = "0.3.17"
#Dev dependencies
serial_test = "3.2.0"
tempfile = "3"
stacker = "0.1.17"
libfuzzer-sys = "0.4"
test_bin = "0.4.0"
criterion = "0.4.0"
allocator-api2-tests = "0.2.15"
procspawn = { version = "1.0.1", features = ["test-support"] }
#Broker dependencies (might need cleanup or changes)
wireguard-uapi = { version = "3.0.0", features = ["xplatform"] }
command-fds = "0.2.3"
rustix = { version = "0.38.42", features = ["net", "fs", "process"] }

View File

@@ -3,12 +3,33 @@
#define SESSION_START_EVENTS 0
#define RANDOMIZED_CALL_IDS 0
#include "config.mpv"
#include "prelude/basic.mpv"
#include "crypto/key.mpv"
#include "crypto/kem.mpv"
#include "rosenpass/oracles.mpv"
nounif v:seed_prec; attacker(prepare_seed(trusted_seed( v )))/6217[hypothesis].
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
nounif v:key_prec; attacker(prepare_key(trusted_key( v )))/6213[hypothesis].
nounif v:kem_sk_prec; attacker(prepare_kem_sk(trusted_kem_sk( v )))/6212[hypothesis].
nounif v:key; attacker(prepare_key( v ))/6211[hypothesis].
nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
nounif Spk:kem_sk_tmpl;
attacker(Creveal_kem_pk(Spk))/6110[conclusion].
nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr ))/6109[conclusion].
nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/6108[conclusion].
nounif rh:RespHello_t;
attacker(Cresp_hello( *rh ))/6107[conclusion].
nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/6106[conclusion].
let main = rosenpass_main.
@lemma "state coherence, initiator: Initiator accepting a RespHello message implies they also generated the associated InitHello message"

View File

@@ -10,6 +10,26 @@
let main = rosenpass_main.
nounif v:seed_prec; attacker(prepare_seed(trusted_seed( v )))/6217[hypothesis].
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
nounif v:key_prec; attacker(prepare_key(trusted_key( v )))/6213[hypothesis].
nounif v:kem_sk_prec; attacker(prepare_kem_sk(trusted_kem_sk( v )))/6212[hypothesis].
nounif v:key; attacker(prepare_key( v ))/6211[hypothesis].
nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
nounif Spk:kem_sk_tmpl;
attacker(Creveal_kem_pk(Spk))/6110[conclusion].
nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr ))/6109[conclusion].
nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/6108[conclusion].
nounif rh:RespHello_t;
attacker(Cresp_hello( *rh ))/6107[conclusion].
nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/6106[conclusion].
@lemma "non-interruptability: Adv cannot prevent a genuine InitHello message from being accepted"
lemma ih:InitHello_t, psk:key, sski:kem_sk, sskr:kem_sk;
event(IHRjct(ih, psk, sskr, kem_pub(sski)))

View File

@@ -88,6 +88,18 @@ set verboseCompleted=VERBOSE.
#define SES_EV(...)
#endif
#if COOKIE_EVENTS
#define COOKIE_EV(...) __VA_ARGS__
#else
#define COOKIE_EV(...)
#endif
#if KEM_EVENTS
#define KEM_EV(...) __VA_ARGS__
#else
#define KEM_EV(...)
#endif
(* TODO: Authentication timing properties *)
(* TODO: Proof that every adversary submitted package is equivalent to one generated by the proper algorithm using different coins. This probably requires introducing an oracle that extracts the coins used and explicitly adding the notion of coins used for Packet->Packet steps and an inductive RNG notion. *)

View File

@@ -41,23 +41,32 @@ restriction s:seed, p1:Atom, p2:Atom, ad1:Atom, ad2:Atom;
event(ConsumeSeed(p1, s, ad1)) && event(ConsumeSeed(p2, s, ad2))
==> p1 = p2 && ad1 = ad2.
letfun create_mac2(k:key, msg:bits) = prf(k,msg).
#include "rosenpass/responder.macro"
fun Cinit_conf(kem_sk_tmpl, key_tmpl, kem_pk_tmpl, InitConf_t) : Atom [data].
CK_EV( event OskOinit_conf(key, key). )
MTX_EV( event ICRjct(InitConf_t, key, kem_sk, kem_pk). )
SES_EV( event ResponderSession(InitConf_t, key). )
event ConsumeBiscuit(Atom, kem_sk, kem_pk, Atom).
let Oinit_conf() =
in(C, Cinit_conf(Ssskm, Spsk, Sspkt, ic));
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinit_conf(Ssskm, Spsk, Sspkt, ic);
KEM_EV(event Oinit_conf_KemUse(SessionId, SessionId, Atom).)
#ifdef KEM_EVENTS
restriction sidi:SessionId, sidr:SessionId, ad1:Atom, ad2:Atom;
event(Oinit_conf_KemUse(sidi, sidr, ad1)) && event(Oinit_conf_KemUse(sidi, sidr, ad2))
==> ad1 = ad2.
#endif
event ConsumeBiscuit(Atom, kem_sk, kem_pk, Atom).
fun Ccookie(key, bits) : Atom[data].
let Oinit_conf_inner(Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t, call:Atom) =
SETUP_HANDSHAKE_STATE()
eski <- kem_sk0;
epki <- kem_pk0;
let try_ = (
let InitConf(sidi, sidr, biscuit, auth) = ic in
KEM_EV(event Oinit_conf_KemUse(sidi, sidr, call);)
INITCONF_CONSUME()
event ConsumeBiscuit(biscuit_no, sskm, spkt, call);
CK_EV( event OskOinit_conf(ck_rh, osk); )
@@ -73,10 +82,20 @@ let Oinit_conf() =
#endif
).
let Oinit_conf() =
in(C, Cinit_conf(Ssskm, Spsk, Sspkt, ic));
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinit_conf(Ssskm, Spsk, Sspkt, ic);
#endif
Oinit_conf_inner(Ssskm, Spsk, Sspkt, ic, call).
restriction biscuit_no:Atom, sskm:kem_sk, spkr:kem_pk, ad1:Atom, ad2:Atom;
event(ConsumeBiscuit(biscuit_no, sskm, spkr, ad1)) && event(ConsumeBiscuit(biscuit_no, sskm, spkr, ad2))
==> ad1 = ad2.
// TODO: Restriction biscuit no invalidation
#include "rosenpass/initiator.macro"
@@ -85,27 +104,56 @@ CK_EV( event OskOresp_hello(key, key, key). )
MTX_EV( event RHRjct(RespHello_t, key, kem_sk, kem_pk). )
MTX_EV( event ICSent(RespHello_t, InitConf_t, key, kem_sk, kem_pk). )
SES_EV( event InitiatorSession(RespHello_t, key). )
let Oresp_hello(HS_DECL_ARGS) =
in(C, Cresp_hello(RespHello(sidr, =sidi, ecti, scti, biscuit, auth)));
rh <- RespHello(sidr, sidi, ecti, scti, biscuit, auth);
/* try */ let ic = (
ck_ini <- ck;
RESPHELLO_CONSUME()
ck_ih <- ck;
INITCONF_PRODUCE()
CK_EV (event OskOresp_hello(ck_ini, ck_ih, osk); ) // TODO: Queries testing that there is no duplication
MTX_EV( event ICSent(rh, ic, psk, sski, spkr); )
SES_EV( event InitiatorSession(rh, osk); )
ic
/* success */ ) in (
out(C, ic)
/* fail */ ) else (
#if MESSAGE_TRANSMISSION_EVENTS
event RHRjct(rh, psk, sski, spkr)
#else
0
KEM_EV(event Oresp_hello_KemUse(SessionId, SessionId, Atom).)
#ifdef KEM_EVENTS
restriction sidi:SessionId, sidr:SessionId, ad1:Atom, ad2:Atom;
event(Oresp_hello_KemUse(sidi, sidr, ad1)) && event(Oresp_hello_KemUse(sidi, sidr, ad2))
==> ad1 = ad2.
#endif
#ifdef COOKIE_EVENTS
COOKIE_EVENTS(Oresp_hello)
#endif
let Oresp_hello(HS_DECL_ARGS, C_in:channel, call:Atom) =
in(C_in, Cresp_hello(RespHello(sidr, =sidi, ecti, scti, biscuit, auth)));
in(C_in, mac2_key:key);
rh <- RespHello(sidr, sidi, ecti, scti, biscuit, auth);
#ifdef COOKIE_EVENTS
msg <- RH2b(rh);
COOKIE_PROCESS(Oresp_hello,
#endif
/* try */ let ic = (
ck_ini <- ck;
KEM_EV(event Oresp_hello_KemUse(sidi, sidr, call);)
RESPHELLO_CONSUME()
ck_ih <- ck;
INITCONF_PRODUCE()
CK_EV (event OskOresp_hello(ck_ini, ck_ih, osk); ) // TODO: Queries testing that there is no duplication
MTX_EV( event ICSent(rh, ic, psk, sski, spkr); )
SES_EV( event InitiatorSession(rh, osk); )
ic
/* success */ ) in (
icbits <- IC2b(ic);
mac <- create_mac(spkt, icbits);
mac2 <- create_mac2(mac2_key, mac_envelope2b(mac));
out(C_in, ic);
out(C_in, mac);
out(C_in, mac2)
/* fail */ ) else (
#if MESSAGE_TRANSMISSION_EVENTS
event RHRjct(rh, psk, sski, spkr)
#else
0
#endif
)
#ifdef COOKIE_EVENTS
)
#else
.
#endif
).
// TODO: Restriction: Biscuit no invalidation
@@ -116,24 +164,33 @@ MTX_EV( event IHRjct(InitHello_t, key, kem_sk, kem_pk). )
MTX_EV( event RHSent(InitHello_t, RespHello_t, key, kem_sk, kem_pk). )
event ConsumeSidr(SessionId, Atom).
event ConsumeBn(Atom, kem_sk, kem_pk, Atom).
let Oinit_hello() =
in(C, Cinit_hello(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih));
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinit_hello(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih);
KEM_EV(event Oinit_hello_KemUse(SessionId, SessionId, Atom).)
#ifdef KEM_EVENTS
restriction sidi:SessionId, sidr:SessionId, ad1:Atom, ad2:Atom;
event(Oinit_hello_KemUse(sidi, sidr, ad1)) && event(Oinit_hello_KemUse(sidi, sidr, ad2))
==> ad1 = ad2.
#endif
let Oinit_hello_inner(sidm:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt: kem_sk_tmpl, Septi: seed_tmpl, Sspti: seed_tmpl, ih: InitHello_t, mac2_key:key, C_out:channel, call:Atom) =
// TODO: This is ugly
let InitHello(sidi, epki, sctr, pidiC, auth) = ih in
SETUP_HANDSHAKE_STATE()
eski <- kem_sk0;
epti <- rng_key(setup_seed(Septi)); // RHR4
spti <- rng_key(setup_seed(Sspti)); // RHR5
event ConsumeBn(biscuit_no, sskm, spkt, call);
event ConsumeSidr(sidr, call);
epti <- rng_key(setup_seed(Septi)); // RHR4
spti <- rng_key(setup_seed(Sspti)); // RHR5
event ConsumeSeed(Epti, setup_seed(Septi), call);
event ConsumeSeed(Spti, setup_seed(Sspti), call);
// out(C_out, spkt);
let rh = (
KEM_EV(event Oinit_hello_KemUse(sidi, sidr, call);)
INITHELLO_CONSUME()
ck_ini <- ck;
RESPHELLO_PRODUCE()
@@ -141,7 +198,14 @@ let Oinit_hello() =
MTX_EV( event RHSent(ih, rh, psk, sskr, spki); )
rh
/* success */ ) in (
out(C, rh)
rhbits <- RH2b(rh);
mac <- create_mac(spkt, rhbits);
out(C_out, rh);
out(C_out, mac);
mac2 <- create_mac2(mac2_key, mac_envelope2b(mac));
out(C_out, mac2)
/* fail */ ) else (
#if MESSAGE_TRANSMISSION_EVENTS
event IHRjct(ih, psk, sskr, spki)
@@ -150,6 +214,18 @@ let Oinit_hello() =
#endif
).
let Oinit_hello() =
in(C, Cinit_hello(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih));
in(C, mac2_key:key);
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinit_hello(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih);
#endif
Oinit_hello_inner(sidr, biscuit_no, Ssskm, Spsk, Sspkt, Septi, Sspti, ih, mac2_key, C, call).
restriction sid:SessionId, ad1:Atom, ad2:Atom;
event(ConsumeSidr(sid, ad1)) && event(ConsumeSidr(sid, ad2))
==> ad1 = ad2.
@@ -166,27 +242,55 @@ fun Cinitiator(SessionId, kem_sk_tmpl, key_tmpl, kem_pk_tmpl, seed_tmpl, seed_tm
CK_EV( event OskOinitiator_ck(key). )
CK_EV( event OskOinitiator(key, key, kem_sk, kem_pk, key). )
MTX_EV( event IHSent(InitHello_t, key, kem_sk, kem_pk). )
event ConsumeSidi(SessionId, Atom).
let Oinitiator() =
in(C, Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr));
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr);
KEM_EV(event Oinitiator_inner_KemUse(SessionId, SessionId, Atom).)
#ifdef KEM_EVENTS
restriction sidi:SessionId, sidr:SessionId, ad1:Atom, ad2:Atom;
event(Oinitiator_inner_KemUse(sidi, sidr, ad1)) && event(Oinitiator_inner_KemUse(sidi, sidr, ad2))
==> ad1 = ad2.
#endif
SETUP_HANDSHAKE_STATE()
RNG_KEM_PAIR(eski, epki, Seski) // IHI3
sidr <- sid0;
sptr <- rng_key(setup_seed(Ssptr)); // IHI5
event ConsumeSidi(sidi, call);
event ConsumeSeed(Sptr, setup_seed(Ssptr), call);
event ConsumeSeed(Eski, setup_seed(Seski), call);
INITHELLO_PRODUCE()
CK_EV( event OskOinitiator_ck(ck); )
CK_EV( event OskOinitiator(ck, psk, sski, spkr, sptr); )
MTX_EV( event IHSent(ih, psk, sski, spkr); )
out(C, ih);
Oresp_hello(HS_PASS_ARGS).
event ConsumeSidi(SessionId, Atom).
let Oinitiator_inner(sidi: SessionId, Ssskm: kem_sk_tmpl, Spsk: key_tmpl, Sspkt: kem_sk_tmpl, Seski: seed_tmpl, Ssptr: seed_tmpl, last_cookie:key, C_out:channel, call:Atom) =
SETUP_HANDSHAKE_STATE()
sidr <- sid0;
KEM_EV(event Oinitiator_inner_KemUse(sidi, sidr, call);)
RNG_KEM_PAIR(eski, epki, Seski) // IHI3
sptr <- rng_key(setup_seed(Ssptr)); // IHI5
event ConsumeSidi(sidi, call);
event ConsumeSeed(Sptr, setup_seed(Ssptr), call);
event ConsumeSeed(Eski, setup_seed(Seski), call);
INITHELLO_PRODUCE()
CK_EV( event OskOinitiator_ck(ck); )
CK_EV( event OskOinitiator(ck, psk, sski, spkr, sptr); )
MTX_EV( event IHSent(ih, psk, sski, spkr); )
out(C_out, ih);
ihbits <- IH2b(ih);
mac <- create_mac(spkt, ihbits);
out(C_out, mac);
mac2 <- create_mac2(last_cookie, mac_envelope2b(mac));
out(C_out, mac2);
Oresp_hello(HS_PASS_ARGS, C_out, call).
let Oinitiator() =
in(C, Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr));
#if RANDOMIZED_CALL_IDS
new call:Atom;
#else
call <- Cinitiator(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr);
#endif
in(C, last_cookie:key);
Oinitiator_inner(sidi, Ssskm, Spsk, Sspkt, Seski, Ssptr, last_cookie, C, call).
restriction sid:SessionId, ad1:Atom, ad2:Atom;
event(ConsumeSidi(sid, ad1)) && event(ConsumeSidi(sid, ad2))
@@ -207,21 +311,3 @@ let rosenpass_main() = 0
| REP(RESPONDER_BOUND, Oinit_hello)
| REP(RESPONDER_BOUND, Oinit_conf).
nounif v:seed_prec; attacker(prepare_seed(trusted_seed( v )))/6217[hypothesis].
nounif v:seed; attacker(prepare_seed( v ))/6216[hypothesis].
nounif v:seed; attacker(rng_kem_sk( v ))/6215[hypothesis].
nounif v:seed; attacker(rng_key( v ))/6214[hypothesis].
nounif v:key_prec; attacker(prepare_key(trusted_key( v )))/6213[hypothesis].
nounif v:kem_sk_prec; attacker(prepare_kem_sk(trusted_kem_sk( v )))/6212[hypothesis].
nounif v:key; attacker(prepare_key( v ))/6211[hypothesis].
nounif v:kem_sk; attacker(prepare_kem_sk( v ))/6210[hypothesis].
nounif Spk:kem_sk_tmpl;
attacker(Creveal_kem_pk(Spk))/6110[conclusion].
nounif sid:SessionId, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Seski:seed_tmpl, Ssptr:seed_tmpl;
attacker(Cinitiator( *sid, *Ssskm, *Spsk, *Sspkt, *Seski, *Ssptr ))/6109[conclusion].
nounif sid:SessionId, biscuit_no:Atom, Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, Septi:seed_tmpl, Sspti:seed_tmpl, ih:InitHello_t;
attacker(Cinit_hello( *sid, *biscuit_no, *Ssskm, *Spsk, *Sspkt, *Septi, *Sspti, *ih ))/6108[conclusion].
nounif rh:RespHello_t;
attacker(Cresp_hello( *rh ))/6107[conclusion].
nounif Ssskm:kem_sk_tmpl, Spsk:key_tmpl, Sspkt:kem_sk_tmpl, ic:InitConf_t;
attacker(Cinit_conf( *Ssskm, *Spsk, *Sspkt, *ic ))/6106[conclusion].

View File

@@ -2,6 +2,26 @@
#include "crypto/kem.mpv"
#include "rosenpass/handshake_state.mpv"
fun Envelope(
key,
bits
): bits [data].
type mac_envelope_t.
fun mac_envelope(
key,
bits
) : mac_envelope_t.
fun mac_envelope2b(mac_envelope_t) : bits [typeConverter].
letfun create_mac(pk:kem_pk, payload:bits) = mac_envelope(lprf2(MAC, kem_pk2b(pk), payload), payload).
fun mac_envelope_pk_test(mac_envelope_t, kem_pk) : bool
reduc forall pk:kem_pk, b:bits;
mac_envelope_pk_test(mac_envelope(prf(prf(prf(prf(key0,PROTOCOL),MAC),kem_pk2b(pk)),
b), b), pk) = true.
type InitHello_t.
fun InitHello(
SessionId, // sidi
@@ -11,6 +31,8 @@ fun InitHello(
bits // auth
) : InitHello_t [data].
fun IH2b(InitHello_t) : bitstring [typeConverter].
#define INITHELLO_PRODUCE() \
ck <- lprf1(CK_INIT, kem_pk2b(spkr)); /* IHI1 */ \
/* not handled here */ /* IHI2 */ \
@@ -41,7 +63,9 @@ fun RespHello(
bits // auth
) : RespHello_t [data].
#define RESPHELLO_PRODUCE() \
fun RH2b(RespHello_t) : bitstring [typeConverter].
#define RESPHELLO_PRODUCE() \
/* not handled here */ /* RHR1 */ \
MIX2(sid2b(sidr), sid2b(sidi)) /* RHR3 */ \
ENCAPS_AND_MIX(ecti, epki, epti) /* RHR4 */ \
@@ -67,13 +91,14 @@ fun InitConf(
bits // auth
) : InitConf_t [data].
fun IC2b(InitConf_t) : bitstring [typeConverter].
#define INITCONF_PRODUCE() \
MIX2(sid2b(sidi), sid2b(sidr)) /* ICI3 */ \
ENCRYPT_AND_MIX(auth, empty) /* ICI4 */ \
ic <- InitConf(sidi, sidr, biscuit, auth);
#define INITCONF_CONSUME() \
let InitConf(sidi, sidr, biscuit, auth) = ic in \
LOAD_BISCUIT(biscuit_no, biscuit) /* ICR1 */ \
ENCRYPT_AND_MIX(rh_auth, empty) /* ICIR */ \
ck_rh <- ck; /* ---- */ /* TODO: Move into oracles.mpv */ \

View File

@@ -10,8 +10,3 @@ repository = "https://github.com/rosenpass/rosenpass"
readme = "readme.md"
[dependencies]
[dev-dependencies]
rosenpass-oqs = { workspace = true }
rosenpass-secret-memory = { workspace = true }
anyhow = {workspace = true}

View File

@@ -1,5 +1,5 @@
# Rosenpass internal cryptographic traits
# Rosenpass internal libsodium bindings
Rosenpass internal library providing traits for cryptographic primitives.
This is an internal library; no guarantee is made about its API at this point in time.
This is an internal library; not guarantee is made about its API at this point in time.

View File

@@ -5,128 +5,12 @@
//!
//! Conceptually KEMs are akin to public-key encryption, but instead of encrypting
//! arbitrary data, KEMs are limited to the transmission of keys, randomly chosen during
//!
//! encapsulation.
//!
//! The [Kem] Trait describes the basic API offered by a Key Encapsulation
//! Mechanism. Two implementations for it are provided:
//! [Kyber512](../../rosenpass_oqs/kyber_512/enum.Kyber512.html) and
//! [ClassicMceliece460896](../../rosenpass_oqs/classic_mceliece_460896/enum.ClassicMceliece460896.html).
//!
//! An example where Alice generates a keypair and gives her public key to Bob, for Bob to
//! encapsulate a symmetric key and Alice to decapsulate it would look as follows.
//! In the example, we are using Kyber512, but any KEM that correctly implements the [Kem]
//! trait could be used as well.
//!```rust
//! use rosenpass_cipher_traits::Kem;
//! use rosenpass_oqs::Kyber512;
//! # use rosenpass_secret_memory::{secret_policy_use_only_malloc_secrets, Secret};
//!
//! type MyKem = Kyber512;
//! secret_policy_use_only_malloc_secrets();
//! let mut alice_sk: Secret<{ MyKem::SK_LEN }> = Secret::zero();
//! let mut alice_pk: [u8; MyKem::PK_LEN] = [0; MyKem::PK_LEN];
//! MyKem::keygen(alice_sk.secret_mut(), &mut alice_pk)?;
//!
//! let mut bob_shk: Secret<{ MyKem::SHK_LEN }> = Secret::zero();
//! let mut bob_ct: [u8; MyKem::CT_LEN] = [0; MyKem::CT_LEN];
//! MyKem::encaps(bob_shk.secret_mut(), &mut bob_ct, &mut alice_pk)?;
//!
//! let mut alice_shk: Secret<{ MyKem::SHK_LEN }> = Secret::zero();
//! MyKem::decaps(alice_shk.secret_mut(), alice_sk.secret_mut(), &mut bob_ct)?;
//!
//! # assert_eq!(alice_shk.secret(), bob_shk.secret());
//! # Ok::<(), anyhow::Error>(())
//!```
//!
//! Implementing the [Kem]-trait for a KEM is easy. Mostly, you must format the KEM's
//! keys, and ciphertext as `u8` slices. Below, we provide an example for how the trait can
//! be implemented using a **HORRIBLY INSECURE** DummyKem that only uses static values for keys
//! and ciphertexts as an example.
//!```rust
//!# use rosenpass_cipher_traits::Kem;
//!
//! struct DummyKem {}
//! impl Kem for DummyKem {
//!
//! // For this DummyKem, using String for errors is sufficient.
//! type Error = String;
//!
//! // For this DummyKem, we will use a single `u8` for everything
//! const SK_LEN: usize = 1;
//! const PK_LEN: usize = 1;
//! const CT_LEN: usize = 1;
//! const SHK_LEN: usize = 1;
//!
//! fn keygen(sk: &mut [u8], pk: &mut [u8]) -> Result<(), Self::Error> {
//! if sk.len() != Self::SK_LEN {
//! return Err("sk does not have the correct length!".to_string());
//! }
//! if pk.len() != Self::PK_LEN {
//! return Err("pk does not have the correct length!".to_string());
//! }
//! sk[0] = 42;
//! pk[0] = 21;
//! Ok(())
//! }
//!
//! fn encaps(shk: &mut [u8], ct: &mut [u8], pk: &[u8]) -> Result<(), Self::Error> {
//! if pk.len() != Self::PK_LEN {
//! return Err("pk does not have the correct length!".to_string());
//! }
//! if ct.len() != Self::CT_LEN {
//! return Err("ct does not have the correct length!".to_string());
//! }
//! if shk.len() != Self::SHK_LEN {
//! return Err("shk does not have the correct length!".to_string());
//! }
//! if pk[0] != 21 {
//! return Err("Invalid public key!".to_string());
//! }
//! ct[0] = 7;
//! shk[0] = 17;
//! Ok(())
//! }
//!
//! fn decaps(shk: &mut [u8], sk: &[u8], ct: &[u8]) -> Result<(), Self::Error> {
//! if sk.len() != Self::SK_LEN {
//! return Err("sk does not have the correct length!".to_string());
//! }
//! if ct.len() != Self::CT_LEN {
//! return Err("ct does not have the correct length!".to_string());
//! }
//! if shk.len() != Self::SHK_LEN {
//! return Err("shk does not have the correct length!".to_string());
//! }
//! if sk[0] != 42 {
//! return Err("Invalid public key!".to_string());
//! }
//! if ct[0] != 7 {
//! return Err("Invalid ciphertext!".to_string());
//! }
//! shk[0] = 17;
//! Ok(())
//! }
//! }
//! # use rosenpass_secret_memory::{secret_policy_use_only_malloc_secrets, Secret};
//! #
//! # type MyKem = DummyKem;
//! # secret_policy_use_only_malloc_secrets();
//! # let mut alice_sk: Secret<{ MyKem::SK_LEN }> = Secret::zero();
//! # let mut alice_pk: [u8; MyKem::PK_LEN] = [0; MyKem::PK_LEN];
//! # MyKem::keygen(alice_sk.secret_mut(), &mut alice_pk)?;
//!
//! # let mut bob_shk: Secret<{ MyKem::SHK_LEN }> = Secret::zero();
//! # let mut bob_ct: [u8; MyKem::CT_LEN] = [0; MyKem::CT_LEN];
//! # MyKem::encaps(bob_shk.secret_mut(), &mut bob_ct, &mut alice_pk)?;
//! #
//! # let mut alice_shk: Secret<{ MyKem::SHK_LEN }> = Secret::zero();
//! # MyKem::decaps(alice_shk.secret_mut(), alice_sk.secret_mut(), &mut bob_ct)?;
//! #
//! # assert_eq!(alice_shk.secret(), bob_shk.secret());
//! #
//! # Ok::<(), String>(())
//!```
//!
//! The [KEM] Trait describes the basic API offered by a Key Encapsulation
//! Mechanism. Two implementations for it are provided, [StaticKEM] and [EphemeralKEM].
use std::result::Result;
/// Key Encapsulation Mechanism
///

View File

@@ -9,9 +9,6 @@ homepage = "https://rosenpass.eu/"
repository = "https://github.com/rosenpass/rosenpass"
readme = "readme.md"
[features]
experiment_libcrux = ["dep:libcrux"]
[dependencies]
anyhow = { workspace = true }
rosenpass-to = { workspace = true }
@@ -23,4 +20,3 @@ static_assertions = { workspace = true }
zeroize = { workspace = true }
chacha20poly1305 = { workspace = true }
blake2 = { workspace = true }
libcrux = { workspace = true, optional = true }

View File

@@ -2,196 +2,100 @@ use anyhow::Result;
use rosenpass_secret_memory::Secret;
use rosenpass_to::To;
use crate::keyed_hash as hash;
use crate::subtle::incorrect_hmac_blake2b as hash;
pub use hash::KEY_LEN;
///
///```rust
/// # use rosenpass_ciphers::hash_domain::{HashDomain, HashDomainNamespace, SecretHashDomain, SecretHashDomainNamespace};
/// use rosenpass_secret_memory::Secret;
/// # rosenpass_secret_memory::secret_policy_use_only_malloc_secrets();
///
/// const PROTOCOL_IDENTIFIER: &str = "MY_PROTOCOL:IDENTIFIER";
/// # fn do_doc_test() -> Result<(), Box<dyn std::error::Error>> {
/// // create use once hash domain for the protocol identifier
/// let mut hash_domain = HashDomain::zero();
/// hash_domain = hash_domain.mix(PROTOCOL_IDENTIFIER.as_bytes())?;
/// // upgrade to reusable hash domain
/// let hash_domain_namespace: HashDomainNamespace = hash_domain.dup();
/// // derive new key
/// let key_identifier = "my_key_identifier";
/// let key = hash_domain_namespace.mix(key_identifier.as_bytes())?.into_value();
/// // derive a new key based on a secret
/// const MY_SECRET_LEN: usize = 21;
/// let my_secret_bytes = "my super duper secret".as_bytes();
/// let my_secret: Secret<21> = Secret::from_slice("my super duper secret".as_bytes());
/// let secret_hash_domain: SecretHashDomain = hash_domain_namespace.mix_secret(my_secret)?;
/// // derive a new key based on the secret key
/// let new_key_identifier = "my_new_key_identifier".as_bytes();
/// let new_key = secret_hash_domain.mix(new_key_identifier)?.into_secret();
///
/// # Ok(())
/// # }
/// # do_doc_test().unwrap();
///
///```
///
// TODO Use a proper Dec interface
/// A use-once hash domain for a specified key that can be used directly.
/// The key must consist of [KEY_LEN] many bytes. If the key must remain secret,
/// use [SecretHashDomain] instead.
#[derive(Clone, Debug)]
pub struct HashDomain([u8; KEY_LEN]);
/// A reusable hash domain for a namespace identified by the key.
/// The key must consist of [KEY_LEN] many bytes. If the key must remain secret,
/// use [SecretHashDomainNamespace] instead.
#[derive(Clone, Debug)]
pub struct HashDomainNamespace([u8; KEY_LEN]);
/// A use-once hash domain for a specified key that keeps the key secret
/// by wrapping it in [Secret]. The key must consist of [KEY_LEN] many bytes.
#[derive(Clone, Debug)]
pub struct SecretHashDomain(Secret<KEY_LEN>);
/// A reusable hash domain for a namespace identified by the key that keeps the key secure
/// by wrapping it in [Secret]. The key must consist of [KEY_LEN] many bytes.
#[derive(Clone, Debug)]
pub struct SecretHashDomainNamespace(Secret<KEY_LEN>);
impl HashDomain {
/// Creates a new [HashDomain] initialized with an all-zero key.
pub fn zero() -> Self {
Self([0u8; KEY_LEN])
}
/// Turns this [HashDomain] into a [HashDomainNamespace], keeping the key.
pub fn dup(self) -> HashDomainNamespace {
HashDomainNamespace(self.0)
}
/// Turns this [HashDomain] into a [SecretHashDomain] by wrapping the key into a [Secret]
/// and creating a new [SecretHashDomain] from it.
pub fn turn_secret(self) -> SecretHashDomain {
SecretHashDomain(Secret::from_slice(&self.0))
}
// TODO: Protocol! Use domain separation to ensure that
/// Creates a new [HashDomain] by mixing in a new key `v`. Specifically,
/// it evaluates [hash::hash] with this HashDomain's key as the key and `v`
/// as the `data` and uses the result as the key for the new [HashDomain].
///
pub fn mix(self, v: &[u8]) -> Result<Self> {
Ok(Self(hash::hash(&self.0, v).collect::<[u8; KEY_LEN]>()?))
}
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
/// by calling [SecretHashDomain::invoke_primitive] with this
/// [HashDomain]'s key as `k` and `v` as `d`.
pub fn mix_secret<const N: usize>(self, v: Secret<N>) -> Result<SecretHashDomain> {
SecretHashDomain::invoke_primitive(&self.0, v.secret())
}
/// Gets the key of this [HashDomain].
pub fn into_value(self) -> [u8; KEY_LEN] {
self.0
}
}
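Since `mix` consumes the value and returns a fresh [HashDomain], typical use chains the calls. As a reading aid, here is a minimal sketch; the labels and the helper name are illustrative and not taken from this diff:

```rust
use rosenpass_ciphers::hash_domain::{HashDomain, KEY_LEN};

// Derive a non-secret value from a chain of public labels.
// Each `mix` consumes the domain and returns a new one keyed on the hash result.
fn derive_public_label() -> anyhow::Result<[u8; KEY_LEN]> {
    let value = HashDomain::zero()
        .mix(b"MY_PROTOCOL:IDENTIFIER")?
        .mix(b"chaining-key")?
        .into_value();
    Ok(value)
}
```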
impl HashDomainNamespace {
/// Creates a new [HashDomain] by mixing in a new key `v`. Specifically,
/// it evaluates [hash::hash] with this [HashDomainNamespace]'s key as the key and `v`
/// as the `data` and uses the result as the key for the new [HashDomain].
pub fn mix(&self, v: &[u8]) -> Result<HashDomain> {
Ok(HashDomain(
hash::hash(&self.0, v).collect::<[u8; KEY_LEN]>()?,
))
}
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
/// by calling [SecretHashDomain::invoke_primitive] with the key of this
/// [HashDomainNamespace] as `k` and `v` as `d`.
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn mix_secret<const N: usize>(&self, v: Secret<N>) -> Result<SecretHashDomain> {
SecretHashDomain::invoke_primitive(&self.0, v.secret())
}
}
impl SecretHashDomain {
/// Creates a new [SecretHashDomain] with the given key `k` and data `d` by calling
/// [hash::hash] with `k` as the `key` and `d` as the `data`, and using the result
/// as the content for the new [SecretHashDomain].
/// Both `k` and `d` have to be exactly [KEY_LEN] bytes in length.
pub fn invoke_primitive(k: &[u8], d: &[u8]) -> Result<SecretHashDomain> {
let mut r = SecretHashDomain(Secret::zero());
hash::hash(k, d).to(r.0.secret_mut())?;
Ok(r)
}
/// Creates a new [SecretHashDomain] that is initialized with an all-zero key.
pub fn zero() -> Self {
Self(Secret::zero())
}
/// Turns this [SecretHashDomain] into a [SecretHashDomainNamespace].
pub fn dup(self) -> SecretHashDomainNamespace {
SecretHashDomainNamespace(self.0)
}
/// Creates a new [SecretHashDomain] from a [Secret] `k`.
///
/// It requires that `k` consist of exactly [KEY_LEN] bytes.
pub fn danger_from_secret(k: Secret<KEY_LEN>) -> Self {
Self(k)
}
/// Creates a new [SecretHashDomain] by mixing in a new key `v`. Specifically,
/// it evaluates [hash::hash] with this [SecretHashDomain]'s key as the key and `v`
/// as the `data` and uses the result as the key for the new [SecretHashDomain].
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn mix(self, v: &[u8]) -> Result<SecretHashDomain> {
Self::invoke_primitive(self.0.secret(), v)
}
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
/// by calling [SecretHashDomain::invoke_primitive] with the key of this
/// [SecretHashDomain] as `k` and `v` as `d`.
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn mix_secret<const N: usize>(self, v: Secret<N>) -> Result<SecretHashDomain> {
Self::invoke_primitive(self.0.secret(), v.secret())
}
/// Get the secret key data from this [SecretHashDomain].
pub fn into_secret(self) -> Secret<KEY_LEN> {
self.0
}
/// Evaluates [hash::hash] with `v` as the `key` and `dst` as the `data` and stores the
/// result as the new data of this [SecretHashDomain].
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn into_secret_slice(mut self, v: &[u8], dst: &[u8]) -> Result<()> {
hash::hash(v, dst).to(self.0.secret_mut())
}
}
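The secret-keyed variant is driven the same way. A minimal sketch follows, under the assumption that a secret-memory policy (as in the module-level example above) has already been selected; the input secret, label, and helper name are placeholders:

```rust
use rosenpass_ciphers::hash_domain::{SecretHashDomain, KEY_LEN};
use rosenpass_secret_memory::Secret;

// Mix a secret and a public label into a fresh secret key; the result stays wrapped in `Secret`.
fn derive_secret_key(ikm: Secret<KEY_LEN>) -> anyhow::Result<Secret<KEY_LEN>> {
    Ok(SecretHashDomain::zero()
        .mix_secret(ikm)?
        .mix(b"my_new_key_identifier")?
        .into_secret())
}
```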
impl SecretHashDomainNamespace {
/// Creates a new [SecretHashDomain] by mixing in a new key `v`. Specifically,
/// it evaluates [hash::hash] with this [SecretHashDomainNamespace]'s key as the key and `v`
/// as the `data` and uses the result as the key for the new [SecretHashDomain].
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn mix(&self, v: &[u8]) -> Result<SecretHashDomain> {
SecretHashDomain::invoke_primitive(self.0.secret(), v)
}
/// Creates a new [SecretHashDomain] by mixing in a new key `v`
/// by calling [SecretHashDomain::invoke_primitive] with the key of this
/// [SecretHashDomainNamespace] as `k` and `v` as `d`.
///
/// It requires that `v` consists of exactly [KEY_LEN] many bytes.
pub fn mix_secret<const N: usize>(&self, v: Secret<N>) -> Result<SecretHashDomain> {
SecretHashDomain::invoke_primitive(self.0.secret(), v.secret())
}
@@ -199,7 +103,6 @@ impl SecretHashDomainNamespace {
// TODO: This entire API is not very nice; we need this for biscuits, but
// it might be better to extract a special "biscuit"
// labeled subkey and reinitialize the chain with this
/// Get the secret key data from this [SecretHashDomain].
pub fn danger_into_secret(self) -> Secret<KEY_LEN> {
self.0
}

View File

@@ -2,36 +2,17 @@ use static_assertions::const_assert;
pub mod subtle;
/// All keyed primitives in this crate use 32 byte keys
pub const KEY_LEN: usize = 32;
const_assert!(KEY_LEN == aead::KEY_LEN);
const_assert!(KEY_LEN == xaead::KEY_LEN);
const_assert!(KEY_LEN == hash_domain::KEY_LEN);
/// Keyed hashing
///
/// This should only be used for implementation details; anything with relevance
/// to the cryptographic protocol should use the facilities in [hash_domain] (though
/// [hash_domain] uses this module internally).
pub mod keyed_hash {
pub use crate::subtle::incorrect_hmac_blake2b::{
hash, KEY_LEN, KEY_MAX, KEY_MIN, OUT_MAX, OUT_MIN,
};
}
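For such implementation details, the facade is used with the destination-passing style from `rosenpass-to`, mirroring the blake2b doc example further down in this diff. A minimal sketch; the helper name and inputs are illustrative:

```rust
use rosenpass_ciphers::keyed_hash;
use rosenpass_to::To;

// Output and key length are both 32 bytes (KEY_LEN) for this construction.
fn tag(key: &[u8; keyed_hash::KEY_LEN], data: &[u8]) -> anyhow::Result<[u8; keyed_hash::KEY_LEN]> {
    let mut out = [0u8; keyed_hash::KEY_LEN];
    keyed_hash::hash(key, data).to(&mut out)?;
    Ok(out)
}
```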
/// Authenticated encryption with associated data
/// Chacha20poly1305 is used.
pub mod aead {
#[cfg(not(feature = "experiment_libcrux"))]
pub use crate::subtle::chacha20poly1305_ietf::{decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN};
#[cfg(feature = "experiment_libcrux")]
pub use crate::subtle::chacha20poly1305_ietf_libcrux::{
decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN,
};
}
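Regardless of which backend the feature flag selects, the facade is driven the same way. A minimal round-trip sketch, based on the per-backend doc examples further down in this diff; key, nonce, and messages are placeholder values, not secure ones:

```rust
use rosenpass_ciphers::aead::{decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN};

fn roundtrip() -> anyhow::Result<()> {
    let key = [0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
    let nonce = [0u8; NONCE_LEN]; // a nonce must never be reused with the same key
    let ad = b"associated data";
    let msg = b"post-quantum cryptography is very important";

    // The ciphertext carries the authentication tag in its last TAG_LEN bytes.
    let mut ciphertext = vec![0u8; msg.len() + TAG_LEN];
    encrypt(&mut ciphertext, &key, &nonce, ad, msg)?;

    let mut plaintext = vec![0u8; msg.len()];
    decrypt(&mut plaintext, &key, &nonce, ad, &ciphertext)?;
    assert_eq!(&plaintext[..], &msg[..]);
    Ok(())
}
```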
/// Authenticated encryption with associated data with a constant nonce
/// XChacha20poly1305 is used.
pub mod xaead {
pub use crate::subtle::xchacha20poly1305_ietf::{
decrypt, encrypt, KEY_LEN, NONCE_LEN, TAG_LEN,
@@ -40,13 +21,6 @@ pub mod xaead {
pub mod hash_domain;
/// This crate includes two key encapsulation mechanisms.
/// Namely ClassicMceliece460896 (sometimes also referred to as `StaticKem`) and
/// Kyber512 (sometimes also referred to as `EphemeralKem`).
///
/// See [rosenpass_oqs::ClassicMceliece460896]
/// and [rosenpass_oqs::Kyber512] for more details on the specific KEMS.
///
pub mod kem {
pub use rosenpass_oqs::ClassicMceliece460896 as StaticKem;
pub use rosenpass_oqs::Kyber512 as EphemeralKem;
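The `Kem` trait constants used by the fuzz targets later in this diff describe the buffer shapes. A minimal encapsulation sketch; the all-zero public key is only there to illustrate the sizes, so the result is deliberately ignored:

```rust
use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::EphemeralKem;

fn encaps_sketch() {
    let pk = [0u8; EphemeralKem::PK_LEN]; // a real public key would come from key generation
    let mut ct = [0u8; EphemeralKem::CT_LEN]; // ciphertext buffer
    let mut shk = [0u8; EphemeralKem::SHK_LEN]; // shared-key buffer
    // Errors are expected for a degenerate all-zero public key, so the result is not checked.
    let _ = EphemeralKem::encaps(&mut shk, &mut ct, &pk);
}
```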

View File

@@ -9,43 +9,19 @@ use blake2::Blake2bMac;
use rosenpass_to::{ops::copy_slice, with_destination, To};
use rosenpass_util::typenum2const;
/// Specifies that the BLAKE2b implementation used is the MAC variant of BLAKE2b
/// with an output and key length of 32 bytes (see [Blake2bMac]).
type Impl = Blake2bMac<U32>;
type KeyLen = <Impl as KeySizeUser>::KeySize;
type OutLen = <Impl as OutputSizeUser>::OutputSize;
/// The key length for BLAKE2b supported by this API. Currently 32 Bytes.
const KEY_LEN: usize = typenum2const! { KeyLen };
/// The output length for BLAKE2b supported by this API. Currently 32 Bytes.
const OUT_LEN: usize = typenum2const! { OutLen };
/// Minimal key length supported by this API.
pub const KEY_MIN: usize = KEY_LEN;
/// Maximal key length supported by this API.
pub const KEY_MAX: usize = KEY_LEN;
/// Minimal output length supported by this API.
pub const OUT_MIN: usize = OUT_LEN;
/// Maximal output length supported by this API.
pub const OUT_MAX: usize = OUT_LEN;
/// Hashes the given `data` with the [Blake2bMac] hash function under the given `key`.
/// Both the output length and the key length are 32 bytes (or 256 bits).
///
/// # Examples
///
///```rust
/// # use rosenpass_ciphers::subtle::blake2b::hash;
/// use rosenpass_to::To;
/// let zero_key: [u8; 32] = [0; 32];
/// let data: [u8; 32] = [255; 32];
/// // buffer for the hash output
/// let mut hash_data: [u8; 32] = [0u8; 32];
///
/// assert!(hash(&zero_key, &data).to(&mut hash_data).is_ok(), "Hashing has to return OK result");
///```
///
#[inline]
pub fn hash<'a>(key: &'a [u8], data: &'a [u8]) -> impl To<[u8], anyhow::Result<()>> + 'a {
with_destination(|out: &mut [u8]| {
@@ -57,9 +33,10 @@ pub fn hash<'a>(key: &'a [u8], data: &'a [u8]) -> impl To<[u8], anyhow::Result<(
// out the right way to use the imports while allowing for zeroization.
// An API based on slices might actually be simpler.
let mut tmp = Zeroizing::new([0u8; OUT_LEN]);
let tmp = GenericArray::from_mut_slice(tmp.as_mut());
h.finalize_into(tmp);
let mut tmp = GenericArray::from_mut_slice(tmp.as_mut());
h.finalize_into(&mut tmp);
copy_slice(tmp.as_ref()).to(out);
Ok(())
})
}

View File

@@ -6,39 +6,10 @@ use chacha20poly1305::aead::generic_array::GenericArray;
use chacha20poly1305::ChaCha20Poly1305 as AeadImpl;
use chacha20poly1305::{AeadCore, AeadInPlace, KeyInit, KeySizeUser};
/// The key length is 32 bytes or 256 bits.
pub const KEY_LEN: usize = typenum2const! { <AeadImpl as KeySizeUser>::KeySize };
/// The MAC tag length is 16 bytes or 128 bits.
pub const TAG_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::TagSize };
/// The nonce length is 12 bytes or 96 bits.
pub const NONCE_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::NonceSize };
/// Encrypts using ChaCha20Poly1305 as implemented in [RustCrypto](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305).
/// `key` MUST be chosen (pseudo-)randomly and `nonce` MUST NOT be reused. The `key` slice MUST have
/// a length of [KEY_LEN]. The `nonce` slice MUST have a length of [NONCE_LEN]. The last [TAG_LEN] bytes
/// written in `ciphertext` are the tag guaranteeing integrity. `ciphertext` MUST have a capacity of
/// `plaintext.len()` + [TAG_LEN].
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::chacha20poly1305_ietf::{encrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
///
/// const PLAINTEXT_LEN: usize = 43;
/// let plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(PLAINTEXT_LEN, plaintext.len());
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut ciphertext_buffer = [0u8;PLAINTEXT_LEN + TAG_LEN];
///
/// let res: anyhow::Result<()> = encrypt(&mut ciphertext_buffer, key, nonce, additional_data, plaintext);
/// assert!(res.is_ok());
/// # let expected_ciphertext: &[u8] = &[239, 104, 148, 202, 120, 32, 77, 27, 246, 206, 226, 17,
/// # 83, 78, 122, 116, 187, 123, 70, 199, 58, 130, 21, 1, 107, 230, 58, 77, 18, 152, 31, 159, 80,
/// # 151, 72, 27, 236, 137, 60, 55, 180, 31, 71, 97, 199, 12, 60, 155, 70, 221, 225, 110, 132, 191,
/// # 8, 114, 85, 4, 25];
/// # assert_eq!(expected_ciphertext, &ciphertext_buffer);
///```
#[inline]
pub fn encrypt(
ciphertext: &mut [u8],
@@ -50,38 +21,11 @@ pub fn encrypt(
let nonce = GenericArray::from_slice(nonce);
let (ct, mac) = ciphertext.split_at_mut(ciphertext.len() - TAG_LEN);
copy_slice(plaintext).to(ct);
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(nonce, ad, ct)?;
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(&nonce, ad, ct)?;
copy_slice(&mac_value[..]).to(mac);
Ok(())
}
/// Decrypts a `ciphertext` and verifies the integrity of the `ciphertext` and the additional data
/// `ad`, using ChaCha20Poly1305 as implemented in [RustCrypto](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305).
///
/// The `key` slice MUST have a length of [KEY_LEN]. The `nonce` slice MUST have a length of
/// [NONCE_LEN]. The plaintext buffer must have a capacity of `ciphertext.len()` - [TAG_LEN].
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::chacha20poly1305_ietf::{decrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
/// let ciphertext: &[u8] = &[239, 104, 148, 202, 120, 32, 77, 27, 246, 206, 226, 17,
/// 83, 78, 122, 116, 187, 123, 70, 199, 58, 130, 21, 1, 107, 230, 58, 77, 18, 152, 31, 159, 80,
/// 151, 72, 27, 236, 137, 60, 55, 180, 31, 71, 97, 199, 12, 60, 155, 70, 221, 225, 110, 132, 191,
/// 8, 114, 85, 4, 25]; // this is the ciphertext generated by the example for the encryption
/// const PLAINTEXT_LEN: usize = 43;
/// assert_eq!(PLAINTEXT_LEN + TAG_LEN, ciphertext.len());
///
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut plaintext_buffer = [0u8; PLAINTEXT_LEN];
///
/// let res: anyhow::Result<()> = decrypt(&mut plaintext_buffer, key, nonce, additional_data, ciphertext);
/// assert!(res.is_ok());
/// let expected_plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(expected_plaintext, plaintext_buffer);
///
///```
#[inline]
pub fn decrypt(
plaintext: &mut [u8],
@@ -94,6 +38,6 @@ pub fn decrypt(
let (ct, mac) = ciphertext.split_at(ciphertext.len() - TAG_LEN);
let tag = GenericArray::from_slice(mac);
copy_slice(ct).to(plaintext);
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(nonce, ad, plaintext, tag)?;
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(&nonce, ad, plaintext, tag)?;
Ok(())
}

View File

@@ -1,117 +0,0 @@
use rosenpass_to::ops::copy_slice;
use rosenpass_to::To;
use zeroize::Zeroize;
/// The key length is 32 bytes or 256 bits.
pub const KEY_LEN: usize = 32; // Grrrr! Libcrux, please provide me these constants.
/// The MAC tag length is 16 bytes or 128 bits.
pub const TAG_LEN: usize = 16;
/// The nonce length is 12 bytes or 96 bits.
pub const NONCE_LEN: usize = 12;
/// Encrypts using ChaCha20Poly1305 as implemented in [libcrux](https://github.com/cryspen/libcrux).
/// Key and nonce MUST be chosen (pseudo-)randomly. The `key` slice MUST have a length of
/// [KEY_LEN]. The `nonce` slice MUST have a length of [NONCE_LEN]. The last [TAG_LEN] bytes
/// written in `ciphertext` are the tag guaranteeing integrity. `ciphertext` MUST have a capacity of
/// `plaintext.len()` + [TAG_LEN].
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::chacha20poly1305_ietf_libcrux::{encrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
///
/// const PLAINTEXT_LEN: usize = 43;
/// let plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(PLAINTEXT_LEN, plaintext.len());
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut ciphertext_buffer = [0u8; PLAINTEXT_LEN + TAG_LEN];
///
/// let res: anyhow::Result<()> = encrypt(&mut ciphertext_buffer, key, nonce, additional_data, plaintext);
/// assert!(res.is_ok());
/// # let expected_ciphertext: &[u8] = &[239, 104, 148, 202, 120, 32, 77, 27, 246, 206, 226, 17,
/// # 83, 78, 122, 116, 187, 123, 70, 199, 58, 130, 21, 1, 107, 230, 58, 77, 18, 152, 31, 159, 80,
/// # 151, 72, 27, 236, 137, 60, 55, 180, 31, 71, 97, 199, 12, 60, 155, 70, 221, 225, 110, 132, 191,
/// # 8, 114, 85, 4, 25];
/// # assert_eq!(expected_ciphertext, &ciphertext_buffer);
///```
///
#[inline]
pub fn encrypt(
ciphertext: &mut [u8],
key: &[u8],
nonce: &[u8],
ad: &[u8],
plaintext: &[u8],
) -> anyhow::Result<()> {
let (ciphertext, mac) = ciphertext.split_at_mut(ciphertext.len() - TAG_LEN);
use libcrux::aead as C;
let crux_key = C::Key::Chacha20Poly1305(C::Chacha20Key(key.try_into().unwrap()));
let crux_iv = C::Iv(nonce.try_into().unwrap());
copy_slice(plaintext).to(ciphertext);
let crux_tag = libcrux::aead::encrypt(&crux_key, ciphertext, crux_iv, ad).unwrap();
copy_slice(crux_tag.as_ref()).to(mac);
match crux_key {
C::Key::Chacha20Poly1305(mut k) => k.0.zeroize(),
_ => panic!(),
}
Ok(())
}
/// Decrypts a `ciphertext` and verifies the integrity of the `ciphertext` and the additional data
/// `ad`, using ChaCha20Poly1305 as implemented in [libcrux](https://github.com/cryspen/libcrux).
///
/// The `key` slice MUST have a length of [KEY_LEN]. The `nonce` slice MUST have a length of
/// [NONCE_LEN]. The plaintext buffer must have a capacity of `ciphertext.len()` - [TAG_LEN].
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::chacha20poly1305_ietf_libcrux::{decrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
/// let ciphertext: &[u8] = &[239, 104, 148, 202, 120, 32, 77, 27, 246, 206, 226, 17,
/// 83, 78, 122, 116, 187, 123, 70, 199, 58, 130, 21, 1, 107, 230, 58, 77, 18, 152, 31, 159, 80,
/// 151, 72, 27, 236, 137, 60, 55, 180, 31, 71, 97, 199, 12, 60, 155, 70, 221, 225, 110, 132, 191,
/// 8, 114, 85, 4, 25]; // this is the ciphertext generated by the example for the encryption
/// const PLAINTEXT_LEN: usize = 43;
/// assert_eq!(PLAINTEXT_LEN + TAG_LEN, ciphertext.len());
///
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut plaintext_buffer = [0u8; PLAINTEXT_LEN];
///
/// let res: anyhow::Result<()> = decrypt(&mut plaintext_buffer, key, nonce, additional_data, ciphertext);
/// assert!(res.is_ok());
/// let expected_plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(expected_plaintext, plaintext_buffer);
///
///```
#[inline]
pub fn decrypt(
plaintext: &mut [u8],
key: &[u8],
nonce: &[u8],
ad: &[u8],
ciphertext: &[u8],
) -> anyhow::Result<()> {
let (ciphertext, mac) = ciphertext.split_at(ciphertext.len() - TAG_LEN);
use libcrux::aead as C;
let crux_key = C::Key::Chacha20Poly1305(C::Chacha20Key(key.try_into().unwrap()));
let crux_iv = C::Iv(nonce.try_into().unwrap());
let crux_tag = C::Tag::from_slice(mac).unwrap();
copy_slice(ciphertext).to(plaintext);
libcrux::aead::decrypt(&crux_key, plaintext, crux_iv, ad, &crux_tag).unwrap();
match crux_key {
C::Key::Chacha20Poly1305(mut k) => k.0.zeroize(),
_ => panic!(),
}
Ok(())
}

View File

@@ -6,15 +6,10 @@ use rosenpass_to::{ops::copy_slice, with_destination, To};
use crate::subtle::blake2b;
/// The key length, 32 bytes or 256 bits.
pub const KEY_LEN: usize = 32;
/// The minimal key length, identical to [KEY_LEN]
pub const KEY_MIN: usize = KEY_LEN;
/// The maximal key length, identical to [KEY_LEN]
pub const KEY_MAX: usize = KEY_LEN;
/// The minimal output length, see [blake2b::OUT_MIN]
pub const OUT_MIN: usize = blake2b::OUT_MIN;
/// The maximal output length, see [blake2b::OUT_MAX]
pub const OUT_MAX: usize = blake2b::OUT_MAX;
/// This is a woefully incorrect implementation of hmac_blake2b.
@@ -24,22 +19,6 @@ pub const OUT_MAX: usize = blake2b::OUT_MAX;
///
/// This will be replaced, likely by Kekkac at some point soon.
/// <https://github.com/rosenpass/rosenpass/pull/145>
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::incorrect_hmac_blake2b::hash;
/// use rosenpass_to::To;
/// let key: [u8; 32] = [0; 32];
/// let data: [u8; 32] = [255; 32];
/// // buffer for the hash output
/// let mut hash_data: [u8; 32] = [0u8; 32];
///
/// assert!(hash(&key, &data).to(&mut hash_data).is_ok(), "Hashing has to return OK result");
/// # let expected_hash: &[u8] = &[5, 152, 135, 141, 151, 106, 147, 8, 220, 95, 38, 66, 29, 33, 3,
/// 104, 250, 114, 131, 119, 27, 56, 59, 44, 11, 67, 230, 113, 112, 20, 80, 103];
/// # assert_eq!(hash_data, expected_hash);
///```
///
#[inline]
pub fn hash<'a>(key: &'a [u8], data: &'a [u8]) -> impl To<[u8], anyhow::Result<()>> + 'a {
const IPAD: [u8; KEY_LEN] = [0x36u8; KEY_LEN];

View File

@@ -1,13 +1,4 @@
/// This module provides the following cryptographic schemes:
/// - [blake2b]: The blake2b hash function
/// - [chacha20poly1305_ietf]: The Chacha20Poly1305 AEAD as implemented in [RustCrypto](https://crates.io/crates/chacha20poly1305) (only used when the feature `experiment_libcrux` is disabled).
/// - [chacha20poly1305_ietf_libcrux]: The Chacha20Poly1305 AEAD as implemented in [libcrux](https://github.com/cryspen/libcrux) (only used when the feature `experiment_libcrux` is enabled).
/// - [incorrect_hmac_blake2b]: An (incorrect) hmac based on [blake2b].
/// - [xchacha20poly1305_ietf] The Chacha20Poly1305 AEAD as implemented in [RustCrypto](https://crates.io/crates/chacha20poly1305)
pub mod blake2b;
#[cfg(not(feature = "experiment_libcrux"))]
pub mod chacha20poly1305_ietf;
#[cfg(feature = "experiment_libcrux")]
pub mod chacha20poly1305_ietf_libcrux;
pub mod incorrect_hmac_blake2b;
pub mod xchacha20poly1305_ietf;

View File

@@ -6,41 +6,10 @@ use chacha20poly1305::aead::generic_array::GenericArray;
use chacha20poly1305::XChaCha20Poly1305 as AeadImpl;
use chacha20poly1305::{AeadCore, AeadInPlace, KeyInit, KeySizeUser};
/// The key length is 32 bytes or 256 bits.
pub const KEY_LEN: usize = typenum2const! { <AeadImpl as KeySizeUser>::KeySize };
/// The MAC tag length is 16 bytes or 128 bits.
pub const TAG_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::TagSize };
/// The nonce length is 24 bytes or 192 bits.
pub const NONCE_LEN: usize = typenum2const! { <AeadImpl as AeadCore>::NonceSize };
/// Encrypts using XChaCha20Poly1305 as implemented in [RustCrypto](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305).
/// `key` and `nonce` MUST be chosen (pseudo-)randomly. The `key` slice MUST have a length of
/// [KEY_LEN]. The `nonce` slice MUST have a length of [NONCE_LEN].
/// In contrast to [chacha20poly1305_ietf::encrypt](crate::subtle::chacha20poly1305_ietf::encrypt) and
/// [chacha20poly1305_ietf_libcrux::encrypt](crate::subtle::chacha20poly1305_ietf_libcrux::encrypt),
/// `nonce` is also written into `ciphertext` and therefore ciphertext MUST have a length
/// of at least [NONCE_LEN] + `plaintext.len()` + [TAG_LEN].
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::xchacha20poly1305_ietf::{encrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
/// const PLAINTEXT_LEN: usize = 43;
/// let plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(PLAINTEXT_LEN, plaintext.len());
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut ciphertext_buffer = [0u8; NONCE_LEN + PLAINTEXT_LEN + TAG_LEN];
///
///
/// let res: anyhow::Result<()> = encrypt(&mut ciphertext_buffer, key, nonce, additional_data, plaintext);
/// # assert!(res.is_ok());
/// # let expected_ciphertext: &[u8] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/// # 0, 0, 0, 0, 8, 241, 229, 253, 200, 81, 248, 30, 183, 149, 134, 168, 149, 87, 109, 49, 159, 108,
/// # 206, 89, 51, 232, 232, 197, 163, 253, 254, 208, 73, 76, 253, 13, 247, 162, 133, 184, 177, 44,
/// # 73, 138, 176, 193, 61, 248, 61, 183, 164, 192, 214, 168, 4, 1, 62, 243, 36, 48, 149, 164, 6];
/// # assert_eq!(expected_ciphertext, &ciphertext_buffer);
///```
#[inline]
pub fn encrypt(
ciphertext: &mut [u8],
@@ -54,43 +23,11 @@ pub fn encrypt(
let (ct, mac) = ct_mac.split_at_mut(ct_mac.len() - TAG_LEN);
copy_slice(nonce).to(n);
copy_slice(plaintext).to(ct);
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(nonce, ad, ct)?;
let mac_value = AeadImpl::new_from_slice(key)?.encrypt_in_place_detached(&nonce, ad, ct)?;
copy_slice(&mac_value[..]).to(mac);
Ok(())
}
/// Decrypts a `ciphertext` and verifies the integrity of the `ciphertext` and the additional data
/// `ad`, using XChaCha20Poly1305 as implemented in [RustCrypto](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305).
///
/// The `key` slice MUST have a length of [KEY_LEN]. The `nonce` slice MUST have a length of
/// [NONCE_LEN]. The plaintext buffer must have a capacity of `ciphertext.len()` - [TAG_LEN] - [NONCE_LEN].
///
/// In contrast to [chacha20poly1305_ietf::decrypt](crate::subtle::chacha20poly1305_ietf::decrypt) and
/// [chacha20poly1305_ietf_libcrux::decrypt](crate::subtle::chacha20poly1305_ietf_libcrux::decrypt),
/// `ciphertext` MUST include the nonce, as it is not passed separately.
///
/// # Examples
///```rust
/// # use rosenpass_ciphers::subtle::xchacha20poly1305_ietf::{decrypt, TAG_LEN, KEY_LEN, NONCE_LEN};
/// let ciphertext: &[u8] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/// # 0, 0, 0, 0, 8, 241, 229, 253, 200, 81, 248, 30, 183, 149, 134, 168, 149, 87, 109, 49, 159, 108,
/// # 206, 89, 51, 232, 232, 197, 163, 253, 254, 208, 73, 76, 253, 13, 247, 162, 133, 184, 177, 44,
/// # 73, 138, 176, 193, 61, 248, 61, 183, 164, 192, 214, 168, 4, 1, 62, 243, 36, 48, 149, 164, 6];
/// // this is the ciphertext generated by the example for the encryption
/// const PLAINTEXT_LEN: usize = 43;
/// assert_eq!(PLAINTEXT_LEN + TAG_LEN + NONCE_LEN, ciphertext.len());
///
/// let key: &[u8] = &[0u8; KEY_LEN]; // THIS IS NOT A SECURE KEY
/// let nonce: &[u8] = &[0u8; NONCE_LEN]; // THIS IS NOT A SECURE NONCE
/// let additional_data: &[u8] = "the encrypted message is very important".as_bytes();
/// let mut plaintext_buffer = [0u8; PLAINTEXT_LEN];
///
/// let res: anyhow::Result<()> = decrypt(&mut plaintext_buffer, key, additional_data, ciphertext);
/// assert!(res.is_ok());
/// let expected_plaintext = "post-quantum cryptography is very important".as_bytes();
/// assert_eq!(expected_plaintext, plaintext_buffer);
///
///```
#[inline]
pub fn decrypt(
plaintext: &mut [u8],
@@ -103,6 +40,6 @@ pub fn decrypt(
let nonce = GenericArray::from_slice(n);
let tag = GenericArray::from_slice(mac);
copy_slice(ct).to(plaintext);
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(nonce, ad, plaintext, tag)?;
AeadImpl::new_from_slice(key)?.decrypt_in_place_detached(&nonce, ad, plaintext, tag)?;
Ok(())
}

View File

@@ -1,43 +1,11 @@
//! Constant-time comparison
use core::ptr;
/// Little endian memcmp version of quininer/memsec
/// https://github.com/quininer/memsec/blob/bbc647967ff6d20d6dccf1c85f5d9037fcadd3b0/src/lib.rs#L30
///
/// # Panic & Safety
///
/// Both input arrays must be at least of the indicated length.
///
/// See [std::ptr::read_volatile] on safety.
#[inline(never)]
pub unsafe fn memcmp_le(b1: *const u8, b2: *const u8, len: usize) -> i32 {
let mut res = 0;
for i in 0..len {
let diff =
i32::from(ptr::read_volatile(b1.add(i))) - i32::from(ptr::read_volatile(b2.add(i)));
res = (res & (((diff - 1) & !diff) >> 8)) | diff;
}
((res - 1) >> 8) + (res >> 8) + 1
}
#[test]
pub fn memcmp_le_test() {
// use rosenpass_constant_time::memcmp_le;
let a = [0, 1, 0, 0];
let b = [0, 0, 0, 1];
assert_eq!(-1, unsafe { memcmp_le(a.as_ptr(), b.as_ptr(), 4) });
assert_eq!(0, unsafe { memcmp_le(a.as_ptr(), a.as_ptr(), 4) });
assert_eq!(1, unsafe { memcmp_le(b.as_ptr(), a.as_ptr(), 4) });
}
/// compares two slices of memory content and returns an integer indicating the relationship between
/// the slices
/// Compares two slices of memory containing arbitrary-length little endian unsigned integers
/// and returns an integer indicating the relationship between the slices.
///
/// ## Returns
/// - <0 if the first byte that does not match both slices has a lower value in `a` than in `b`
/// - 0 if the contents are equal
/// - >0 if the first byte that does not match both slices has a higher value in `a` than in `b`
///
/// - -1 if a < b
/// - 0 if a = b
/// - 1 if a > b
///
/// ## Leaks
/// If the two slices have different lengths, the function will return immediately. This
@@ -48,32 +16,27 @@ pub fn memcmp_le_test() {
/// considered safe.
///
/// ## Tests
/// For discussion on how to ensure the constant-time execution of this function, see
/// <https://github.com/rosenpass/rosenpass/issues/232>
///
/// # Examples
///
/// ```rust
/// use rosenpass_constant_time::compare;
/// let a = [0, 1, 0, 0];
/// let b = [0, 0, 0, 1];
/// assert_eq!(-1, compare(&a, &b));
/// assert_eq!(0, compare(&a, &a));
/// assert_eq!(1, compare(&b, &a));
/// assert_eq!(compare(&[], &[]), 0);
///
/// assert_eq!(compare(&[0], &[1]), -1);
/// assert_eq!(compare(&[0], &[0]), 0);
/// assert_eq!(compare(&[1], &[0]), 1);
///
/// assert_eq!(compare(&[0, 0], &[1, 0]), -1);
/// assert_eq!(compare(&[0, 0], &[0, 0]), 0);
/// assert_eq!(compare(&[1, 0], &[0, 0]), 1);
///
/// assert_eq!(compare(&[1, 0], &[0, 1]), -1);
/// assert_eq!(compare(&[0, 1], &[0, 0]), 1);
/// ```
///
/// # Panic
///
/// This function will panic if the input arrays are of different lengths.
///
/// ```should_panic
/// use rosenpass_constant_time::compare;
/// let a = [0, 1, 0];
/// let b = [0, 0, 0, 1];
/// compare(&a, &b);
/// ```
/// For discussion on how to ensure the constant-time execution of this function, see
/// <https://github.com/rosenpass/rosenpass/issues/232>
#[inline]
pub fn compare(a: &[u8], b: &[u8]) -> i32 {
assert!(a.len() == b.len());
unsafe { memcmp_le(a.as_ptr(), b.as_ptr(), a.len()) }
unsafe { memsec::memcmp(a.as_ptr(), b.as_ptr(), a.len()) }
}

View File

@@ -1,5 +1,3 @@
//! Incrementing numbers
use core::hint::black_box;
/// Interpret the given slice as a little-endian unsigned integer

View File

@@ -1,5 +1,3 @@
#![warn(missing_docs)]
#![warn(clippy::missing_docs_in_private_items)]
//! constant-time implementations of some primitives
//!
//! Rosenpass internal library providing basic constant-time operations.

View File

@@ -1,5 +1,3 @@
//! memcmp
/// Compares two slices of memory content and returns whether they are equal
///
/// ## Leaks
@@ -10,27 +8,18 @@
/// The execution time of the function grows approximately linearly with the length of the input. This is
/// considered safe.
///
/// ## Examples
///
/// ```rust
/// use rosenpass_constant_time::memcmp;
/// let a = [0, 0, 0, 0];
/// let b = [0, 0, 0, 1];
/// let c = [0, 0, 0];
/// assert!(memcmp(&a, &a));
/// assert!(!memcmp(&a, &b));
/// assert!(!memcmp(&a, &c));
/// ```
#[inline]
pub fn memcmp(a: &[u8], b: &[u8]) -> bool {
a.len() == b.len() && unsafe { memsec::memeq(a.as_ptr(), b.as_ptr(), a.len()) }
}
/// [tests::memcmp_runs_in_constant_time] runs a statistical test that the equality of the two
/// ## Tests
/// [`tests::memcmp_runs_in_constant_time`] runs a statistical test that the equality of the two
/// input parameters does not correlate with the run time.
///
/// For discussion on how to (further) ensure the constant-time execution of this function,
/// see <https://github.com/rosenpass/rosenpass/issues/232>
#[inline]
pub fn memcmp(a: &[u8], b: &[u8]) -> bool {
a.len() == b.len()
&& unsafe { memsec::memeq(a.as_ptr() as *const u8, b.as_ptr() as *const u8, a.len()) }
}
#[cfg(all(test, feature = "constant_time_tests"))]
mod tests {
use super::*;

View File

@@ -1,5 +1,3 @@
//! xor
use core::hint::black_box;
use rosenpass_to::{with_destination, To};

View File

@@ -1,45 +0,0 @@
#! /usr/bin/env bash
set -e -o pipefail
OUTPUT_DIR="target/grcov"
log() {
echo >&2 "$@"
}
exc() {
echo '$' "$@"
"$@"
}
main() {
exc cd "$(dirname "$0")"
local open="0"
if [[ "$1" == "--open" ]]; then
open="1"
fi
exc cargo llvm-cov --all-features --workspace --doctests --branch
exc cp -rv target/llvm-cov-target/doctestbins target/llvm-cov-target/debug/deps/doctestbins
exc rm -rf "${OUTPUT_DIR}"
exc mkdir -p "${OUTPUT_DIR}"
exc grcov target/llvm-cov-target/ --llvm -s . --branch \
--binary-path ./target/llvm-cov-target/debug/deps \
--ignore-not-existing --ignore '../*' --ignore "/*" \
--excl-line '^\s*#\[(derive|repr)\(' \
-t lcov,html,markdown -o "${OUTPUT_DIR}"
if (( "${open}" == 1 )); then
xdg-open "${PWD}/${OUTPUT_DIR}/html/index.html"
fi
log ""
log "Generated reports in \"${PWD}/${OUTPUT_DIR}\"."
log "Open \"${PWD}/${OUTPUT_DIR}/html/index.html\" to view HTML report."
log ""
}
main "$@"

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# We have to filter this STYLE error out, because it is very platform specific
OUTPUT=$(mandoc -Tlint "$1" | grep --invert-match "STYLE: referenced manual not found")
if [ -z "$OUTPUT" ]
then
exit 0
else
echo "$1 is malformatted, check mandoc -Tlint $1"
echo "$OUTPUT"
exit 1
fi

114
doc/rosenpass.1 Normal file
View File

@@ -0,0 +1,114 @@
.Dd $Mdocdate$
.Dt ROSENPASS 1
.Os
.Sh NAME
.Nm rosenpass
.Nd builds post-quantum-secure VPNs
.Sh SYNOPSIS
.Nm
.Op COMMAND
.Op Ar OPTIONS ...
.Op Ar ARGS ...
.Sh DESCRIPTION
.Nm
performs cryptographic key exchanges that are secure against quantum computers
and then outputs the keys.
These keys can then be passed to various services, such as WireGuard or other
VPN services, as pre-shared keys to achieve security against attackers with
quantum computers.
.Pp
This is a research project and quantum computers are not thought to become
practical in fewer than ten years.
If you are not specifically tasked with developing post-quantum secure systems,
you probably do not need this tool.
.Ss COMMANDS
.Bl -tag -width Ds
.It Ar gen-keys --secret-key <file-path> --public-key <file-path>
Generate a keypair to use in the exchange command later.
Send the public-key file to your communication partner and keep the private-key
file secret!
.It Ar exchange private-key <file-path> public-key <file-path> [ OPTIONS ] PEERS
Start a process to exchange keys with the specified peers.
You should specify at least one peer.
.Pp
Its
.Ar OPTIONS
are as follows:
.Bl -tag -width Ds
.It Ar listen <ip>[:<port>]
Instructs
.Nm
to listen on the specified interface and port.
By default,
.Nm
will listen on all interfaces and select a random port.
.It Ar verbose
Extra logging.
.El
.El
.Ss PEER
Each
.Ar PEER
is defined as follows:
.Qq peer public-key <file-path> [endpoint <ip>[:<port>]] [preshared-key <file-path>] [outfile <file-path>] [wireguard <dev> <peer> <extra_params>]
.Pp
Providing a
.Ar PEER
instructs
.Nm
to exchange keys with the given peer and write the resulting PSK into the given
output file.
You must specify either the outfile or the wireguard output option.
.Pp
The parameters of
.Ar PEER
are as follows:
.Bl -tag -width Ds
.It Ar endpoint <ip>[:<port>]
Specifies the address where the peer can be reached.
This will be automatically updated after the first successful key exchange with
the peer.
If this is unspecified, the peer must initiate the connection.
.It Ar preshared-key <file-path>
You may specify a pre-shared key which will be mixed into the final secret.
.It Ar outfile <file-path>
You may specify a file to write the exchanged keys to.
If this option is specified,
.Nm
will write a notification to standard out every time the key is updated.
.It Ar wireguard <dev> <peer> <extra_params>
This allows you to directly specify a wireguard peer to deploy the
pre-shared-key to.
You may specify extra parameters you would pass to
.Qq wg set
besides the preshared-key parameter which is used by
.Nm .
This makes it possible to add peers entirely from
.Nm .
.El
.Sh EXIT STATUS
.Ex -std
.Sh SEE ALSO
.Xr rp 1 ,
.Xr wg 1
.Rs
.%A Karolin Varner
.%A Benjamin Lipp
.%A Wanja Zaeske
.%A Lisa Schmidt
.%D 2023
.%T Rosenpass
.%U https://rosenpass.eu/whitepaper.pdf
.Re
.Sh STANDARDS
This tool is the reference implementation of the Rosenpass protocol, as
specified within the whitepaper referenced above.
.Sh AUTHORS
Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .

View File

@@ -113,7 +113,7 @@ Rosenpass was created by Karolin Varner, Benjamin Lipp, Wanja Zaeske,
Marei Peischl, Stephan Ajuvo, and Lisa Schmidt.
.Pp
This manual page was written by
.An Clara Engler
.An Emil Engler
.Sh BUGS
The bugs are tracked at
.Lk https://github.com/rosenpass/rosenpass/issues .

55
flake.lock generated
View File

@@ -8,11 +8,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1728282832,
"narHash": "sha256-I7AbcwGggf+CHqpyd/9PiAjpIBGTGx5woYHqtwxaV7I=",
"lastModified": 1699770036,
"narHash": "sha256-bZmI7ytPAYLpyFNgj5xirDkKuAniOkj1xHdv5aIJ5GM=",
"owner": "nix-community",
"repo": "fenix",
"rev": "1ec71be1f4b8f3105c5d38da339cb061fefc43f4",
"rev": "81ab0b4f7ae9ebb57daa0edf119c4891806e4d3a",
"type": "github"
},
"original": {
@@ -26,11 +26,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"type": "github"
},
"original": {
@@ -39,37 +39,56 @@
"type": "github"
}
},
"nixpkgs": {
"naersk": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1728193676,
"narHash": "sha256-PbDWAIjKJdlVg+qQRhzdSor04bAPApDqIv2DofTyynk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ecbc1ca8ffd6aea8372ad16be9ebbb39889e55b6",
"lastModified": 1698420672,
"narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=",
"owner": "nix-community",
"repo": "naersk",
"rev": "aeb58d5e8faead8980a807c840232697982d47b9",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"owner": "nix-community",
"repo": "naersk",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1698846319,
"narHash": "sha256-4jyW/dqFBVpWFnhl0nvP6EN4lP7/ZqPxYRjl6var0Oc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "34bdaaf1f0b7fb6d9091472edc968ff10a8c2857",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"fenix": "fenix",
"flake-utils": "flake-utils",
"naersk": "naersk",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1728249780,
"narHash": "sha256-J269DvCI5dzBmPrXhAAtj566qt0b22TJtF3TIK+tMsI=",
"lastModified": 1699715108,
"narHash": "sha256-yPozsobJU55gj+szgo4Lpcg1lHvGQYAT6Y4MrC80mWE=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "2b750da1a1a2c1d2c70896108d7096089842d877",
"rev": "5fcf5289e726785d20d3aa4d13d90a43ed248e83",
"type": "github"
},
"original": {

349
flake.nix
View File

@@ -1,8 +1,11 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05";
flake-utils.url = "github:numtide/flake-utils";
# for quicker rust builds
naersk.url = "github:nix-community/naersk";
naersk.inputs.nixpkgs.follows = "nixpkgs";
# for rust nightly with llvm-tools-preview
fenix.url = "github:nix-community/fenix";
fenix.inputs.nixpkgs.follows = "nixpkgs";
@@ -11,15 +14,6 @@
outputs = { self, nixpkgs, flake-utils, ... }@inputs:
nixpkgs.lib.foldl (a: b: nixpkgs.lib.recursiveUpdate a b) { } [
#
### Export the overlay.nix from this flake ###
#
{
overlays.default = import ./overlay.nix;
}
#
### Actual Rosenpass Package and Docker Container Images ###
#
@@ -35,39 +29,232 @@
]
(system:
let
scoped = (scope: scope.result);
lib = nixpkgs.lib;
# normal nixpkgs
pkgs = import nixpkgs {
inherit system;
# apply our own overlay, overriding/inserting our packages as defined in ./pkgs
overlays = [ self.overlays.default ];
# TODO remove overlay once a fix for
# https://github.com/NixOS/nixpkgs/issues/216904 got merged
overlays = [
(
final: prev: {
iproute2 = prev.iproute2.overrideAttrs (old:
let
isStatic = prev.stdenv.hostPlatform.isStatic;
in
{
makeFlags = old.makeFlags ++ prev.lib.optional isStatic [
"TC_CONFIG_NO_XT=y"
];
});
}
)
];
};
# parsed Cargo.toml
cargoToml = builtins.fromTOML (builtins.readFile ./rosenpass/Cargo.toml);
# source files relevant for rust
src = scoped rec {
# File suffices to include
extensions = [
"lock"
"rs"
"toml"
];
# Files to explicitly include
files = [
"to/README.md"
];
src = ./.;
filter = (path: type: scoped rec {
inherit (lib) any id removePrefix hasSuffix;
anyof = (any id);
basename = baseNameOf (toString path);
relative = removePrefix (toString src + "/") (toString path);
result = anyof [
(type == "directory")
(any (ext: hasSuffix ".${ext}" basename) extensions)
(any (file: file == relative) files)
];
});
result = pkgs.lib.sources.cleanSourceWith { inherit src filter; };
};
# builds a bin path for all dependencies for the `rp` shellscript
rpBinPath = p: with p; lib.makeBinPath [
coreutils
findutils
gawk
wireguard-tools
];
# a function to generate a nix derivation for rosenpass against any
# given set of nixpkgs
rpDerivation = p:
let
# whether we want to build a statically linked binary
isStatic = p.targetPlatform.isStatic;
# the rust target of `p`
target = p.rust.toRustTargetSpec p.targetPlatform;
# convert a string to shout case
shout = string: builtins.replaceStrings [ "-" ] [ "_" ] (pkgs.lib.toUpper string);
# suitable Rust toolchain
toolchain = with inputs.fenix.packages.${system}; combine [
stable.cargo
stable.rustc
targets.${target}.stable.rust-std
];
# naersk with a custom toolchain
naersk = pkgs.callPackage inputs.naersk {
cargo = toolchain;
rustc = toolchain;
};
# used to trick the build.rs into believing that CMake was ran **again**
fakecmake = pkgs.writeScriptBin "cmake" ''
#! ${pkgs.stdenv.shell} -e
true
'';
in
naersk.buildPackage
{
# metadata and source
name = cargoToml.package.name;
version = cargoToml.package.version;
inherit src;
cargoBuildOptions = x: x ++ [ "-p" "rosenpass" ];
cargoTestOptions = x: x ++ [ "-p" "rosenpass" ];
doCheck = true;
nativeBuildInputs = with pkgs; [
p.stdenv.cc
cmake # for oqs build in the oqs-sys crate
mandoc # for the built-in manual
makeWrapper # for the rp shellscript
pkg-config # let libsodium-sys-stable find libsodium
removeReferencesTo
rustPlatform.bindgenHook # for C-bindings in the crypto libs
];
buildInputs = with p; [ bash libsodium ];
override = x: {
preBuild =
# nix defaults to building for aarch64 _without_ the armv8-a crypto
# extensions, but liboqs depends on these
(lib.optionalString (system == "aarch64-linux") ''
NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -march=armv8-a+crypto"
''
);
# fortify is only compatible with dynamic linking
hardeningDisable = lib.optional isStatic "fortify";
};
overrideMain = x: {
# CMake detects that it was served a _foreign_ target dir, and CMake
# would be executed again upon the second build step of naersk.
# By adding our specially optimized CMake version, we reduce the cost
# of recompilation by 99 % while avoiding any CMake errors.
nativeBuildInputs = [ (lib.hiPrio fakecmake) ] ++ x.nativeBuildInputs;
# make sure that libc is linked; under musl this is not the case by
# default
preBuild = (lib.optionalString isStatic ''
NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -lc"
'');
preInstall = ''
install -D ${./rp} $out/bin/rp
wrapProgram $out/bin/rp --prefix PATH : "${ rpBinPath p }"
'';
};
# We want to build for a specific target...
CARGO_BUILD_TARGET = target;
# ... which might require a non-default linker:
"CARGO_TARGET_${shout target}_LINKER" =
let
inherit (p.stdenv) cc;
in
"${cc}/bin/${cc.targetPrefix}cc";
meta = with pkgs.lib;
{
inherit (cargoToml.package) description homepage;
license = with licenses; [ mit asl20 ];
maintainers = [ maintainers.wucke13 ];
platforms = platforms.all;
};
} // (lib.mkIf isStatic {
# otherwise pkg-config tries to link non-existent dynamic libs
# documented here: https://docs.rs/pkg-config/latest/pkg_config/
PKG_CONFIG_ALL_STATIC = true;
# tell rust to build everything statically linked
CARGO_BUILD_RUSTFLAGS = "-C target-feature=+crt-static";
});
# a function to generate a docker image based of rosenpass
rosenpassOCI = name: pkgs.dockerTools.buildImage rec {
inherit name;
copyToRoot = pkgs.buildEnv {
name = "image-root";
paths = [ self.packages.${system}.${name} ];
pathsToLink = [ "/bin" ];
};
config.Cmd = [ "/bin/rosenpass" ];
};
in
{
packages = {
default = pkgs.rosenpass;
rosenpass = pkgs.rosenpass;
rosenpass-oci-image = pkgs.rosenpass-oci-image;
rp = pkgs.rp;
rec {
packages = rec {
default = rosenpass;
rosenpass = rpDerivation pkgs;
rosenpass-oci-image = rosenpassOCI "rosenpass";
release-package = pkgs.release-package;
# for good measure, we also offer to cross compile to Linux on Arm
aarch64-linux-rosenpass-static =
pkgs.pkgsCross.aarch64-multiplatform.pkgsStatic.rosenpass;
aarch64-linux-rp-static = pkgs.pkgsCross.aarch64-multiplatform.pkgsStatic.rp;
}
//
# We only offer static builds for linux, as this is not supported on OS X
(nixpkgs.lib.attrsets.optionalAttrs pkgs.stdenv.isLinux {
rosenpass-static = pkgs.pkgsStatic.rosenpass;
rosenpass-static-oci-image = pkgs.pkgsStatic.rosenpass-oci-image;
rp-static = pkgs.pkgsStatic.rp;
});
# derivation for the release
release-package =
let
version = cargoToml.package.version;
package =
if pkgs.hostPlatform.isLinux then
packages.rosenpass-static
else packages.rosenpass;
oci-image =
if pkgs.hostPlatform.isLinux then
packages.rosenpass-static-oci-image
else packages.rosenpass-oci-image;
in
pkgs.runCommandNoCC "lace-result" { }
''
mkdir {bin,$out}
cp ${./.}/rp bin/
tar -cvf $out/rosenpass-${system}-${version}.tar bin/rp \
-C ${package} bin/rosenpass
cp ${oci-image} \
$out/rosenpass-oci-image-${system}-${version}.tar.gz
'';
} // (if pkgs.stdenv.isLinux then rec {
rosenpass-static = rpDerivation pkgs.pkgsStatic;
rosenpass-static-oci-image = rosenpassOCI "rosenpass-static";
} else { });
}
))
#
### Linux specifics ###
#
@@ -75,69 +262,91 @@
let
pkgs = import nixpkgs {
inherit system;
# apply our own overlay, overriding/inserting our packages as defined in ./pkgs
overlays = [ self.overlays.default ];
};
packages = self.packages.${system};
in
{
#
### Whitepaper ###
#
packages.whitepaper =
let
tlsetup = (pkgs.texlive.combine {
inherit (pkgs.texlive) scheme-basic acmart amsfonts ccicons
csquotes csvsimple doclicense fancyvrb fontspec gobble
koma-script ifmtarg latexmk lm markdown mathtools minted noto
nunito pgf soul unicode-math lualatex-math paralist
gitinfo2 eso-pic biblatex biblatex-trad biblatex-software
xkeyval xurl xifthen biber;
});
in
pkgs.stdenvNoCC.mkDerivation {
name = "whitepaper";
src = ./papers;
nativeBuildInputs = with pkgs; [
ncurses # tput
python3Packages.pygments
tlsetup # custom tex live scheme
which
];
buildPhase = ''
export HOME=$(mktemp -d)
latexmk -r tex/CI.rc
'';
installPhase = ''
mkdir -p $out
mv *.pdf readme.md $out/
'';
};
#
### Reading materials ###
#
packages.whitepaper = pkgs.whitepaper;
#
### Proof and Proof Tools ###
#
packages.proverif-patched = pkgs.proverif-patched;
packages.proof-proverif = pkgs.proof-proverif;
packages.proverif-patched = pkgs.proverif.overrideAttrs (old: {
postInstall = ''
install -D -t $out/lib cryptoverif.pvl
'';
});
packages.proof-proverif = pkgs.stdenv.mkDerivation {
name = "rosenpass-proverif-proof";
version = "unstable";
src = pkgs.lib.sources.sourceByRegex ./. [
"analyze.sh"
"marzipan(/marzipan.awk)?"
"analysis(/.*)?"
];
nativeBuildInputs = [ pkgs.proverif pkgs.graphviz ];
CRYPTOVERIF_LIB = packages.proverif-patched + "/lib/cryptoverif.pvl";
installPhase = ''
mkdir -p $out
bash analyze.sh -color -html $out
'';
};
#
### Devshells ###
#
devShells.default = pkgs.mkShell {
inherit (pkgs.proof-proverif) CRYPTOVERIF_LIB;
inputsFrom = [ pkgs.rosenpass ];
inherit (packages.proof-proverif) CRYPTOVERIF_LIB;
inputsFrom = [ packages.default ];
nativeBuildInputs = with pkgs; [
cmake # override the fakecmake from the main step above
cargo-release
clippy
rustfmt
nodePackages.prettier
nushell # for the .ci/gen-workflow-files.nu script
proverif-patched
];
};
# TODO: Write this as a patched version of the default environment
devShells.fullEnv = pkgs.mkShell {
inherit (pkgs.proof-proverif) CRYPTOVERIF_LIB;
inputsFrom = [ pkgs.rosenpass ];
nativeBuildInputs = with pkgs; [
cargo-release
rustfmt
nodePackages.prettier
nushell # for the .ci/gen-workflow-files.nu script
proverif-patched
inputs.fenix.packages.${system}.complete.toolchain
pkgs.cargo-llvm-cov
pkgs.grcov
packages.proverif-patched
];
};
devShells.coverage = pkgs.mkShell {
inputsFrom = [ pkgs.rosenpass ];
nativeBuildInputs = [
inputs.fenix.packages.${system}.complete.toolchain
pkgs.cargo-llvm-cov
pkgs.grcov
];
inputsFrom = [ packages.default ];
nativeBuildInputs = with pkgs; [ inputs.fenix.packages.${system}.complete.toolchain cargo-llvm-cov ];
};
checks = {
systemd-rosenpass = pkgs.testers.runNixOSTest ./tests/systemd/rosenpass.nix;
systemd-rp = pkgs.testers.runNixOSTest ./tests/systemd/rp.nix;
cargo-fmt = pkgs.runCommand "check-cargo-fmt"
{ inherit (self.devShells.${system}.default) nativeBuildInputs buildInputs; } ''
cargo fmt --manifest-path=${./.}/Cargo.toml --check --all && touch $out

View File

@@ -4,9 +4,6 @@ version = "0.0.1"
publish = false
edition = "2021"
[features]
experiment_libcrux = ["rosenpass-ciphers/experiment_libcrux"]
[package.metadata]
cargo-fuzz = true
@@ -51,37 +48,13 @@ test = false
doc = false
[[bin]]
name = "fuzz_box_secret_alloc_malloc"
path = "fuzz_targets/box_secret_alloc_malloc.rs"
name = "fuzz_box_secret_alloc"
path = "fuzz_targets/box_secret_alloc.rs"
test = false
doc = false
[[bin]]
name = "fuzz_vec_secret_alloc_malloc"
path = "fuzz_targets/vec_secret_alloc_malloc.rs"
test = false
doc = false
[[bin]]
name = "fuzz_box_secret_alloc_memfdsec"
path = "fuzz_targets/box_secret_alloc_memfdsec.rs"
test = false
doc = false
[[bin]]
name = "fuzz_vec_secret_alloc_memfdsec"
path = "fuzz_targets/vec_secret_alloc_memfdsec.rs"
test = false
doc = false
[[bin]]
name = "fuzz_box_secret_alloc_memfdsec_mallocfb"
path = "fuzz_targets/box_secret_alloc_memfdsec_mallocfb.rs"
test = false
doc = false
[[bin]]
name = "fuzz_vec_secret_alloc_memfdsec_mallocfb"
path = "fuzz_targets/vec_secret_alloc_memfdsec_mallocfb.rs"
name = "fuzz_vec_secret_alloc"
path = "fuzz_targets/vec_secret_alloc.rs"
test = false
doc = false

View File

@@ -15,7 +15,8 @@ pub struct Input {
}
fuzz_target!(|input: Input| {
let mut ciphertext = vec![0u8; input.plaintext.len() + 16];
let mut ciphertext: Vec<u8> = Vec::with_capacity(input.plaintext.len() + 16);
ciphertext.resize(input.plaintext.len() + 16, 0);
aead::encrypt(
ciphertext.as_mut_slice(),

View File

@@ -2,12 +2,7 @@
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_box;
use rosenpass_secret_memory::policy::*;
use std::sync::Once;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_try_use_memfd_secrets);
let _ = secret_box(data);
});

View File

@@ -1,12 +0,0 @@
#![no_main]
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_box;
use rosenpass_secret_memory::policy::*;
use std::sync::Once;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_use_only_malloc_secrets);
let _ = secret_box(data);
});

View File

@@ -1,13 +0,0 @@
#![no_main]
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_box;
use rosenpass_secret_memory::policy::*;
use std::sync::Once;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_use_only_memfd_secrets);
let _ = secret_box(data);
});

View File

@@ -4,17 +4,11 @@ extern crate rosenpass;
use libfuzzer_sys::fuzz_target;
use rosenpass::protocol::CryptoServer;
use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::StaticKem;
use rosenpass_secret_memory::policy::*;
use rosenpass_secret_memory::{PublicBox, Secret};
use std::sync::Once;
use rosenpass_secret_memory::Secret;
static ONCE: Once = Once::new();
fuzz_target!(|rx_buf: &[u8]| {
ONCE.call_once(secret_policy_use_only_malloc_secrets);
let sk = Secret::from_slice(&[0; StaticKem::SK_LEN]);
let pk = PublicBox::from_slice(&[0; StaticKem::PK_LEN]);
let sk = Secret::from_slice(&[0; 13568]);
let pk = Secret::from_slice(&[0; 524160]);
let mut cs = CryptoServer::new(sk, pk);
let mut tx_buf = [0; 10240];

View File

@@ -9,12 +9,12 @@ use rosenpass_ciphers::kem::EphemeralKem;
#[derive(arbitrary::Arbitrary, Debug)]
pub struct Input {
pub pk: [u8; EphemeralKem::PK_LEN],
pub pk: [u8; 800],
}
fuzz_target!(|input: Input| {
let mut ciphertext = [0u8; EphemeralKem::CT_LEN];
let mut shared_secret = [0u8; EphemeralKem::SHK_LEN];
let mut ciphertext = [0u8; 768];
let mut shared_secret = [0u8; 32];
EphemeralKem::encaps(&mut shared_secret, &mut ciphertext, &input.pk).unwrap();
});

View File

@@ -7,8 +7,8 @@ use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::StaticKem;
fuzz_target!(|input: [u8; StaticKem::PK_LEN]| {
let mut ciphertext = [0u8; StaticKem::CT_LEN];
let mut shared_secret = [0u8; StaticKem::SHK_LEN];
let mut ciphertext = [0u8; 188];
let mut shared_secret = [0u8; 32];
// We expect errors while fuzzing therefore we do not check the result.
let _ = StaticKem::encaps(&mut shared_secret, &mut ciphertext, &input);

View File

@@ -1,15 +1,9 @@
#![no_main]
use std::sync::Once;
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_vec;
use rosenpass_secret_memory::policy::*;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_try_use_memfd_secrets);
let mut vec = secret_vec();
vec.extend_from_slice(data);
});

View File

@@ -1,15 +0,0 @@
#![no_main]
use std::sync::Once;
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_vec;
use rosenpass_secret_memory::policy::*;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_use_only_malloc_secrets);
let mut vec = secret_vec();
vec.extend_from_slice(data);
});

View File

@@ -1,15 +0,0 @@
#![no_main]
use std::sync::Once;
use libfuzzer_sys::fuzz_target;
use rosenpass_secret_memory::alloc::secret_vec;
use rosenpass_secret_memory::policy::*;
static ONCE: Once = Once::new();
fuzz_target!(|data: &[u8]| {
ONCE.call_once(secret_policy_use_only_memfd_secrets);
let mut vec = secret_vec();
vec.extend_from_slice(data);
});

View File

@@ -1,13 +0,0 @@
secret_key = "peer_a.rp.sk"
public_key = "peer_a.rp.pk"
listen = ["[::1]:46127"]
verbosity = "Verbose"
[api]
listen_path = []
listen_fd = []
stream_fd = []
[[peers]]
public_key = "peer_b.rp.pk"
device = "rpPskBrkTestA"

View File

@@ -1,14 +0,0 @@
secret_key = "peer_b.rp.sk"
public_key = "peer_b.rp.pk"
listen = []
verbosity = "Verbose"
[api]
listen_path = []
listen_fd = []
stream_fd = []
[[peers]]
public_key = "peer_a.rp.pk"
endpoint = "[::1]:46127"
device = "rpPskBrkTestB"

View File

@@ -1,215 +0,0 @@
#! /bin/bash
set -e -o pipefail
enquote() {
while (( "$#" > 1)); do
printf "%q " "$1"
shift
done
if (("$#" > 0)); then
printf "%q" "$1"
fi
}
CLEANUP_HOOKS=()
hook_cleanup() {
local hook
set +e +o pipefail
for hook in "${CLEANUP_HOOKS[@]}"; do
eval "${hook}"
done
}
cleanup() {
CLEANUP_HOOKS=("$(enquote exc_with_ctx cleanup "$@")" "${CLEANUP_HOOKS[@]}")
}
cleanup_eval() {
cleanup eval "$*"
}
stderr() {
echo >&2 "$@"
}
log() {
local level; level="$1"; shift || fatal "USAGE: log LVL MESSAGE.."
stderr "[${level}]" "$@"
}
info() {
log "INFO" "$@"
}
debug() {
log "DEBUG" "$@"
}
fatal() {
log "FATAL" "$@"
exit 1
}
assert() {
local msg; msg="$1"; shift || fatal "USAGE: assert_cmd MESSAGE COMMAND.."
"$@" || fatal "${msg}"
}
abs_dir() {
local dir; dir="$1"; shift || fatal "USAGE: abs_dir DIR"
(
cd "${dir}"
pwd -P
)
}
exc_with_ctx() {
local ctx; ctx="$1"; shift || fatal "USAGE: exc_with_ctx CONTEXT COMMAND.."
if [[ -z "${ctx}" ]]; then
info '$' "$@"
else
info "${ctx}\$" "$@"
fi
"$@"
}
exc() {
exc_with_ctx "" "$@"
}
exc_eval() {
exc eval "$*"
}
exc_eval_with_ctx() {
local ctx; ctx="$1"; shift || fatal "USAGE: exc_eval_with_ctx CONTEXT EVAL_COMMAND.."
exc_with_ctx "eval:${ctx}" "$*"
}
exc_as_user() {
exc sudo -u "${SUDO_USER}" "$@"
}
exc_eval_as_user() {
exc_as_user bash -c "$*"
}
fork_eval_as_user() {
exc sudo -u "${SUDO_USER}" bash -c "$*" &
local pid; pid="$!"
cleanup wait "${pid}"
cleanup pkill -2 -P "${pid}" # Reverse ordering
}
info_success() {
stderr
stderr
if [[ "${SUCCESS}" = 1 ]]; then
stderr " Test was a success!"
else
stderr " !!! TEST WAS A FAILURE!!!"
fi
stderr
}
main() {
assert "Use as root with sudo" [ "$(id -u)" -eq 0 ]
assert "Use as root with sudo" [ -n "${SUDO_UID}" ]
assert "SUDO_UID is 0; refusing to build as root" [ "${SUDO_UID}" -ne 0 ]
cleanup info_success
trap hook_cleanup EXIT
SCRIPT="$0"
CFG_TEMPLATE_DIR="$(abs_dir "$(dirname "${SCRIPT}")")"
REPO="$(abs_dir "${CFG_TEMPLATE_DIR}/../..")"
BINS="${REPO}/target/debug"
# Create temp dir
TMP_DIR="/tmp/rosenpass-psk-broker-test-$(date +%s)-$(uuidgen)"
cleanup rm -rf "${TMP_DIR}"
exc_as_user mkdir -p "${TMP_DIR}"
# Copy config
CFG_DIR="${TMP_DIR}/cfg"
exc_as_user cp -R "${CFG_TEMPLATE_DIR}" "${CFG_DIR}"
exc umask 077
exc cd "${REPO}"
local build_cmd; build_cmd=(cargo build --workspace --color=always --all-features --bins --profile dev)
if test -e "${BINS}/rosenpass-wireguard-broker-privileged" -a -e "${BINS}/rosenpass"; then
info "Found the binaries rosenpass-wireguard-broker-privileged and rosenpass." \
"Run following commands as a regular user to recompile the binaries with the right options" \
"in case of an error:" '$' "${build_cmd[@]}"
else
exc_as_user "${build_cmd[@]}"
fi
exc sudo setcap CAP_NET_ADMIN=+eip "${BINS}/rosenpass-wireguard-broker-privileged"
exc cd "${CFG_DIR}"
exc_eval_as_user "wg genkey > peer_a.wg.sk"
exc_eval_as_user "wg pubkey < peer_a.wg.sk > peer_a.wg.pk"
exc_eval_as_user "wg genkey > peer_b.wg.sk"
exc_eval_as_user "wg pubkey < peer_b.wg.sk > peer_b.wg.pk"
exc_eval_as_user "wg genpsk > peer_a_invalid.psk"
exc_eval_as_user "wg genpsk > peer_b_invalid.psk"
exc_eval_as_user "echo $(enquote "peer = \"$(cat peer_b.wg.pk)\"") >> peer_a.rp.config"
exc_eval_as_user "echo $(enquote "peer = \"$(cat peer_a.wg.pk)\"") >> peer_b.rp.config"
exc_as_user "${BINS}"/rosenpass gen-keys peer_a.rp.config
exc_as_user "${BINS}"/rosenpass gen-keys peer_b.rp.config
cleanup ip l del dev rpPskBrkTestA
cleanup ip l del dev rpPskBrkTestB
exc ip l add dev rpPskBrkTestA type wireguard
exc ip l add dev rpPskBrkTestB type wireguard
exc wg set rpPskBrkTestA \
listen-port 46125 \
private-key peer_a.wg.sk \
peer "$(cat peer_b.wg.pk)" \
endpoint 'localhost:46126' \
preshared-key peer_a_invalid.psk \
allowed-ips fe80::2/64
exc wg set rpPskBrkTestB \
listen-port 46126 \
private-key peer_b.wg.sk \
peer "$(cat peer_a.wg.pk)" \
endpoint 'localhost:46125' \
preshared-key peer_b_invalid.psk \
allowed-ips fe80::1/64
exc ip l set rpPskBrkTestA up
exc ip l set rpPskBrkTestB up
exc ip a add fe80::1/64 dev rpPskBrkTestA
exc ip a add fe80::2/64 dev rpPskBrkTestB
fork_eval_as_user "\
RUST_LOG='info' \
PATH=$(enquote "${REPO}/target/debug:${PATH}") \
$(enquote "${BINS}/rosenpass") --psk-broker-spawn \
exchange-config peer_a.rp.config"
fork_eval_as_user "\
RUST_LOG='info' \
PATH=$(enquote "${REPO}/target/debug:${PATH}") \
$(enquote "${BINS}/rosenpass-wireguard-broker-socket-handler") \
--listen-path broker.sock"
fork_eval_as_user "\
RUST_LOG='info' \
PATH=$(enquote "$PWD/target/debug:${PATH}") \
$(enquote "${BINS}/rosenpass") --psk-broker-path broker.sock \
exchange-config peer_b.rp.config"
exc_as_user ping -c 2 -w 10 fe80::1%rpPskBrkTestA
exc_as_user ping -c 2 -w 10 fe80::2%rpPskBrkTestB
exc_as_user ping -c 2 -w 10 fe80::2%rpPskBrkTestA
exc_as_user ping -c 2 -w 10 fe80::1%rpPskBrkTestB
SUCCESS=1
}
main "$@"

View File

@@ -1,40 +0,0 @@
# Additional files
This folder contains additional files that are used in the project.
## `generate_configs.py`
The script is used to generate configuration files for a benchmark setup
consisting of a device under test (DUT) and automatic test equipment (ATE),
basically a strong machine capable of running multiple Rosenpass instances at
once.
At the top of the script multiple variables can be set to configure the DUT IP
address and more. Once configured you may run `python3 generate_configs.py` to
create the configuration files.
A new folder called `output/` is created containing the subfolders `dut/` and
`ate/`. The former has to be copied to the DUT, ideally reproducible hardware
like a Raspberry Pi, while the latter is copied to the ATE, i.e. a laptop.
### Running a benchmark
On the ATE a run script is required since multiple instances of `rosenpass` are
started with different configurations in parallel. The scripts are named after
the number of instances they start, e.g. `run-50.sh` starts 50 instances.
```shell
# on the ATE aka laptop
cd output/ate
./run-10.sh
```
On the DUT you start a single Rosenpass instance with the configuration matching
the number of ATE peers.
```shell
# on the DUT aka Raspberry Pi
rosenpass exchange-config configs/dut-10.toml
```
Use whatever measurement tool you like to monitor the DUT and ATE.

View File

@@ -1,105 +0,0 @@
from pathlib import Path
from subprocess import run
import os
config = dict(
peer_counts=[1, 5, 10, 50, 100, 500],
peer_count_max=100,
ate_ip="127.0.0.1",
dut_ip="127.0.0.1",
dut_port=9999,
path_to_rosenpass_bin=os.getcwd() + "/target/release/rosenpass",
)
print(config)
output_dir = Path("output")
output_dir.mkdir(exist_ok=True)
template_dut = """
public_key = "keys/dut-public-key"
secret_key = "keys/dut-secret-key"
listen = ["{dut_ip}:{dut_port}"]
verbosity = "Quiet"
"""
template_dut_peer = """
[[peers]] # ATE-{i}
public_key = "keys/ate-{i}-public-key"
endpoint = "{ate_ip}:{ate_port}"
key_out = "out/key_out_{i}"
"""
template_ate = """
public_key = "keys/ate-{i}-public-key"
secret_key = "keys/ate-{i}-secret-key"
listen = ["{ate_ip}:{ate_port}"]
verbosity = "Quiet"
[[peers]] # DUT
public_key = "keys/dut-public-key"
endpoint = "{dut_ip}:{dut_port}"
key_out = "out/key_out_{i}"
"""
(output_dir / "dut" / "keys").mkdir(exist_ok=True, parents=True)
(output_dir / "dut" / "out").mkdir(exist_ok=True, parents=True)
(output_dir / "dut" / "configs").mkdir(exist_ok=True, parents=True)
(output_dir / "ate" / "keys").mkdir(exist_ok=True, parents=True)
(output_dir / "ate" / "out").mkdir(exist_ok=True, parents=True)
(output_dir / "ate" / "configs").mkdir(exist_ok=True, parents=True)
for peer_count in config["peer_counts"]:
dut_config = template_dut.format(**config)
for i in range(peer_count):
dut_config += template_dut_peer.format(**config, i=i, ate_port=50000 + i)
(output_dir / "dut" / "configs" / f"dut-{peer_count}.toml").write_text(dut_config)
if not (output_dir / "dut" / "keys" / "dut-public-key").exists():
print("Generate DUT keys")
run(
[
config["path_to_rosenpass_bin"],
"gen-keys",
f"configs/dut-{peer_count}.toml",
],
cwd=output_dir / "dut",
)
else:
print("DUT keys already exist")
# copy the DUT public key to the ATE
(output_dir / "ate" / "keys" / "dut-public-key").write_bytes(
(output_dir / "dut" / "keys" / "dut-public-key").read_bytes()
)
ate_script = "(trap 'kill 0' SIGINT; \\\n"
for i in range(config["peer_count_max"]):
(output_dir / "ate" / "configs" / f"ate-{i}.toml").write_text(
template_ate.format(**config, i=i, ate_port=50000 + i)
)
if not (output_dir / "ate" / "keys" / f"ate-{i}-public-key").exists():
# generate ATE keys
run(
[config["path_to_rosenpass_bin"], "gen-keys", f"configs/ate-{i}.toml"],
cwd=output_dir / "ate",
)
else:
print(f"ATE-{i} keys already exist")
# copy the ATE public keys to the DUT
(output_dir / "dut" / "keys" / f"ate-{i}-public-key").write_bytes(
(output_dir / "ate" / "keys" / f"ate-{i}-public-key").read_bytes()
)
ate_script += (
f"{config['path_to_rosenpass_bin']} exchange-config configs/ate-{i}.toml & \\\n"
)
if (i + 1) in config["peer_counts"]:
write_script = ate_script
write_script += "wait)"
(output_dir / "ate" / f"run-{i+1}.sh").write_text(write_script)

View File

@@ -14,7 +14,3 @@ rosenpass-cipher-traits = { workspace = true }
rosenpass-util = { workspace = true }
oqs-sys = { workspace = true }
paste = { workspace = true }
[dev-dependencies]
rosenpass-secret-memory = { workspace = true }
rosenpass-constant-time = { workspace = true }

View File

@@ -1,42 +1,9 @@
//! Generic helpers for declaring bindings to liboqs kems
/// Generate bindings to a liboqs-provided KEM
macro_rules! oqs_kem {
($name:ident) => { ::paste::paste!{
#[doc = "Bindings for ::oqs_sys::kem::" [<"OQS_KEM" _ $name:snake>] "_*"]
mod [< $name:snake >] {
use rosenpass_cipher_traits::Kem;
use rosenpass_util::result::Guaranteed;
#[doc = "Bindings for ::oqs_sys::kem::" [<"OQS_KEM" _ $name:snake>] "_*"]
#[doc = ""]
#[doc = "# Examples"]
#[doc = ""]
#[doc = "```rust"]
#[doc = "use std::borrow::{Borrow, BorrowMut};"]
#[doc = "use rosenpass_cipher_traits::Kem;"]
#[doc = "use rosenpass_oqs::" $name:camel " as MyKem;"]
#[doc = "use rosenpass_secret_memory::{Secret, Public};"]
#[doc = ""]
#[doc = "rosenpass_secret_memory::secret_policy_try_use_memfd_secrets();"]
#[doc = ""]
#[doc = "// Recipient generates secret key, transfers pk to sender"]
#[doc = "let mut sk = Secret::<{ MyKem::SK_LEN }>::zero();"]
#[doc = "let mut pk = Public::<{ MyKem::PK_LEN }>::zero();"]
#[doc = "MyKem::keygen(sk.secret_mut(), pk.borrow_mut());"]
#[doc = ""]
#[doc = "// Sender generates ciphertext and local shared key, sends ciphertext to recipient"]
#[doc = "let mut shk_enc = Secret::<{ MyKem::SHK_LEN }>::zero();"]
#[doc = "let mut ct = Public::<{ MyKem::CT_LEN }>::zero();"]
#[doc = "MyKem::encaps(shk_enc.secret_mut(), ct.borrow_mut(), pk.borrow());"]
#[doc = ""]
#[doc = "// Recipient decapsulates ciphertext"]
#[doc = "let mut shk_dec = Secret::<{ MyKem::SHK_LEN }>::zero();"]
#[doc = "MyKem::decaps(shk_dec.secret_mut(), sk.secret(), ct.borrow());"]
#[doc = ""]
#[doc = "// Both parties end up with the same shared key"]
#[doc = "assert!(rosenpass_constant_time::compare(shk_enc.secret_mut(), shk_dec.secret_mut()) == 0);"]
#[doc = "```"]
pub enum [< $name:camel >] {}
/// # Panic & Safety

View File

@@ -1,8 +1,3 @@
#![warn(missing_docs)]
#![warn(clippy::missing_docs_in_private_items)]
//! Bindings for liboqs used in Rosenpass
/// Call into a libOQS function
macro_rules! oqs_call {
($name:path, $($args:expr),*) => {{
use oqs_sys::common::OQS_STATUS::*;

View File

@@ -1,39 +0,0 @@
final: prev: {
#
### Actual rosenpass software ###
#
rosenpass = final.callPackage ./pkgs/rosenpass.nix { };
rosenpass-oci-image = final.callPackage ./pkgs/rosenpass-oci-image.nix { };
rp = final.callPackage ./pkgs/rosenpass.nix { package = "rp"; };
release-package = final.callPackage ./pkgs/release-package.nix { };
#
### Appendix ###
#
proverif-patched = prev.proverif.overrideAttrs (old: {
postInstall = ''
install -D -t $out/lib cryptoverif.pvl
'';
});
proof-proverif = final.stdenv.mkDerivation {
name = "rosenpass-proverif-proof";
version = "unstable";
src = final.lib.sources.sourceByRegex ./. [
"analyze.sh"
"marzipan(/marzipan.awk)?"
"analysis(/.*)?"
];
nativeBuildInputs = [ final.proverif final.graphviz ];
CRYPTOVERIF_LIB = final.proverif-patched + "/lib/cryptoverif.pvl";
installPhase = ''
mkdir -p $out
bash analyze.sh -color -html $out
'';
};
whitepaper = final.callPackage ./pkgs/whitepaper.nix { };
}

View File

@@ -2,11 +2,10 @@
template: rosenpass
title: Rosenpass
author:
- Karolin Varner = Rosenpass e.V., Max Planck Institute for Security and Privacy (MPI-SP)
- Benjamin Lipp = Rosenpass e.V., Max Planck Institute for Security and Privacy (MPI-SP)
- Karolin Varner = Independent Researcher
- Benjamin Lipp = Max Planck Institute for Security and Privacy (MPI-SP)
- Wanja Zaeske
- Lisa Schmidt = {Scientific Illustrator \\url{mullana.de}}
- Prabhpreet Dua
abstract: |
Rosenpass is used to create post-quantum-secure VPNs. Rosenpass computes a shared key, WireGuard (WG) [@wg] uses the shared key to establish a secure connection. Rosenpass can also be used without WireGuard, deriving post-quantum-secure symmetric keys for another application. The Rosenpass protocol builds on “Post-quantum WireGuard” (PQWG) [@pqwg] and improves it by using a cookie mechanism to provide security against state disruption attacks.
@@ -219,7 +218,6 @@ The server needs to store the following variables:
* `spkm`
* `biscuit_key` Randomly chosen key used to encrypt biscuits
* `biscuit_ctr` Retransmission protection for biscuits
* `cookie_secret` A randomized cookie secret used to derive cookies sent to a peer when under load. This secret changes every 120 seconds
Not mandated per se, but required in practice:
@@ -245,7 +243,6 @@ The initiator stores the following local state for each ongoing handshake:
* `ck` The chaining key
* `eski` The initiator's ephemeral secret key
* `epki` The initiator's ephemeral public key
* `cookie_value` Cookie value received from a peer under load, used to compute the cookie field in outgoing handshake messages to that peer. This value expires 120 seconds from when the peer sent it using the CookieReply message
The responder stores no state. While the responder has access to all of the above variables except for `eski`, the responder discards them after generating the RespHello message. Instead, the responder state is contained inside a cookie called a biscuit. This value is returned to the responder inside the InitConf packet. The biscuit consists of:
@@ -383,18 +380,9 @@ fn load_biscuit(nct) {
"biscuit additional data",
spkr, sidi, sidr);
let pt : Biscuit = XAEAD::dec(k, n, ct, ad);
// Find the peer and apply retransmission protection
lookup_peer(pt.peerid);
// In December 2024, the InitConf retransmission mechanism was redesigned
// in a backwards-compatible way. See the changelog.
//
// -- 2024-11-30, Karolin Varner
if (protocol_version!(< "0.3.0")) {
// Ensure that the biscuit is used only once
assert(pt.biscuit_no <= peer.biscuit_used);
}
assert(pt.biscuit_no <= peer.biscuit_used);
// Restore the chaining key
ck ← pt.ck;
@@ -440,161 +428,11 @@ The responder code handling InitConf needs to deal with the biscuits and package
ICR5 and ICR6 perform biscuit replay protection using the biscuit number. This is not handled in `load_biscuit()` itself because there is the case that `biscuit_no = biscuit_used`, which needs to be dealt with for retransmission handling.
### Denial of Service Mitigation and Cookies
Rosenpass derives its cookie-based DoS mitigation technique from WireGuard [@wg]; a responder applies it when receiving InitHello messages.
When the responder is under load, it may choose not to process further InitHello handshake messages, but instead to respond with a cookie reply message (see Figure \ref{img:MessageTypes}).
The sender then uses this cookie to resend the message and have it accepted the next time by the receiver.
For an initiator, Rosenpass ignores all messages when under load.
#### Cookie Reply Message
The cookie reply message is sent by the responder on receiving an InitHello message while under load. It consists of the initiator's `sidi`, a random 24-byte bitstring `nonce`, and a `cookie_encrypted` reply field that encrypts `cookie_value` as follows:
```pseudorust
cookie_value = lhash("cookie-value", cookie_secret, initiator_host_info)[0..16]
cookie_encrypted = XAEAD(lhash("cookie-key", spkm), nonce, cookie_value, mac_peer)
```
where `cookie_secret` is a secret variable that changes to a random value every two minutes. `initiator_host_info` is used to identify the initiator host and is implementation-specific for the client. The parameters used to identify the host must be chosen carefully to ensure a unique mapping, especially when using IPv4 and IPv6 addresses to identify the host (such as taking care of IPv6 link-local addresses). `cookie_value` is a 16-byte value truncated from the above hash operation. `mac_peer` is the `mac` field of the peer's handshake message to which this message is the reply.
#### Envelope `mac` Field
Similar to `mac.1` in WireGuard handshake messages, the `mac` field of a Rosenpass envelope, from the handshake packet sender's point of view, consists of the following:
```pseudorust
mac = lhash("mac", spkt, MAC_WIRE_DATA)[0..16]
```
where `MAC_WIRE_DATA` represents all bytes of the message prior to the `mac` field in the envelope.
If a client receives an invalid `mac` value for any message, it will discard the message.
#### Envelope cookie field
The initiator, on receiving a CookieReply message, decrypts `cookie_encrypted` and stores the `cookie_value` for the session into `peer[sid].cookie_value` for a limited time (120 seconds). This value is then used to set `cookie` field set for subsequent messages and retransmissions to the responder as follows:
```pseudorust
if (peer.cookie_value.is_none() || seconds_since_update(peer[sid].cookie_value) >= 120) {
cookie.zeroize(); //zeroed out 16 bytes bitstring
}
else {
cookie = lhash("cookie",peer.cookie_value.unwrap(),COOKIE_WIRE_DATA)
}
```
Here, `seconds_since_update(peer.cookie_value)` is the time in seconds elapsed since the last cookie was received, and `COOKIE_WIRE_DATA` is all bytes of the retransmitted message prior to the `cookie` field.
The initiator may use an invalid `cookie` value when the responder is not under load, and the responder must ignore this value.
However, when the responder is under load, it may reject InitHello messages with an invalid `cookie` value and issue a cookie reply message.
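The resulting responder-side decision for InitHello can be summarized by the following sketch; the function and type names are illustrative assumptions, not part of the reference implementation:
```rust
/// Minimal sketch of the responder's InitHello handling described above.
/// `Action` and `handle_init_hello` are hypothetical names.
fn handle_init_hello(under_load: bool, cookie_valid: bool) -> Action {
    if !under_load {
        // Not under load: the cookie field is ignored entirely.
        Action::ProcessHandshake
    } else if cookie_valid {
        // Under load, but the (retransmitted) message carries a valid cookie.
        Action::ProcessHandshake
    } else {
        // Under load and no valid cookie: discard InitHello, send a CookieReply.
        Action::SendCookieReply
    }
}

enum Action {
    ProcessHandshake,
    SendCookieReply,
}
```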
### Conditions to trigger DoS Mechanism
This whitepaper does not mandate any specific mechanism to detect responder contention (also referred to as the under-load condition) that would trigger use of the cookie mechanism.
For the reference implementation, Rosenpass takes inspiration from the Linux implementation of WireGuard. This implementation suggests that the receiver keep track of the number of messages it is processing at a given time.
On receiving an incoming message, if the length of the message queue to be processed exceeds the threshold `MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD`, the client is considered under load, and the timestamp of the instant when the client was last under load is stored. When receiving subsequent messages while still in the under-load state, the client checks whether the time elapsed since it was last under load exceeds `LAST_UNDER_LOAD_WINDOW` seconds. If so, the client returns to normal operation and processes the message normally.
Currently, the following constants are derived from the Linux kernel implementation of WireGuard:
```pseudorust
MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD = 4096
LAST_UNDER_LOAD_WINDOW = 1 //seconds
```
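A minimal sketch of this under-load tracking, using the constants above; the struct and field names are assumptions for illustration, not the reference implementation:
```rust
use std::time::{Duration, Instant};

const MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD: usize = 4096;
const LAST_UNDER_LOAD_WINDOW: Duration = Duration::from_secs(1);

/// Illustrative under-load tracker; not the reference implementation.
struct LoadState {
    under_load: bool,
    last_under_load: Option<Instant>,
}

impl LoadState {
    /// Called for every incoming message; returns whether the receiver
    /// should currently be treated as under load.
    fn on_incoming(&mut self, queued_handshakes: usize, now: Instant) -> bool {
        if queued_handshakes > MAX_QUEUED_INCOMING_HANDSHAKES_THRESHOLD {
            // Queue too long: enter (or stay in) the under-load state and
            // record when we last observed it.
            self.under_load = true;
            self.last_under_load = Some(now);
        } else if self.under_load {
            // Return to normal operation once the window has elapsed since
            // the receiver was last under load.
            if let Some(t) = self.last_under_load {
                if now.duration_since(t) > LAST_UNDER_LOAD_WINDOW {
                    self.under_load = false;
                }
            }
        }
        self.under_load
    }
}
```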
## Dealing with Packet Loss
The initiator deals with packet loss by storing the messages it sends to the responder and retransmitting them in randomized, exponentially increasing intervals until they get a response. Receiving RespHello terminates retransmission of InitHello. A Data or EmptyData message serves as acknowledgement of receiving InitConf and terminates its retransmission.
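As an illustration, the initiator's retransmission schedule could look like the following sketch, assuming the `rand` crate's `Rng::gen_range`; the initial interval, the cap, and the jitter bounds are assumptions, not values mandated by this document:
```rust
use rand::Rng;
use std::time::Duration;

/// Sketch of a randomized, exponentially increasing retransmission delay.
/// All bounds here are illustrative assumptions.
fn next_retransmission_delay<R: Rng>(rng: &mut R, attempt: u32) -> Duration {
    let base = Duration::from_millis(500); // assumed initial interval
    let cap = Duration::from_secs(60); // assumed upper bound
    // Exponential growth, capped to keep the delay bounded.
    let delay = base.saturating_mul(1u32 << attempt.min(7)).min(cap);
    // Randomize the interval to avoid synchronized retries.
    let jitter: f64 = rng.gen_range(0.5..1.5);
    delay.mul_f64(jitter)
}
```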
The responder uses a less complex form of the same mechanism: the responder never retransmits RespHello; instead, it generates a new RespHello message if InitHello is retransmitted. Responder confirmation messages of a completed handshake (EmptyData) are retransmitted by storing the most recent InitConf messages (or their hashes) and caching the associated EmptyData messages. Through this cache, InitConf retransmission is detected and the associated EmptyData message is retransmitted.
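A sketch of the responder-side cache described above; the key and value types are illustrative assumptions, and a real implementation would also bound the cache and evict old entries:
```rust
use std::collections::HashMap;

/// Illustrative cache mapping the hash of a recently seen InitConf message
/// to the EmptyData confirmation that was generated for it.
struct ConfirmationCache {
    cache: HashMap<[u8; 32], Vec<u8>>,
}

impl ConfirmationCache {
    /// If this InitConf was already answered, the message is a retransmission
    /// and the cached EmptyData can simply be resent.
    fn lookup(&self, init_conf_hash: &[u8; 32]) -> Option<&[u8]> {
        self.cache.get(init_conf_hash).map(Vec::as_slice)
    }

    /// Remember the EmptyData generated for a freshly processed InitConf.
    fn remember(&mut self, init_conf_hash: [u8; 32], empty_data: Vec<u8>) {
        self.cache.insert(init_conf_hash, empty_data);
    }
}
```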
### Interaction with cookie reply system
The cookie reply system does not interfere with the retransmission logic discussed above.
When the initiator is under load, it ignores all incoming messages.
When a responder is under load and receives an InitHello handshake message, the InitHello message is discarded and a cookie reply message is sent. The initiator, on receipt of the cookie reply message, stores the decrypted `cookie_value` and uses it to set the `cookie` field of subsequently sent messages. As per the retransmission mechanism above, the initiator then retransmits the InitHello message with a valid `cookie` value. On receiving the retransmitted handshake message, the responder validates the `cookie` value and resumes the handshake process.
When the responder is under load and receives an InitConf message, the message is processed directly without checking the validity of the cookie field.
# Changelog
### 0.3.x
#### 2024-10-30 InitConf retransmission updates
\vspace{0.5em}
Author: Karolin Varner
Issue: [#331](https://github.com/rosenpass/rosenpass/issues/331)
PR: [#513](https://github.com/rosenpass/rosenpass/pull/513)
\vspace{0.5em}
We redesign the InitConf retransmission mechanism to use a hash table. This avoids the need for the InitConf handling code to account for InitConf retransmission specifically and moves the retransmission logic into less-sensitive code.
Previously, we would specifically account for InitConf retransmission in the InitConf handling code by checking the biscuit number: If the biscuit number was higher than any previously seen biscuit number, then this must be a new key-exchange being completed; if the biscuit number was exactly the highest seen biscuit number, then the InitConf message is interpreted as an InitConf retransmission; in this case, an entirely new EmptyData (responder confirmation) message was generated as confirmation that InitConf has been received and that the initiator can now cease opportunistic retransmission of InitConf.
This mechanism was a bit brittle, even leading to a very minor but still relevant security issue, necessitating the release of Rosenpass maintenance version 0.2.2 with a [fix for the problem](https://github.com/rosenpass/rosenpass/pull/329). We had processed the InitConf message, correctly identifying that InitConf was a retransmission, but we failed to pass this information on to the rest of the code base, leading to double emission of the same "hey, we have a new cryptographic session key" event if the `outfile` option was used to integrate Rosenpass into some external application. If this event was used anywhere to reset a nonce, then this could have led to nonce misuse, although for the use with WireGuard this is not an issue.
By removing all retransmission handling code from the cryptographic protocol, we are taking structural measures to exclude the possibilities of similar issues.
- In section "Dealing with Packet Loss" we replace
\begin{quote}
The responder does not need to do anything special to handle RespHello retransmission: if the RespHello package is lost, the initiator retransmits InitHello and the responder can generate another RespHello package from that. InitConf retransmission needs to be handled specifically in the responder code because accepting an InitConf retransmission would reset the live session including the nonce counter, which would cause nonce reuse. Implementations must detect the case that `biscuit_no = biscuit_used` in ICR5, skip execution of ICR6 and ICR7, and just transmit another EmptyData package to confirm that the initiator can stop transmitting InitConf.
\end{quote}
by
\begin{quote}
The responder uses a less complex form of the same mechanism: the responder never retransmits RespHello; instead, it generates a new RespHello message if InitHello is retransmitted. Responder confirmation messages of a completed handshake (EmptyData) are retransmitted by storing the most recent InitConf messages (or their hashes) and caching the associated EmptyData messages. Through this cache, InitConf retransmission is detected and the associated EmptyData message is retransmitted.
\end{quote}
- In function `load_biscuit` we replace
``` {=tex}
\begin{quote}
\begin{minted}{pseudorust}
assert(pt.biscuit_no <= peer.biscuit_used);
\end{minted}
\end{quote}
```
by
``` {=tex}
\begin{quote}
\begin{minted}{pseudorust}
// In December 2024, the InitConf retransmission mechanism was redesigned
// in a backwards-compatible way. See the changelog.
//
// -- 2024-11-30, Karolin Varner
if (protocol_version!(< "0.3.0")) {
// Ensure that the biscuit is used only once
assert(pt.biscuit_no <= peer.biscuit_used);
}
\end{minted}
\end{quote}
```
#### 2024-04-16 Denial of Service Mitigation
\vspace{0.5em}
Author: Prabhpreet Dua
Issue: [#137](https://github.com/rosenpass/rosenpass/issues/137)
PR: [#142](https://github.com/rosenpass/rosenpass/pull/142)
\vspace{0.5em}
- Added denial of service mitigation using the WireGuard cookie mechanism
- Added section "Denial of Service Mitigation and Cookies", and modified "Dealing with Packet Loss" for the DoS cookie mechanism
The responder does not need to do anything special to handle RespHello retransmission: if the RespHello package is lost, the initiator retransmits InitHello and the responder can generate another RespHello package from that. InitConf retransmission needs to be handled specifically in the responder code because accepting an InitConf retransmission would reset the live session including the nonce counter, which would cause nonce reuse. Implementations must detect the case that `biscuit_no = biscuit_used` in ICR5, skip execution of ICR6 and ICR7, and just transmit another EmptyData package to confirm that the initiator can stop transmitting InitConf.
\printbibliography

View File

@@ -1,27 +0,0 @@
{ lib, stdenvNoCC, runCommandNoCC, pkgsStatic, rosenpass, rosenpass-oci-image, rp } @ args:
let
version = rosenpass.version;
# select static packages on Linux, default packages otherwise
package =
if stdenvNoCC.hostPlatform.isLinux then
pkgsStatic.rosenpass
else args.rosenpass;
rp =
if stdenvNoCC.hostPlatform.isLinux then
pkgsStatic.rp
else args.rp;
oci-image =
if stdenvNoCC.hostPlatform.isLinux then
pkgsStatic.rosenpass-oci-image
else args.rosenpass-oci-image;
in
runCommandNoCC "lace-result" { } ''
mkdir {bin,$out}
tar -cvf $out/rosenpass-${stdenvNoCC.hostPlatform.system}-${version}.tar \
-C ${package} bin/rosenpass lib/systemd \
-C ${rp} bin/rp
cp ${oci-image} \
$out/rosenpass-oci-image-${stdenvNoCC.hostPlatform.system}-${version}.tar.gz
''

View File

@@ -1,11 +0,0 @@
{ dockerTools, buildEnv, rosenpass }:
dockerTools.buildImage {
name = rosenpass.name + "-oci";
copyToRoot = buildEnv {
name = "image-root";
paths = [ rosenpass ];
pathsToLink = [ "/bin" ];
};
config.Cmd = [ "/bin/rosenpass" ];
}

View File

@@ -1,87 +0,0 @@
{ lib, stdenv, rustPlatform, cmake, mandoc, removeReferencesTo, bash, package ? "rosenpass" }:
let
# whether we want to build a statically linked binary
isStatic = stdenv.targetPlatform.isStatic;
scoped = (scope: scope.result);
# source files relevant for rust
src = scoped rec {
# File suffixes to include
extensions = [
"lock"
"rs"
"service"
"target"
"toml"
];
# Files to explicitly include
files = [
"to/README.md"
];
src = ../.;
filter = (path: type: scoped rec {
inherit (lib) any id removePrefix hasSuffix;
anyof = (any id);
basename = baseNameOf (toString path);
relative = removePrefix (toString src + "/") (toString path);
result = anyof [
(type == "directory")
(any (ext: hasSuffix ".${ext}" basename) extensions)
(any (file: file == relative) files)
];
});
result = lib.sources.cleanSourceWith { inherit src filter; };
};
# parsed Cargo.toml
cargoToml = builtins.fromTOML (builtins.readFile (src + "/rosenpass/Cargo.toml"));
in
rustPlatform.buildRustPackage {
name = cargoToml.package.name;
version = cargoToml.package.version;
inherit src;
cargoBuildOptions = [ "--package" package ];
cargoTestOptions = [ "--package" package ];
doCheck = true;
cargoLock = {
lockFile = src + "/Cargo.lock";
outputHashes = {
"memsec-0.6.3" = "sha256-4ri+IEqLd77cLcul3lZrmpDKj4cwuYJ8oPRAiQNGeLw=";
"uds-0.4.2" = "sha256-qlxr/iJt2AV4WryePIvqm/8/MK/iqtzegztNliR93W8=";
};
};
nativeBuildInputs = [
stdenv.cc
cmake # for oqs build in the oqs-sys crate
mandoc # for the built-in manual
removeReferencesTo
rustPlatform.bindgenHook # for C-bindings in the crypto libs
];
buildInputs = [ bash ];
hardeningDisable = lib.optional isStatic "fortify";
postInstall = ''
mkdir -p $out/lib/systemd/system
install systemd/rosenpass@.service $out/lib/systemd/system
install systemd/rp@.service $out/lib/systemd/system
install systemd/rosenpass.target $out/lib/systemd/system
'';
meta = {
inherit (cargoToml.package) description homepage;
license = with lib.licenses; [ mit asl20 ];
maintainers = [ lib.maintainers.wucke13 ];
platforms = lib.platforms.all;
};
}

View File

@@ -1,29 +0,0 @@
{ stdenvNoCC, texlive, ncurses, python3Packages, which }:
let
customTexLiveSetup = (texlive.combine {
inherit (texlive) acmart amsfonts biber biblatex biblatex-software
biblatex-trad ccicons csquotes csvsimple doclicense eso-pic fancyvrb
fontspec gitinfo2 gobble ifmtarg koma-script latexmk lm lualatex-math
markdown mathtools minted noto nunito paralist pgf scheme-basic soul
unicode-math upquote xifthen xkeyval xurl;
});
in
stdenvNoCC.mkDerivation {
name = "whitepaper";
src = ../papers;
nativeBuildInputs = [
ncurses # tput
python3Packages.pygments
customTexLiveSetup # custom tex live scheme
which
];
buildPhase = ''
export HOME=$(mktemp -d)
latexmk -r tex/CI.rc
'';
installPhase = ''
mkdir -p $out
mv *.pdf readme.md $out/
'';
}

View File

@@ -23,19 +23,13 @@ rosenpass help
Follow [quick start instructions](https://rosenpass.eu/#start) to get a VPN up and running.
## Contributing
Contributions are generally welcome. Join our [Matrix Chat](https://matrix.to/#/#rosenpass:matrix.org) if you are looking for guidance on how to contribute or for people to collaborate with.
We also have an, as of now, very minimal [contributors guide](CONTRIBUTING.md).
## Software architecture
The [rosenpass tool](./src/) is written in Rust and uses liboqs[^liboqs]. The tool establishes a symmetric key and provides it to WireGuard. Since it supplies WireGuard with a key through the PSK feature, using Rosenpass+WireGuard is cryptographically no less secure than using WireGuard on its own ("hybrid security"). Rosenpass refreshes the symmetric key every two minutes.
The [rosenpass tool](./src/) is written in Rust and uses liboqs[^liboqs] and libsodium[^libsodium]. The tool establishes a symmetric key and provides it to WireGuard. Since it supplies WireGuard with a key through the PSK feature, using Rosenpass+WireGuard is cryptographically no less secure than using WireGuard on its own ("hybrid security"). Rosenpass refreshes the symmetric key every two minutes.
As with any application, a small risk of critical security issues (such as buffer overflows or remote code execution) exists; the Rosenpass application is written in the Rust programming language, which is much less prone to such issues. Rosenpass can also write keys to files instead of supplying them to WireGuard. With a bit of scripting, the stand-alone mode of the implementation can be used to run the application in a container, VM, or on another host. This mode can also be used to integrate tools other than WireGuard with Rosenpass.
The [`rp`](./rp) tool written in Rust makes it easy to create a VPN using WireGuard and Rosenpass.
The [`rp`](./rp) tool written in bash makes it easy to create a VPN using WireGuard and Rosenpass.
`rp` is easy to get started with but has a few drawbacks; it runs as root, demanding access to both WireGuard
and Rosenpass private keys, takes control of the interface and works with exactly one interface. If you do not feel confident about running Rosenpass as root, you should use the stand-alone mode to create a more secure setup using containers, jails, or virtual machines.
@@ -65,6 +59,7 @@ The code uses a variety of optimizations to speed up analysis such as using secr
A wrapper script provides instant feedback about which queries execute as expected in color: A red cross if a query fails and a green check if it succeeds.
[^liboqs]: https://openquantumsafe.org/liboqs/
[^libsodium]: https://doc.libsodium.org/
[^wg]: https://www.wireguard.com/
[^pqwg]: https://eprint.iacr.org/2020/379
[^pqwg-statedis]: Unless supplied with a pre-shared-key, but this defeats the purpose of a key exchange protocol
@@ -72,8 +67,6 @@ A wrapper script provides instant feedback about which queries execute as expect
# Getting Rosenpass
Documentation and installation guides can be found at the [Rosenpass website](https://rosenpass.eu/docs).
Rosenpass is packaged for more and more distributions, maybe also for the distribution of your choice?
[![Packaging status](https://repology.org/badge/vertical-allrepos/rosenpass.svg)](https://repology.org/project/rosenpass/versions)

View File

@@ -1,6 +1,6 @@
[package]
name = "rosenpass"
version = "0.3.0-dev"
version = "0.2.1"
authors = ["Karolin Varner <karo@cupdev.net>", "wucke13 <wucke13@gmail.com>"]
edition = "2021"
license = "MIT OR Apache-2.0"
@@ -9,23 +9,6 @@ homepage = "https://rosenpass.eu/"
repository = "https://github.com/rosenpass/rosenpass"
readme = "readme.md"
[[bin]]
name = "rosenpass"
path = "src/main.rs"
[[bin]]
name = "rosenpass-gen-ipc-msg-types"
path = "src/bin/gen-ipc-msg-types.rs"
required-features = ["experiment_api", "internal_bin_gen_ipc_msg_types"]
[[test]]
name = "api-integration-tests"
required-features = ["experiment_api", "internal_testing"]
[[test]]
name = "api-integration-tests-api-setup"
required-features = ["experiment_api", "internal_testing"]
[[bench]]
name = "handshake"
harness = false
@@ -47,22 +30,10 @@ env_logger = { workspace = true }
serde = { workspace = true }
toml = { workspace = true }
clap = { workspace = true }
clap_complete = { workspace = true }
clap_mangen = { workspace = true }
mio = { workspace = true }
rand = { workspace = true }
zerocopy = { workspace = true }
home = { workspace = true }
derive_builder = { workspace = true }
rosenpass-wireguard-broker = { workspace = true }
zeroize = { workspace = true }
hex-literal = { workspace = true, optional = true }
hex = { workspace = true, optional = true }
heck = { workspace = true, optional = true }
command-fds = { workspace = true, optional = true }
rustix = { workspace = true, optional = true }
uds = { workspace = true, optional = true, features = ["mio_1xx"] }
signal-hook = { workspace = true, optional = true }
[build-dependencies]
anyhow = { workspace = true }
@@ -71,23 +42,3 @@ anyhow = { workspace = true }
criterion = { workspace = true }
test_bin = { workspace = true }
stacker = { workspace = true }
serial_test = { workspace = true }
procspawn = { workspace = true }
tempfile = { workspace = true }
rustix = { workspace = true }
[features]
default = []
experiment_memfd_secret = ["rosenpass-wireguard-broker/experiment_memfd_secret"]
experiment_libcrux = ["rosenpass-ciphers/experiment_libcrux"]
experiment_api = [
"hex-literal",
"uds",
"command-fds",
"rustix",
"rosenpass-util/experiment_file_descriptor_passing",
"rosenpass-wireguard-broker/experiment_api",
]
internal_signal_handling_for_coverage_reports = ["signal-hook"]
internal_testing = []
internal_bin_gen_ipc_msg_types = ["hex", "heck"]

View File

@@ -1,12 +1,10 @@
use anyhow::Result;
use rosenpass::protocol::{CryptoServer, HandleMsgResult, MsgBuf, PeerPtr, SPk, SSk, SymKey};
use std::ops::DerefMut;
use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::StaticKem;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use rosenpass_secret_memory::secret_policy_try_use_memfd_secrets;
fn handle(
tx: &mut CryptoServer,
@@ -41,7 +39,7 @@ fn hs(ini: &mut CryptoServer, res: &mut CryptoServer) -> Result<()> {
fn keygen() -> Result<(SSk, SPk)> {
let (mut sk, mut pk) = (SSk::zero(), SPk::zero());
StaticKem::keygen(sk.secret_mut(), pk.deref_mut())?;
StaticKem::keygen(sk.secret_mut(), pk.secret_mut())?;
Ok((sk, pk))
}
@@ -58,7 +56,6 @@ fn make_server_pair() -> Result<(CryptoServer, CryptoServer)> {
}
fn criterion_benchmark(c: &mut Criterion) {
secret_policy_try_use_memfd_secrets();
let (mut a, mut b) = make_server_pair().unwrap();
c.bench_function("cca_secret_alloc", |bench| {
bench.iter(|| {

52
rosenpass/build.rs Normal file
View File

@@ -0,0 +1,52 @@
use anyhow::bail;
use anyhow::Result;
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::process::Command;
/// Invokes a troff compiler to compile a manual page
fn render_man(compiler: &str, man: &str) -> Result<String> {
let out = Command::new(compiler).args(["-Tascii", man]).output()?;
if !out.status.success() {
bail!("{} returned an error", compiler);
}
Ok(String::from_utf8(out.stdout)?)
}
/// Generates the manual page
fn generate_man() -> String {
// This function is purposely stupid and redundant
let man = render_man("mandoc", "./doc/rosenpass.1");
if let Ok(man) = man {
return man;
}
let man = render_man("groff", "./doc/rosenpass.1");
if let Ok(man) = man {
return man;
}
"Cannot render manual page. Please visit https://rosenpass.eu/docs/manuals/\n".into()
}
fn man() {
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let man = generate_man();
let path = out_dir.join("rosenpass.1.ascii");
let mut file = File::create(&path).unwrap();
file.write_all(man.as_bytes()).unwrap();
println!("cargo:rustc-env=ROSENPASS_MAN={}", path.display());
}
fn main() {
// For now, rerun the build script every time, as the build script
// is not very expensive right now.
println!("cargo:rerun-if-changed=./");
man();
}

View File

@@ -1,341 +0,0 @@
// Note: This is business logic; tested through the integration tests in
// rosenpass/tests/
use std::{borrow::BorrowMut, collections::VecDeque, os::fd::OwnedFd};
use anyhow::Context;
use rosenpass_to::{ops::copy_slice, To};
use rosenpass_util::{
fd::FdIo,
functional::{run, ApplyExt},
io::ReadExt,
mem::DiscardResultExt,
mio::UnixStreamExt,
result::OkExt,
};
use rosenpass_wireguard_broker::brokers::mio_client::MioBrokerClient;
use crate::{
api::{add_listen_socket_response_status, add_psk_broker_response_status},
app_server::AppServer,
protocol::BuildCryptoServer,
};
use super::{supply_keypair_response_status, Server as ApiServer};
/// Stores the state of the API handler.
///
/// This is used in the context [ApiHandlerContext]; [ApiHandlerContext] exposes both
/// the [AppServer] and the API handler state.
///
/// [ApiHandlerContext] is what actually contains the API handler functions.
#[derive(Debug)]
pub struct ApiHandler {
_dummy: (),
}
impl ApiHandler {
/// Construct an [Self]
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
Self { _dummy: () }
}
}
/// The implementation of the API requires both access to its own state [ApiHandler] and to the
/// [AppServer] the API is supposed to operate on.
///
/// This trait provides both; it implements a pattern to allow for multiple - **potentially
/// overlapping** mutable references to be passed to the API handler functions.
///
/// This relatively complex scheme is chosen to appease the borrow checker: We want flexibility
/// with regard to where the [ApiHandler] is stored and we need a mutable reference to
/// [ApiHandler]. We also need a mutable reference to [AppServer]. Achieving this by using the
/// direct method would be impossible because the [ApiHandler] is actually stored somewhere inside
/// [AppServer]. The borrow checker does not allow this.
///
/// What we have instead is in practice a reference to [AppServer] and a function (as part of
/// the trait) that extracts an [ApiHandler] reference from [AppServer], which is allowed by the
/// borrow checker. A benefit of the use of a trait here is that we could, if desired, also store
/// the [ApiHandler] outside [AppServer]. It really depends on the trait.
pub trait ApiHandlerContext {
/// Retrieve the [ApiHandler]
fn api_handler(&self) -> &ApiHandler;
/// Retrieve the [AppServer]
fn app_server(&self) -> &AppServer;
/// Retrieve the [ApiHandler]
fn api_handler_mut(&mut self) -> &mut ApiHandler;
/// Retrieve the [AppServer]
fn app_server_mut(&mut self) -> &mut AppServer;
}
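// A minimal, hypothetical sketch of how this trait could be implemented,
// assuming the [ApiHandler] lives in a field `api_handler` directly on
// [AppServer] (the field name is an assumption for illustration only):
//
//     impl ApiHandlerContext for AppServer {
//         fn api_handler(&self) -> &ApiHandler { &self.api_handler }
//         fn app_server(&self) -> &AppServer { self }
//         fn api_handler_mut(&mut self) -> &mut ApiHandler { &mut self.api_handler }
//         fn app_server_mut(&mut self) -> &mut AppServer { self }
//     }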
/// This is the Error raised by [ApiServer::supply_keypair]; it contains both
/// the underlying error message as well as the status value
/// returned by the API.
///
/// [ApiServer::supply_keypair] generally constructs a [Self] by using one of the
/// utility functions [SupplyKeypairErrorExt].
#[derive(thiserror::Error, Debug)]
#[error("Error in SupplyKeypair")]
struct SupplyKeypairError {
/// The status code communicated via the Rosenpass API
status: u128,
/// The underlying error that caused the Rosenpass API level Error
#[source]
cause: anyhow::Error,
}
trait SupplyKeypairErrorExt<T> {
/// Imbue any Error (that can be represented as [anyhow::Error]) with
/// an arbitrary error code
fn e_custom(self, status: u128) -> Result<T, SupplyKeypairError>;
/// Imbue any Error (that can be represented as [anyhow::Error]) with
/// the [supply_keypair_response_status::INTERNAL_ERROR] error code
fn einternal(self) -> Result<T, SupplyKeypairError>;
/// Imbue any Error (that can be represented as [anyhow::Error]) with
/// the [supply_keypair_response_status::KEYPAIR_ALREADY_SUPPLIED] error code
fn ealready_supplied(self) -> Result<T, SupplyKeypairError>;
/// Imbue any Error (that can be represented as [anyhow::Error]) with
/// the [supply_keypair_response_status::INVALID_REQUEST] error code
fn einvalid_req(self) -> Result<T, SupplyKeypairError>;
}
impl<T, E: Into<anyhow::Error>> SupplyKeypairErrorExt<T> for Result<T, E> {
fn e_custom(self, status: u128) -> Result<T, SupplyKeypairError> {
self.map_err(|e| SupplyKeypairError {
status,
cause: e.into(),
})
}
fn einternal(self) -> Result<T, SupplyKeypairError> {
self.e_custom(supply_keypair_response_status::INTERNAL_ERROR)
}
fn ealready_supplied(self) -> Result<T, SupplyKeypairError> {
self.e_custom(supply_keypair_response_status::KEYPAIR_ALREADY_SUPPLIED)
}
fn einvalid_req(self) -> Result<T, SupplyKeypairError> {
self.e_custom(supply_keypair_response_status::INVALID_REQUEST)
}
}
impl<T> ApiServer for T
where
T: ?Sized + ApiHandlerContext,
{
fn ping(
&mut self,
req: &super::PingRequest,
_req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::PingResponse,
) -> anyhow::Result<()> {
let (req, res) = (&req.payload, &mut res.payload);
copy_slice(&req.echo).to(&mut res.echo);
Ok(())
}
fn supply_keypair(
&mut self,
req: &super::SupplyKeypairRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::SupplyKeypairResponse,
) -> anyhow::Result<()> {
let outcome: Result<(), SupplyKeypairError> = run(|| {
// Acquire the file descriptors
let mut sk_io = FdIo(
req_fds
.front()
.context("First file descriptor, secret key, missing.")
.einvalid_req()?,
);
let mut pk_io = FdIo(
req_fds
.get(1)
.context("Second file descriptor, public key, missing.")
.einvalid_req()?,
);
// Actually read the secrets
let mut sk = crate::protocol::SSk::zero();
sk_io.read_exact_til_end(sk.secret_mut()).einvalid_req()?;
let mut pk = crate::protocol::SPk::zero();
pk_io.read_exact_til_end(pk.borrow_mut()).einvalid_req()?;
// Retrieve the construction site
let construction_site = self.app_server_mut().crypto_site.borrow_mut();
// Retrieve the builder
use rosenpass_util::build::ConstructionSite as C;
let maybe_builder = match construction_site {
C::Builder(builder) => Some(builder),
C::Product(_) => None,
C::Void => {
return Err(anyhow::Error::msg("CryptoServer construction side is void"))
.einternal();
}
};
// Retrieve a reference to the keypair
let Some(BuildCryptoServer {
ref mut keypair, ..
}) = maybe_builder
else {
return Err(anyhow::Error::msg("CryptoServer already built")).ealready_supplied();
};
// Supply the keypair to the CryptoServer
keypair
.insert(crate::protocol::Keypair { sk, pk })
.discard_result();
// Actually construct the CryptoServer
construction_site
.erect()
.map_err(|e| anyhow::Error::msg(format!("Error erecting the CryptoServer {e:?}")))
.einternal()?;
Ok(())
});
// Handle errors
use supply_keypair_response_status as status;
let status = match outcome {
Ok(()) => status::OK,
Err(e) => {
let lvl = match e.status {
status::INTERNAL_ERROR => log::Level::Warn,
_ => log::Level::Debug,
};
log::log!(
lvl,
"Error while processing API Request.\n Request: {:?}\n Error: {:?}",
req,
e.cause
);
if e.status == status::INTERNAL_ERROR {
return Err(e.cause);
}
e.status
}
};
res.payload.status = status;
Ok(())
}
fn add_listen_socket(
&mut self,
_req: &super::boilerplate::AddListenSocketRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::boilerplate::AddListenSocketResponse,
) -> anyhow::Result<()> {
// Retrieve file descriptor
let sock_res = run(|| -> anyhow::Result<mio::net::UdpSocket> {
let sock = req_fds
.pop_front()
.context("Invalid request socket missing.")?;
// TODO: We need to have this outside linux
#[cfg(target_os = "linux")]
rosenpass_util::fd::GetSocketProtocol::demand_udp_socket(&sock)?;
let sock = std::net::UdpSocket::from(sock);
sock.set_nonblocking(true)?;
mio::net::UdpSocket::from_std(sock).ok()
});
let sock = match sock_res {
Ok(sock) => sock,
Err(e) => {
log::debug!("Error processing AddListenSocket API request: {e:?}");
res.payload.status = add_listen_socket_response_status::INVALID_REQUEST;
return Ok(());
}
};
// Register socket
let reg_result = self.app_server_mut().register_listen_socket(sock);
if let Err(internal_error) = reg_result {
log::warn!("Internal error processing AddListenSocket API request: {internal_error:?}");
res.payload.status = add_listen_socket_response_status::INTERNAL_ERROR;
return Ok(());
};
res.payload.status = add_listen_socket_response_status::OK;
Ok(())
}
fn add_psk_broker(
&mut self,
_req: &super::boilerplate::AddPskBrokerRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::boilerplate::AddPskBrokerResponse,
) -> anyhow::Result<()> {
// Retrieve file descriptor
let sock_res = run(|| {
let sock = req_fds
.pop_front()
.context("Invalid request socket missing.")?;
mio::net::UnixStream::from_fd(sock)
});
// Handle errors
let sock = match sock_res {
Ok(sock) => sock,
Err(e) => {
log::debug!(
"Request found to be invalid while processing AddPskBroker API request: {e:?}"
);
res.payload.status = add_psk_broker_response_status::INVALID_REQUEST;
return Ok(());
}
};
// Register Socket
let client = Box::new(MioBrokerClient::new(sock));
// Workaround: The broker code is currently impressively overcomplicated. Brokers are
// stored in a hash map but the hash map key used is just a counter so a vector could
// have been used. Broker configuration is abstracted, different peers can have different
// brokers but there is no facility to add multiple brokers in practice. The broker index
// uses a `Public` wrapper without actually holding any cryptographic data. Even the broker
// configuration uses a trait abstraction for no discernible reason and a lot of the code
// introduces pointless, single-field wrapper structs.
// We should use an implement-what-is-actually-needed strategy next time.
// The Broker code needs to be slimmed down, the right direction to go is probably to
// just add event and capability support to the API and use the API to deliver OSK events.
//
// For now, we just replace the latest broker.
let erase_ptr = {
use crate::app_server::BrokerStorePtr;
//
use rosenpass_secret_memory::Public;
use zerocopy::AsBytes;
(self.app_server().brokers.store.len() - 1)
.apply(|x| x as u64)
.apply(|x| Public::from_slice(x.as_bytes()))
.apply(BrokerStorePtr)
};
let register_result = run(|| {
let srv = self.app_server_mut();
srv.unregister_broker(erase_ptr)?;
srv.register_broker(client)
});
if let Err(e) = register_result {
log::warn!("Internal error while processing AddPskBroker API request: {e:?}");
res.payload.status = add_psk_broker_response_status::INTERNAL_ERROR;
return Ok(());
}
res.payload.status = add_psk_broker_response_status::OK;
Ok(())
}
}

View File

@@ -1,222 +0,0 @@
use zerocopy::{ByteSlice, Ref};
use rosenpass_util::zerocopy::{RefMaker, ZerocopySliceExt};
use super::{
PingRequest, PingResponse, RawMsgType, RefMakerRawMsgTypeExt, RequestMsgType, RequestRef,
ResponseMsgType, ResponseRef, SupplyKeypairRequest, SupplyKeypairResponse,
};
pub trait ByteSliceRefExt: ByteSlice {
fn msg_type_maker(self) -> RefMaker<Self, RawMsgType> {
self.zk_ref_maker()
}
fn msg_type(self) -> anyhow::Result<Ref<Self, RawMsgType>> {
self.zk_parse()
}
fn msg_type_from_prefix(self) -> anyhow::Result<Ref<Self, RawMsgType>> {
self.zk_parse_prefix()
}
fn msg_type_from_suffix(self) -> anyhow::Result<Ref<Self, RawMsgType>> {
self.zk_parse_suffix()
}
fn request_msg_type(self) -> anyhow::Result<RequestMsgType> {
self.msg_type_maker().parse_request_msg_type()
}
fn request_msg_type_from_prefix(self) -> anyhow::Result<RequestMsgType> {
self.msg_type_maker()
.from_prefix()?
.parse_request_msg_type()
}
fn request_msg_type_from_suffix(self) -> anyhow::Result<RequestMsgType> {
self.msg_type_maker()
.from_suffix()?
.parse_request_msg_type()
}
fn response_msg_type(self) -> anyhow::Result<ResponseMsgType> {
self.msg_type_maker().parse_response_msg_type()
}
fn response_msg_type_from_prefix(self) -> anyhow::Result<ResponseMsgType> {
self.msg_type_maker()
.from_prefix()?
.parse_response_msg_type()
}
fn response_msg_type_from_suffix(self) -> anyhow::Result<ResponseMsgType> {
self.msg_type_maker()
.from_suffix()?
.parse_response_msg_type()
}
fn parse_request(self) -> anyhow::Result<RequestRef<Self>> {
RequestRef::parse(self)
}
fn parse_request_from_prefix(self) -> anyhow::Result<RequestRef<Self>> {
RequestRef::parse_from_prefix(self)
}
fn parse_request_from_suffix(self) -> anyhow::Result<RequestRef<Self>> {
RequestRef::parse_from_suffix(self)
}
fn parse_response(self) -> anyhow::Result<ResponseRef<Self>> {
ResponseRef::parse(self)
}
fn parse_response_from_prefix(self) -> anyhow::Result<ResponseRef<Self>> {
ResponseRef::parse_from_prefix(self)
}
fn parse_response_from_suffix(self) -> anyhow::Result<ResponseRef<Self>> {
ResponseRef::parse_from_suffix(self)
}
fn ping_request_maker(self) -> RefMaker<Self, PingRequest> {
self.zk_ref_maker()
}
fn ping_request(self) -> anyhow::Result<Ref<Self, PingRequest>> {
self.zk_parse()
}
fn ping_request_from_prefix(self) -> anyhow::Result<Ref<Self, PingRequest>> {
self.zk_parse_prefix()
}
fn ping_request_from_suffix(self) -> anyhow::Result<Ref<Self, PingRequest>> {
self.zk_parse_suffix()
}
fn ping_response_maker(self) -> RefMaker<Self, PingResponse> {
self.zk_ref_maker()
}
fn ping_response(self) -> anyhow::Result<Ref<Self, PingResponse>> {
self.zk_parse()
}
fn ping_response_from_prefix(self) -> anyhow::Result<Ref<Self, PingResponse>> {
self.zk_parse_prefix()
}
fn ping_response_from_suffix(self) -> anyhow::Result<Ref<Self, PingResponse>> {
self.zk_parse_suffix()
}
fn supply_keypair_request(self) -> anyhow::Result<Ref<Self, SupplyKeypairRequest>> {
self.zk_parse()
}
fn supply_keypair_request_from_prefix(self) -> anyhow::Result<Ref<Self, SupplyKeypairRequest>> {
self.zk_parse_prefix()
}
fn supply_keypair_request_from_suffix(self) -> anyhow::Result<Ref<Self, SupplyKeypairRequest>> {
self.zk_parse_suffix()
}
fn supply_keypair_response_maker(self) -> RefMaker<Self, SupplyKeypairResponse> {
self.zk_ref_maker()
}
fn supply_keypair_response(self) -> anyhow::Result<Ref<Self, SupplyKeypairResponse>> {
self.zk_parse()
}
fn supply_keypair_response_from_prefix(
self,
) -> anyhow::Result<Ref<Self, SupplyKeypairResponse>> {
self.zk_parse_prefix()
}
fn supply_keypair_response_from_suffix(
self,
) -> anyhow::Result<Ref<Self, SupplyKeypairResponse>> {
self.zk_parse_suffix()
}
fn add_listen_socket_request(self) -> anyhow::Result<Ref<Self, super::AddListenSocketRequest>> {
self.zk_parse()
}
fn add_listen_socket_request_from_prefix(
self,
) -> anyhow::Result<Ref<Self, super::AddListenSocketRequest>> {
self.zk_parse_prefix()
}
fn add_listen_socket_request_from_suffix(
self,
) -> anyhow::Result<Ref<Self, super::AddListenSocketRequest>> {
self.zk_parse_suffix()
}
fn add_listen_socket_response_maker(self) -> RefMaker<Self, super::AddListenSocketResponse> {
self.zk_ref_maker()
}
fn add_listen_socket_response(
self,
) -> anyhow::Result<Ref<Self, super::AddListenSocketResponse>> {
self.zk_parse()
}
fn add_listen_socket_response_from_prefix(
self,
) -> anyhow::Result<Ref<Self, super::AddListenSocketResponse>> {
self.zk_parse_prefix()
}
fn add_listen_socket_response_from_suffix(
self,
) -> anyhow::Result<Ref<Self, super::AddListenSocketResponse>> {
self.zk_parse_suffix()
}
fn add_psk_broker_request(self) -> anyhow::Result<Ref<Self, super::AddPskBrokerRequest>> {
self.zk_parse()
}
fn add_psk_broker_request_from_prefix(
self,
) -> anyhow::Result<Ref<Self, super::AddPskBrokerRequest>> {
self.zk_parse_prefix()
}
fn add_psk_broker_request_from_suffix(
self,
) -> anyhow::Result<Ref<Self, super::AddPskBrokerRequest>> {
self.zk_parse_suffix()
}
fn add_psk_broker_response_maker(self) -> RefMaker<Self, super::AddPskBrokerResponse> {
self.zk_ref_maker()
}
fn add_psk_broker_response(self) -> anyhow::Result<Ref<Self, super::AddPskBrokerResponse>> {
self.zk_parse()
}
fn add_psk_broker_response_from_prefix(
self,
) -> anyhow::Result<Ref<Self, super::AddPskBrokerResponse>> {
self.zk_parse_prefix()
}
fn add_psk_broker_response_from_suffix(
self,
) -> anyhow::Result<Ref<Self, super::AddPskBrokerResponse>> {
self.zk_parse_suffix()
}
}
impl<B: ByteSlice> ByteSliceRefExt for B {}

View File

@@ -1,29 +0,0 @@
use zerocopy::{ByteSliceMut, Ref};
use rosenpass_util::zerocopy::RefMaker;
use super::RawMsgType;
pub trait Message {
type Payload;
type MessageClass: Into<RawMsgType>;
const MESSAGE_TYPE: Self::MessageClass;
fn from_payload(payload: Self::Payload) -> Self;
fn init(&mut self);
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>>;
}
pub trait ZerocopyResponseMakerSetupMessageExt<B, T> {
fn setup_msg(self) -> anyhow::Result<Ref<B, T>>;
}
impl<B, T> ZerocopyResponseMakerSetupMessageExt<B, T> for RefMaker<B, T>
where
B: ByteSliceMut,
T: Message,
{
fn setup_msg(self) -> anyhow::Result<Ref<B, T>> {
T::setup(self.into_buf())
}
}

View File

@@ -1,162 +0,0 @@
use hex_literal::hex;
use rosenpass_util::zerocopy::RefMaker;
use zerocopy::ByteSlice;
use crate::RosenpassError::{self, InvalidApiMessageType};
pub type RawMsgType = u128;
// constants generated by gen-ipc-msg-types:
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Ping Request
pub const PING_REQUEST: RawMsgType =
RawMsgType::from_le_bytes(hex!("2397 3ecc c441 704d 0b02 ea31 45d3 4999"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Ping Response
pub const PING_RESPONSE: RawMsgType =
RawMsgType::from_le_bytes(hex!("4ec7 f6f0 2bbc ba64 48f1 da14 c7cf 0260"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Supply Keypair Request
const SUPPLY_KEYPAIR_REQUEST: RawMsgType =
RawMsgType::from_le_bytes(hex!("ac91 a5a6 4f4b 21d0 ac7f 9b55 74f7 3529"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Supply Keypair Response
const SUPPLY_KEYPAIR_RESPONSE: RawMsgType =
RawMsgType::from_le_bytes(hex!("f2dc 49bd e261 5f10 40b7 3c16 ec61 edb9"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Add Listen Socket Request
const ADD_LISTEN_SOCKET_REQUEST: RawMsgType =
RawMsgType::from_le_bytes(hex!("3f21 434f 87cc a08c 02c4 61e4 0816 c7da"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Add Listen Socket Response
const ADD_LISTEN_SOCKET_RESPONSE: RawMsgType =
RawMsgType::from_le_bytes(hex!("45d5 0f0d 93f0 6105 98f2 9469 5dfd 5f36"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Add Psk Broker Request
const ADD_PSK_BROKER_REQUEST: RawMsgType =
RawMsgType::from_le_bytes(hex!("d798 b8dc bd61 5cab 8df1 c63d e4eb a2d1"));
// hash domain hash of: Rosenpass IPC API -> Rosenpass Protocol Server -> Add Psk Broker Response
const ADD_PSK_BROKER_RESPONSE: RawMsgType =
RawMsgType::from_le_bytes(hex!("bd25 e418 ffb0 6930 248b 217e 2fae e353"));
pub trait MessageAttributes {
fn message_size(&self) -> usize;
}
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub enum RequestMsgType {
Ping,
SupplyKeypair,
AddListenSocket,
AddPskBroker,
}
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub enum ResponseMsgType {
Ping,
SupplyKeypair,
AddListenSocket,
AddPskBroker,
}
impl MessageAttributes for RequestMsgType {
fn message_size(&self) -> usize {
match self {
Self::Ping => std::mem::size_of::<super::PingRequest>(),
Self::SupplyKeypair => std::mem::size_of::<super::SupplyKeypairRequest>(),
Self::AddListenSocket => std::mem::size_of::<super::AddListenSocketRequest>(),
Self::AddPskBroker => std::mem::size_of::<super::AddPskBrokerRequest>(),
}
}
}
impl MessageAttributes for ResponseMsgType {
fn message_size(&self) -> usize {
match self {
Self::Ping => std::mem::size_of::<super::PingResponse>(),
Self::SupplyKeypair => std::mem::size_of::<super::SupplyKeypairResponse>(),
Self::AddListenSocket => std::mem::size_of::<super::AddListenSocketResponse>(),
Self::AddPskBroker => std::mem::size_of::<super::AddPskBrokerResponse>(),
}
}
}
impl TryFrom<RawMsgType> for RequestMsgType {
type Error = RosenpassError;
fn try_from(value: RawMsgType) -> Result<Self, Self::Error> {
use RequestMsgType as E;
Ok(match value {
self::PING_REQUEST => E::Ping,
self::SUPPLY_KEYPAIR_REQUEST => E::SupplyKeypair,
self::ADD_LISTEN_SOCKET_REQUEST => E::AddListenSocket,
self::ADD_PSK_BROKER_REQUEST => E::AddPskBroker,
_ => return Err(InvalidApiMessageType(value)),
})
}
}
impl From<RequestMsgType> for RawMsgType {
fn from(val: RequestMsgType) -> Self {
use RequestMsgType as E;
match val {
E::Ping => self::PING_REQUEST,
E::SupplyKeypair => self::SUPPLY_KEYPAIR_REQUEST,
E::AddListenSocket => self::ADD_LISTEN_SOCKET_REQUEST,
E::AddPskBroker => self::ADD_PSK_BROKER_REQUEST,
}
}
}
impl TryFrom<RawMsgType> for ResponseMsgType {
type Error = RosenpassError;
fn try_from(value: RawMsgType) -> Result<Self, Self::Error> {
use ResponseMsgType as E;
Ok(match value {
self::PING_RESPONSE => E::Ping,
self::SUPPLY_KEYPAIR_RESPONSE => E::SupplyKeypair,
self::ADD_LISTEN_SOCKET_RESPONSE => E::AddListenSocket,
self::ADD_PSK_BROKER_RESPONSE => E::AddPskBroker,
_ => return Err(InvalidApiMessageType(value)),
})
}
}
impl From<ResponseMsgType> for RawMsgType {
fn from(val: ResponseMsgType) -> Self {
use ResponseMsgType as E;
match val {
E::Ping => self::PING_RESPONSE,
E::SupplyKeypair => self::SUPPLY_KEYPAIR_RESPONSE,
E::AddListenSocket => self::ADD_LISTEN_SOCKET_RESPONSE,
E::AddPskBroker => self::ADD_PSK_BROKER_RESPONSE,
}
}
}
pub trait RawMsgTypeExt {
fn into_request_msg_type(self) -> Result<RequestMsgType, RosenpassError>;
fn into_response_msg_type(self) -> Result<ResponseMsgType, RosenpassError>;
}
impl RawMsgTypeExt for RawMsgType {
fn into_request_msg_type(self) -> Result<RequestMsgType, RosenpassError> {
self.try_into()
}
fn into_response_msg_type(self) -> Result<ResponseMsgType, RosenpassError> {
self.try_into()
}
}
pub trait RefMakerRawMsgTypeExt {
fn parse_request_msg_type(self) -> anyhow::Result<RequestMsgType>;
fn parse_response_msg_type(self) -> anyhow::Result<ResponseMsgType>;
}
impl<B: ByteSlice> RefMakerRawMsgTypeExt for RefMaker<B, RawMsgType> {
fn parse_request_msg_type(self) -> anyhow::Result<RequestMsgType> {
Ok(self.parse()?.read().try_into()?)
}
fn parse_response_msg_type(self) -> anyhow::Result<ResponseMsgType> {
Ok(self.parse()?.read().try_into()?)
}
}
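// Illustrative sketch (not part of the original file): mapping a raw wire
// discriminator onto the typed request enum via the TryFrom impl above;
// unknown values surface as InvalidApiMessageType and are discarded here.
fn example_classify_request(raw: RawMsgType) -> Option<RequestMsgType> {
    RequestMsgType::try_from(raw).ok()
}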

View File

@@ -1,17 +0,0 @@
mod byte_slice_ext;
mod message_trait;
mod message_type;
mod payload;
mod request_ref;
mod request_response;
mod response_ref;
mod server;
pub use byte_slice_ext::*;
pub use message_trait::*;
pub use message_type::*;
pub use payload::*;
pub use request_ref::*;
pub use request_response::*;
pub use response_ref::*;
pub use server::*;

View File

@@ -1,353 +0,0 @@
use rosenpass_util::zerocopy::ZerocopyMutSliceExt;
use zerocopy::{AsBytes, ByteSliceMut, FromBytes, FromZeroes, Ref};
use super::{Message, RawMsgType, RequestMsgType, ResponseMsgType};
/// Size required to fit any message in binary form
pub const MAX_REQUEST_LEN: usize = 2500; // TODO fix this
pub const MAX_RESPONSE_LEN: usize = 2500; // TODO fix this
pub const MAX_REQUEST_FDS: usize = 2;
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct Envelope<M: AsBytes + FromBytes> {
/// Which message this is
pub msg_type: RawMsgType,
/// The actual payload
pub payload: M,
}
pub type RequestEnvelope<M> = Envelope<M>;
pub type ResponseEnvelope<M> = Envelope<M>;
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct PingRequestPayload {
/// Randomly generated connection id
pub echo: [u8; 256],
}
pub type PingRequest = RequestEnvelope<PingRequestPayload>;
impl PingRequest {
pub fn new(echo: [u8; 256]) -> Self {
Self::from_payload(PingRequestPayload { echo })
}
}
impl Message for PingRequest {
type Payload = PingRequestPayload;
type MessageClass = RequestMsgType;
const MESSAGE_TYPE: Self::MessageClass = RequestMsgType::Ping;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct PingResponsePayload {
/// Randomly generated connection id
pub echo: [u8; 256],
}
pub type PingResponse = ResponseEnvelope<PingResponsePayload>;
impl PingResponse {
pub fn new(echo: [u8; 256]) -> Self {
Self::from_payload(PingResponsePayload { echo })
}
}
impl Message for PingResponse {
type Payload = PingResponsePayload;
type MessageClass = ResponseMsgType;
const MESSAGE_TYPE: Self::MessageClass = ResponseMsgType::Ping;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct SupplyKeypairRequestPayload {}
pub type SupplyKeypairRequest = RequestEnvelope<SupplyKeypairRequestPayload>;
impl Default for SupplyKeypairRequest {
fn default() -> Self {
Self::new()
}
}
impl SupplyKeypairRequest {
pub fn new() -> Self {
Self::from_payload(SupplyKeypairRequestPayload {})
}
}
impl Message for SupplyKeypairRequest {
type Payload = SupplyKeypairRequestPayload;
type MessageClass = RequestMsgType;
const MESSAGE_TYPE: Self::MessageClass = RequestMsgType::SupplyKeypair;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
pub mod supply_keypair_response_status {
pub const OK: u128 = 0;
pub const KEYPAIR_ALREADY_SUPPLIED: u128 = 1;
// TODO: This is not actually part of the API. Remove.
pub const INTERNAL_ERROR: u128 = 2;
pub const INVALID_REQUEST: u128 = 3;
/// TODO: Deprecated, remove
pub const IO_ERROR: u128 = 4;
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct SupplyKeypairResponsePayload {
pub status: u128,
}
pub type SupplyKeypairResponse = ResponseEnvelope<SupplyKeypairResponsePayload>;
impl SupplyKeypairResponse {
pub fn new(status: u128) -> Self {
Self::from_payload(SupplyKeypairResponsePayload { status })
}
}
impl Message for SupplyKeypairResponse {
type Payload = SupplyKeypairResponsePayload;
type MessageClass = ResponseMsgType;
const MESSAGE_TYPE: Self::MessageClass = ResponseMsgType::SupplyKeypair;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct AddListenSocketRequestPayload {}
pub type AddListenSocketRequest = RequestEnvelope<AddListenSocketRequestPayload>;
impl Default for AddListenSocketRequest {
fn default() -> Self {
Self::new()
}
}
impl AddListenSocketRequest {
pub fn new() -> Self {
Self::from_payload(AddListenSocketRequestPayload {})
}
}
impl Message for AddListenSocketRequest {
type Payload = AddListenSocketRequestPayload;
type MessageClass = RequestMsgType;
const MESSAGE_TYPE: Self::MessageClass = RequestMsgType::AddListenSocket;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
pub mod add_listen_socket_response_status {
pub const OK: u128 = 0;
pub const INVALID_REQUEST: u128 = 1;
pub const INTERNAL_ERROR: u128 = 2;
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct AddListenSocketResponsePayload {
pub status: u128,
}
pub type AddListenSocketResponse = ResponseEnvelope<AddListenSocketResponsePayload>;
impl AddListenSocketResponse {
pub fn new(status: u128) -> Self {
Self::from_payload(AddListenSocketResponsePayload { status })
}
}
impl Message for AddListenSocketResponse {
type Payload = AddListenSocketResponsePayload;
type MessageClass = ResponseMsgType;
const MESSAGE_TYPE: Self::MessageClass = ResponseMsgType::AddListenSocket;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct AddPskBrokerRequestPayload {}
pub type AddPskBrokerRequest = RequestEnvelope<AddPskBrokerRequestPayload>;
impl Default for AddPskBrokerRequest {
fn default() -> Self {
Self::new()
}
}
impl AddPskBrokerRequest {
pub fn new() -> Self {
Self::from_payload(AddPskBrokerRequestPayload {})
}
}
impl Message for AddPskBrokerRequest {
type Payload = AddPskBrokerRequestPayload;
type MessageClass = RequestMsgType;
const MESSAGE_TYPE: Self::MessageClass = RequestMsgType::AddPskBroker;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
pub mod add_psk_broker_response_status {
pub const OK: u128 = 0;
pub const INVALID_REQUEST: u128 = 1;
pub const INTERNAL_ERROR: u128 = 2;
}
#[repr(packed)]
#[derive(Debug, Copy, Clone, Hash, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct AddPskBrokerResponsePayload {
pub status: u128,
}
pub type AddPskBrokerResponse = ResponseEnvelope<AddPskBrokerResponsePayload>;
impl AddPskBrokerResponse {
pub fn new(status: u128) -> Self {
Self::from_payload(AddPskBrokerResponsePayload { status })
}
}
impl Message for AddPskBrokerResponse {
type Payload = AddPskBrokerResponsePayload;
type MessageClass = ResponseMsgType;
const MESSAGE_TYPE: Self::MessageClass = ResponseMsgType::AddPskBroker;
fn from_payload(payload: Self::Payload) -> Self {
Self {
msg_type: Self::MESSAGE_TYPE.into(),
payload,
}
}
fn setup<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self>> {
let mut r: Ref<B, Self> = buf.zk_zeroized()?;
r.init();
Ok(r)
}
fn init(&mut self) {
self.msg_type = Self::MESSAGE_TYPE.into();
}
}
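// Illustrative sketch (not part of the original file): building a ping request
// as an owned value and obtaining its wire representation through zerocopy's
// AsBytes. The all-zero echo value is arbitrary; in practice it would be random.
fn example_ping_request_bytes() -> Vec<u8> {
    let req = PingRequest::new([0u8; 256]);
    req.as_bytes().to_vec()
}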

View File

@@ -1,146 +0,0 @@
use anyhow::ensure;
use zerocopy::{ByteSlice, ByteSliceMut, Ref};
use super::{ByteSliceRefExt, MessageAttributes, PingRequest, RequestMsgType};
struct RequestRefMaker<B> {
buf: B,
msg_type: RequestMsgType,
}
impl<B: ByteSlice> RequestRef<B> {
pub fn parse(buf: B) -> anyhow::Result<Self> {
RequestRefMaker::new(buf)?.parse()
}
pub fn parse_from_prefix(buf: B) -> anyhow::Result<Self> {
RequestRefMaker::new(buf)?.from_prefix()?.parse()
}
pub fn parse_from_suffix(buf: B) -> anyhow::Result<Self> {
RequestRefMaker::new(buf)?.from_suffix()?.parse()
}
pub fn message_type(&self) -> RequestMsgType {
match self {
Self::Ping(_) => RequestMsgType::Ping,
Self::SupplyKeypair(_) => RequestMsgType::SupplyKeypair,
Self::AddListenSocket(_) => RequestMsgType::AddListenSocket,
Self::AddPskBroker(_) => RequestMsgType::AddPskBroker,
}
}
}
impl<B> From<Ref<B, PingRequest>> for RequestRef<B> {
fn from(v: Ref<B, PingRequest>) -> Self {
Self::Ping(v)
}
}
impl<B> From<Ref<B, super::SupplyKeypairRequest>> for RequestRef<B> {
fn from(v: Ref<B, super::SupplyKeypairRequest>) -> Self {
Self::SupplyKeypair(v)
}
}
impl<B> From<Ref<B, super::AddListenSocketRequest>> for RequestRef<B> {
fn from(v: Ref<B, super::AddListenSocketRequest>) -> Self {
Self::AddListenSocket(v)
}
}
impl<B> From<Ref<B, super::AddPskBrokerRequest>> for RequestRef<B> {
fn from(v: Ref<B, super::AddPskBrokerRequest>) -> Self {
Self::AddPskBroker(v)
}
}
impl<B: ByteSlice> RequestRefMaker<B> {
fn new(buf: B) -> anyhow::Result<Self> {
let msg_type = buf.deref().request_msg_type_from_prefix()?;
Ok(Self { buf, msg_type })
}
fn target_size(&self) -> usize {
self.msg_type.message_size()
}
fn parse(self) -> anyhow::Result<RequestRef<B>> {
Ok(match self.msg_type {
RequestMsgType::Ping => RequestRef::Ping(self.buf.ping_request()?),
RequestMsgType::SupplyKeypair => {
RequestRef::SupplyKeypair(self.buf.supply_keypair_request()?)
}
RequestMsgType::AddListenSocket => {
RequestRef::AddListenSocket(self.buf.add_listen_socket_request()?)
}
RequestMsgType::AddPskBroker => {
RequestRef::AddPskBroker(self.buf.add_psk_broker_request()?)
}
})
}
#[allow(clippy::wrong_self_convention)]
fn from_prefix(self) -> anyhow::Result<Self> {
self.ensure_fit()?;
let point = self.target_size();
let Self { buf, msg_type } = self;
let (buf, _) = buf.split_at(point);
Ok(Self { buf, msg_type })
}
#[allow(clippy::wrong_self_convention)]
fn from_suffix(self) -> anyhow::Result<Self> {
self.ensure_fit()?;
let point = self.buf.len() - self.target_size();
let Self { buf, msg_type } = self;
let (buf, _) = buf.split_at(point);
Ok(Self { buf, msg_type })
}
pub fn ensure_fit(&self) -> anyhow::Result<()> {
let have = self.buf.len();
let need = self.target_size();
ensure!(
need <= have,
"Buffer is undersized at {have} bytes (need {need} bytes)!"
);
Ok(())
}
}
pub enum RequestRef<B> {
Ping(Ref<B, PingRequest>),
SupplyKeypair(Ref<B, super::SupplyKeypairRequest>),
AddListenSocket(Ref<B, super::AddListenSocketRequest>),
AddPskBroker(Ref<B, super::AddPskBrokerRequest>),
}
impl<B> RequestRef<B>
where
B: ByteSlice,
{
pub fn bytes(&self) -> &[u8] {
match self {
Self::Ping(r) => r.bytes(),
Self::SupplyKeypair(r) => r.bytes(),
Self::AddListenSocket(r) => r.bytes(),
Self::AddPskBroker(r) => r.bytes(),
}
}
}
impl<B> RequestRef<B>
where
B: ByteSliceMut,
{
pub fn bytes_mut(&mut self) -> &[u8] {
match self {
Self::Ping(r) => r.bytes_mut(),
Self::SupplyKeypair(r) => r.bytes_mut(),
Self::AddListenSocket(r) => r.bytes_mut(),
Self::AddPskBroker(r) => r.bytes_mut(),
}
}
}
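// Illustrative sketch (not part of the original file): dispatching on a buffer
// that is assumed to start with a complete request envelope; any trailing bytes
// are ignored by parse_from_prefix.
fn example_request_kind(buf: &[u8]) -> anyhow::Result<RequestMsgType> {
    let req = RequestRef::parse_from_prefix(buf)?;
    Ok(req.message_type())
}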

View File

@@ -1,190 +0,0 @@
use rosenpass_util::zerocopy::{
RefMaker, ZerocopyEmancipateExt, ZerocopyEmancipateMutExt, ZerocopySliceExt,
};
use zerocopy::{ByteSlice, ByteSliceMut, Ref};
use super::{Message, PingRequest, PingResponse};
use super::{RequestRef, ResponseRef, ZerocopyResponseMakerSetupMessageExt};
pub trait RequestMsg: Sized + Message {
type ResponseMsg: ResponseMsg;
fn zk_response_maker<B: ByteSlice>(buf: B) -> RefMaker<B, Self::ResponseMsg> {
buf.zk_ref_maker()
}
fn setup_response<B: ByteSliceMut>(buf: B) -> anyhow::Result<Ref<B, Self::ResponseMsg>> {
Self::zk_response_maker(buf).setup_msg()
}
fn setup_response_from_prefix<B: ByteSliceMut>(
buf: B,
) -> anyhow::Result<Ref<B, Self::ResponseMsg>> {
Self::zk_response_maker(buf).from_prefix()?.setup_msg()
}
fn setup_response_from_suffix<B: ByteSliceMut>(
buf: B,
) -> anyhow::Result<Ref<B, Self::ResponseMsg>> {
Self::zk_response_maker(buf).from_suffix()?.setup_msg()
}
}
pub trait ResponseMsg: Message {
type RequestMsg: RequestMsg;
}
impl RequestMsg for PingRequest {
type ResponseMsg = PingResponse;
}
impl ResponseMsg for PingResponse {
type RequestMsg = PingRequest;
}
impl RequestMsg for super::SupplyKeypairRequest {
type ResponseMsg = super::SupplyKeypairResponse;
}
impl ResponseMsg for super::SupplyKeypairResponse {
type RequestMsg = super::SupplyKeypairRequest;
}
impl RequestMsg for super::AddListenSocketRequest {
type ResponseMsg = super::AddListenSocketResponse;
}
impl ResponseMsg for super::AddListenSocketResponse {
type RequestMsg = super::AddListenSocketRequest;
}
impl RequestMsg for super::AddPskBrokerRequest {
type ResponseMsg = super::AddPskBrokerResponse;
}
impl ResponseMsg for super::AddPskBrokerResponse {
type RequestMsg = super::AddPskBrokerRequest;
}
pub type PingPair<B1, B2> = (Ref<B1, PingRequest>, Ref<B2, PingResponse>);
pub type SupplyKeypairPair<B1, B2> = (
Ref<B1, super::SupplyKeypairRequest>,
Ref<B2, super::SupplyKeypairResponse>,
);
pub type AddListenSocketPair<B1, B2> = (
Ref<B1, super::AddListenSocketRequest>,
Ref<B2, super::AddListenSocketResponse>,
);
pub type AddPskBrokerPair<B1, B2> = (
Ref<B1, super::AddPskBrokerRequest>,
Ref<B2, super::AddPskBrokerResponse>,
);
pub enum RequestResponsePair<B1, B2> {
Ping(PingPair<B1, B2>),
SupplyKeypair(SupplyKeypairPair<B1, B2>),
AddListenSocket(AddListenSocketPair<B1, B2>),
AddPskBroker(AddPskBrokerPair<B1, B2>),
}
impl<B1, B2> From<PingPair<B1, B2>> for RequestResponsePair<B1, B2> {
fn from(v: PingPair<B1, B2>) -> Self {
RequestResponsePair::Ping(v)
}
}
impl<B1, B2> From<SupplyKeypairPair<B1, B2>> for RequestResponsePair<B1, B2> {
fn from(v: SupplyKeypairPair<B1, B2>) -> Self {
RequestResponsePair::SupplyKeypair(v)
}
}
impl<B1, B2> From<AddListenSocketPair<B1, B2>> for RequestResponsePair<B1, B2> {
fn from(v: AddListenSocketPair<B1, B2>) -> Self {
RequestResponsePair::AddListenSocket(v)
}
}
impl<B1, B2> From<AddPskBrokerPair<B1, B2>> for RequestResponsePair<B1, B2> {
fn from(v: AddPskBrokerPair<B1, B2>) -> Self {
RequestResponsePair::AddPskBroker(v)
}
}
impl<B1, B2> RequestResponsePair<B1, B2>
where
B1: ByteSlice,
B2: ByteSlice,
{
pub fn both(&self) -> (RequestRef<&[u8]>, ResponseRef<&[u8]>) {
match self {
Self::Ping((req, res)) => {
let req = RequestRef::Ping(req.emancipate());
let res = ResponseRef::Ping(res.emancipate());
(req, res)
}
Self::SupplyKeypair((req, res)) => {
let req = RequestRef::SupplyKeypair(req.emancipate());
let res = ResponseRef::SupplyKeypair(res.emancipate());
(req, res)
}
Self::AddListenSocket((req, res)) => {
let req = RequestRef::AddListenSocket(req.emancipate());
let res = ResponseRef::AddListenSocket(res.emancipate());
(req, res)
}
Self::AddPskBroker((req, res)) => {
let req = RequestRef::AddPskBroker(req.emancipate());
let res = ResponseRef::AddPskBroker(res.emancipate());
(req, res)
}
}
}
pub fn request(&self) -> RequestRef<&[u8]> {
self.both().0
}
pub fn response(&self) -> ResponseRef<&[u8]> {
self.both().1
}
}
impl<B1, B2> RequestResponsePair<B1, B2>
where
B1: ByteSliceMut,
B2: ByteSliceMut,
{
pub fn both_mut(&mut self) -> (RequestRef<&mut [u8]>, ResponseRef<&mut [u8]>) {
match self {
Self::Ping((req, res)) => {
let req = RequestRef::Ping(req.emancipate_mut());
let res = ResponseRef::Ping(res.emancipate_mut());
(req, res)
}
Self::SupplyKeypair((req, res)) => {
let req = RequestRef::SupplyKeypair(req.emancipate_mut());
let res = ResponseRef::SupplyKeypair(res.emancipate_mut());
(req, res)
}
Self::AddListenSocket((req, res)) => {
let req = RequestRef::AddListenSocket(req.emancipate_mut());
let res = ResponseRef::AddListenSocket(res.emancipate_mut());
(req, res)
}
Self::AddPskBroker((req, res)) => {
let req = RequestRef::AddPskBroker(req.emancipate_mut());
let res = ResponseRef::AddPskBroker(res.emancipate_mut());
(req, res)
}
}
}
pub fn request_mut(&mut self) -> RequestRef<&mut [u8]> {
self.both_mut().0
}
pub fn response_mut(&mut self) -> ResponseRef<&mut [u8]> {
self.both_mut().1
}
}
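// Illustrative sketch (not part of the original file): using the request/response
// pairing above to set up a zero-initialized PingResponse inside a caller-provided
// buffer; the buffer merely has to be at least size_of::<PingResponse>() bytes long.
fn example_setup_ping_response(buf: &mut [u8]) -> anyhow::Result<Ref<&mut [u8], PingResponse>> {
    PingRequest::setup_response(buf)
}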

View File

@@ -1,147 +0,0 @@
// TODO: This is copied verbatim from RequestRef…not pretty
use anyhow::ensure;
use zerocopy::{ByteSlice, ByteSliceMut, Ref};
use super::{ByteSliceRefExt, MessageAttributes, PingResponse, ResponseMsgType};
struct ResponseRefMaker<B> {
buf: B,
msg_type: ResponseMsgType,
}
impl<B: ByteSlice> ResponseRef<B> {
pub fn parse(buf: B) -> anyhow::Result<Self> {
ResponseRefMaker::new(buf)?.parse()
}
pub fn parse_from_prefix(buf: B) -> anyhow::Result<Self> {
ResponseRefMaker::new(buf)?.from_prefix()?.parse()
}
pub fn parse_from_suffix(buf: B) -> anyhow::Result<Self> {
ResponseRefMaker::new(buf)?.from_suffix()?.parse()
}
pub fn message_type(&self) -> ResponseMsgType {
match self {
Self::Ping(_) => ResponseMsgType::Ping,
Self::SupplyKeypair(_) => ResponseMsgType::SupplyKeypair,
Self::AddListenSocket(_) => ResponseMsgType::AddListenSocket,
Self::AddPskBroker(_) => ResponseMsgType::AddPskBroker,
}
}
}
impl<B> From<Ref<B, PingResponse>> for ResponseRef<B> {
fn from(v: Ref<B, PingResponse>) -> Self {
Self::Ping(v)
}
}
impl<B> From<Ref<B, super::SupplyKeypairResponse>> for ResponseRef<B> {
fn from(v: Ref<B, super::SupplyKeypairResponse>) -> Self {
Self::SupplyKeypair(v)
}
}
impl<B> From<Ref<B, super::AddListenSocketResponse>> for ResponseRef<B> {
fn from(v: Ref<B, super::AddListenSocketResponse>) -> Self {
Self::AddListenSocket(v)
}
}
impl<B> From<Ref<B, super::AddPskBrokerResponse>> for ResponseRef<B> {
fn from(v: Ref<B, super::AddPskBrokerResponse>) -> Self {
Self::AddPskBroker(v)
}
}
impl<B: ByteSlice> ResponseRefMaker<B> {
fn new(buf: B) -> anyhow::Result<Self> {
let msg_type = buf.deref().response_msg_type_from_prefix()?;
Ok(Self { buf, msg_type })
}
fn target_size(&self) -> usize {
self.msg_type.message_size()
}
fn parse(self) -> anyhow::Result<ResponseRef<B>> {
Ok(match self.msg_type {
ResponseMsgType::Ping => ResponseRef::Ping(self.buf.ping_response()?),
ResponseMsgType::SupplyKeypair => {
ResponseRef::SupplyKeypair(self.buf.supply_keypair_response()?)
}
ResponseMsgType::AddListenSocket => {
ResponseRef::AddListenSocket(self.buf.add_listen_socket_response()?)
}
ResponseMsgType::AddPskBroker => {
ResponseRef::AddPskBroker(self.buf.add_psk_broker_response()?)
}
})
}
#[allow(clippy::wrong_self_convention)]
fn from_prefix(self) -> anyhow::Result<Self> {
self.ensure_fit()?;
let point = self.target_size();
let Self { buf, msg_type } = self;
let (buf, _) = buf.split_at(point);
Ok(Self { buf, msg_type })
}
#[allow(clippy::wrong_self_convention)]
fn from_suffix(self) -> anyhow::Result<Self> {
self.ensure_fit()?;
let point = self.buf.len() - self.target_size();
let Self { buf, msg_type } = self;
let (buf, _) = buf.split_at(point);
Ok(Self { buf, msg_type })
}
pub fn ensure_fit(&self) -> anyhow::Result<()> {
let have = self.buf.len();
let need = self.target_size();
ensure!(
need <= have,
"Buffer is undersized at {have} bytes (need {need} bytes)!"
);
Ok(())
}
}
pub enum ResponseRef<B> {
Ping(Ref<B, PingResponse>),
SupplyKeypair(Ref<B, super::SupplyKeypairResponse>),
AddListenSocket(Ref<B, super::AddListenSocketResponse>),
AddPskBroker(Ref<B, super::AddPskBrokerResponse>),
}
impl<B> ResponseRef<B>
where
B: ByteSlice,
{
pub fn bytes(&self) -> &[u8] {
match self {
Self::Ping(r) => r.bytes(),
Self::SupplyKeypair(r) => r.bytes(),
Self::AddListenSocket(r) => r.bytes(),
Self::AddPskBroker(r) => r.bytes(),
}
}
}
impl<B> ResponseRef<B>
where
B: ByteSliceMut,
{
pub fn bytes_mut(&mut self) -> &[u8] {
match self {
Self::Ping(r) => r.bytes_mut(),
Self::SupplyKeypair(r) => r.bytes_mut(),
Self::AddListenSocket(r) => r.bytes_mut(),
Self::AddPskBroker(r) => r.bytes_mut(),
}
}
}

View File

@@ -1,159 +0,0 @@
use super::{ByteSliceRefExt, Message, PingRequest, PingResponse, RequestRef, RequestResponsePair};
use std::{collections::VecDeque, os::fd::OwnedFd};
use zerocopy::{ByteSlice, ByteSliceMut};
pub trait Server {
/// This implements the handler for the [crate::api::RequestMsgType::Ping] API message
///
/// It merely takes a buffer and returns that same buffer.
fn ping(
&mut self,
req: &PingRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut PingResponse,
) -> anyhow::Result<()>;
/// Supply the cryptographic server keypair through file descriptor passing in the API
///
/// This implements the handler for the [crate::api::RequestMsgType::SupplyKeypair] API message.
///
/// # File descriptors
///
/// 1. The secret key (size must match exactly); the file descriptor must be backed by either
/// of
/// - file-system file
/// - [memfd](https://man.archlinux.org/man/memfd.2.en)
///    - [memfd_secret](https://man.archlinux.org/man/memfd_secret.2.en)
/// 2. The public key (size must match exactly); the file descriptor must be backed by either
/// of
/// - file-system file
/// - [memfd](https://man.archlinux.org/man/memfd.2.en)
///    - [memfd_secret](https://man.archlinux.org/man/memfd_secret.2.en)
///
/// # API Return Status
///
/// 1. [crate::api::supply_keypair_response_status::OK] - Indicates success
/// 2. [crate::api::supply_keypair_response_status::KEYPAIR_ALREADY_SUPPLIED] The endpoint was used but
/// the server already has server keys
/// 3. [crate::api::supply_keypair_response_status::INVALID_REQUEST] Malformed request; could be:
/// - Missing file descriptors for public key
/// - File descriptors contain data of invalid length
/// - Invalid file descriptor type
///
/// # Description
///
/// At startup, if no server keys are specified in the rosenpass configuration, and if the API
/// is enabled, the Rosenpass process waits for server keys to be supplied via the API. Until
/// then, any messages for the Rosenpass cryptographic protocol are ignored and dropped, since
/// all cryptographic operations require access to the server keys.
///
/// Both private and public keys are specified through file descriptors and both are read from
/// their respective file descriptors into process memory. A file descriptor based transport is
/// used because of the excessive size of Classic McEliece public keys (100kb and up).
///
/// The file descriptors for the keys need not be backed by a file on disk. You can supply a
/// [memfd](https://man.archlinux.org/man/memfd.2.en) or [memfd_secret](https://man.archlinux.org/man/memfd_secret.2.en)
/// backed file descriptor if the server keys are not backed by a file system file.
fn supply_keypair(
&mut self,
req: &super::SupplyKeypairRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::SupplyKeypairResponse,
) -> anyhow::Result<()>;
/// Supply a new UDP listen socket through file descriptor passing via the API
///
/// This implements the handler for the [crate::api::RequestMsgType::AddListenSocket] API message.
///
/// # File descriptors
///
/// 1. The listen socket; must be backed by a UDP network listen socket
///
/// # API Return Status
///
/// 1. [crate::api::add_listen_socket_response_status::OK] - Indicates success
/// 2. [crate::api::add_listen_socket_response_status::INVALID_REQUEST] Malformed request; could be:
///    - Missing file descriptor for the listen socket
/// - Invalid file descriptor type
/// 3. [crate::api::add_listen_socket_response_status::INTERNAL_ERROR] Some other, non-fatal error
///    occurred. Check the logs for details.
///
/// # Description
///
/// This endpoint allows you to supply a UDP listen socket; it will be used to perform
/// cryptographic key exchanges via the Rosenpass protocol.
fn add_listen_socket(
&mut self,
req: &super::AddListenSocketRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::AddListenSocketResponse,
) -> anyhow::Result<()>;
fn add_psk_broker(
&mut self,
req: &super::AddPskBrokerRequest,
req_fds: &mut VecDeque<OwnedFd>,
res: &mut super::AddPskBrokerResponse,
) -> anyhow::Result<()>;
fn dispatch<ReqBuf, ResBuf>(
&mut self,
p: &mut RequestResponsePair<ReqBuf, ResBuf>,
req_fds: &mut VecDeque<OwnedFd>,
) -> anyhow::Result<()>
where
ReqBuf: ByteSlice,
ResBuf: ByteSliceMut,
{
match p {
RequestResponsePair::Ping((req, res)) => self.ping(req, req_fds, res),
RequestResponsePair::SupplyKeypair((req, res)) => {
self.supply_keypair(req, req_fds, res)
}
RequestResponsePair::AddListenSocket((req, res)) => {
self.add_listen_socket(req, req_fds, res)
}
RequestResponsePair::AddPskBroker((req, res)) => self.add_psk_broker(req, req_fds, res),
}
}
fn handle_message<ReqBuf, ResBuf>(
&mut self,
req: ReqBuf,
req_fds: &mut VecDeque<OwnedFd>,
res: ResBuf,
) -> anyhow::Result<usize>
where
ReqBuf: ByteSlice,
ResBuf: ByteSliceMut,
{
let req = req.parse_request_from_prefix()?;
// TODO: This is not pretty; This match should be moved into RequestRef
let mut pair = match req {
RequestRef::Ping(req) => {
let mut res = res.ping_response_from_prefix()?;
res.init();
RequestResponsePair::Ping((req, res))
}
RequestRef::SupplyKeypair(req) => {
let mut res = res.supply_keypair_response_from_prefix()?;
res.init();
RequestResponsePair::SupplyKeypair((req, res))
}
RequestRef::AddListenSocket(req) => {
let mut res = res.add_listen_socket_response_from_prefix()?;
res.init();
RequestResponsePair::AddListenSocket((req, res))
}
RequestRef::AddPskBroker(req) => {
let mut res = res.add_psk_broker_response_from_prefix()?;
res.init();
RequestResponsePair::AddPskBroker((req, res))
}
};
self.dispatch(&mut pair, req_fds)?;
let res_len = pair.response().bytes().len();
Ok(res_len)
}
}
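// Illustrative sketch (not part of the original file): a minimal implementor of
// the Server trait. The ping handler echoes the request payload back as described
// above; the remaining handlers just report INVALID_REQUEST because this sketch
// does not manage any real server state.
struct EchoOnlyServer;

impl Server for EchoOnlyServer {
    fn ping(
        &mut self,
        req: &PingRequest,
        _req_fds: &mut VecDeque<OwnedFd>,
        res: &mut PingResponse,
    ) -> anyhow::Result<()> {
        res.payload.echo = req.payload.echo;
        Ok(())
    }

    fn supply_keypair(
        &mut self,
        _req: &super::SupplyKeypairRequest,
        _req_fds: &mut VecDeque<OwnedFd>,
        res: &mut super::SupplyKeypairResponse,
    ) -> anyhow::Result<()> {
        res.payload.status = super::supply_keypair_response_status::INVALID_REQUEST;
        Ok(())
    }

    fn add_listen_socket(
        &mut self,
        _req: &super::AddListenSocketRequest,
        _req_fds: &mut VecDeque<OwnedFd>,
        res: &mut super::AddListenSocketResponse,
    ) -> anyhow::Result<()> {
        res.payload.status = super::add_listen_socket_response_status::INVALID_REQUEST;
        Ok(())
    }

    fn add_psk_broker(
        &mut self,
        _req: &super::AddPskBrokerRequest,
        _req_fds: &mut VecDeque<OwnedFd>,
        res: &mut super::AddPskBrokerResponse,
    ) -> anyhow::Result<()> {
        res.payload.status = super::add_psk_broker_response_status::INVALID_REQUEST;
        Ok(())
    }
}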

View File

@@ -1,40 +0,0 @@
use std::path::PathBuf;
use clap::Args;
use crate::config::Rosenpass as RosenpassConfig;
use super::config::ApiConfig;
#[cfg(feature = "experiment_api")]
#[derive(Args, Debug)]
pub struct ApiCli {
/// Where in the file-system to create the unix socket the rosenpass API will be listening for
/// connections on.
#[arg(long)]
api_listen_path: Vec<PathBuf>,
/// When rosenpass is called from another process, the other process can open and bind the
/// unix socket for the Rosenpass API to use themselves, passing it to this process. In Rust this can be achieved
/// using the [command-fds](https://docs.rs/command-fds/latest/command_fds/) crate.
#[arg(long)]
api_listen_fd: Vec<i32>,
/// When rosenpass is called from another process, the other process can connect the unix socket for the API
/// themselves, for instance using the `socketpair(2)` system call.
#[arg(long)]
api_stream_fd: Vec<i32>,
}
impl ApiCli {
pub fn apply_to_config(&self, cfg: &mut RosenpassConfig) -> anyhow::Result<()> {
self.apply_to_api_config(&mut cfg.api)
}
pub fn apply_to_api_config(&self, cfg: &mut ApiConfig) -> anyhow::Result<()> {
cfg.listen_path.extend_from_slice(&self.api_listen_path);
cfg.listen_fd.extend_from_slice(&self.api_listen_fd);
cfg.stream_fd.extend_from_slice(&self.api_stream_fd);
Ok(())
}
}
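// Illustrative sketch (not part of the original file): the clap-derived flags
// above (e.g. --api-listen-path) are appended to the ApiConfig loaded from the
// configuration file rather than replacing it, as shown here.
fn example_merge_cli_into_config(cli: &ApiCli, cfg: &mut ApiConfig) -> anyhow::Result<()> {
    cli.apply_to_api_config(cfg)
}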

View File

@@ -1,49 +0,0 @@
use std::path::PathBuf;
use mio::net::UnixListener;
use rosenpass_util::mio::{UnixListenerExt, UnixStreamExt};
use serde::{Deserialize, Serialize};
use crate::app_server::AppServer;
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
pub struct ApiConfig {
/// Where in the file-system to create the unix socket the rosenpass API will be listening for
/// connections on
pub listen_path: Vec<PathBuf>,
/// When rosenpass is called from another process, the other process can open and bind the
/// unix socket for the Rosenpass API to use themselves, passing it to this process. In Rust this can be achieved
/// using the [command-fds](https://docs.rs/command-fds/latest/command_fds/) crate.
pub listen_fd: Vec<i32>,
/// When rosenpass is called from another process, the other process can connect the unix socket for the API
/// themselves, for instance using the `socketpair(2)` system call.
pub stream_fd: Vec<i32>,
}
impl ApiConfig {
pub fn apply_to_app_server(&self, srv: &mut AppServer) -> anyhow::Result<()> {
for path in self.listen_path.iter() {
srv.add_api_listener(UnixListener::bind(path)?)?;
}
for fd in self.listen_fd.iter() {
srv.add_api_listener(UnixListenerExt::claim_fd(*fd)?)?;
}
for fd in self.stream_fd.iter() {
srv.add_api_connection(UnixStreamExt::claim_fd(*fd)?)?;
}
Ok(())
}
pub fn count_api_sources(&self) -> usize {
self.listen_path.len() + self.listen_fd.len() + self.stream_fd.len()
}
pub fn has_api_sources(&self) -> bool {
self.count_api_sources() > 0
}
}
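// Illustrative sketch (not part of the original file): an ApiConfig carrying a
// single unix socket path and no inherited file descriptors; has_api_sources()
// then returns true. The path shown is arbitrary.
fn example_api_config() -> ApiConfig {
    ApiConfig {
        listen_path: vec![PathBuf::from("/run/rosenpass/api.sock")],
        listen_fd: vec![],
        stream_fd: vec![],
    }
}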

View File

@@ -1,321 +0,0 @@
use std::borrow::{Borrow, BorrowMut};
use std::collections::VecDeque;
use std::os::fd::OwnedFd;
use mio::net::UnixStream;
use rosenpass_secret_memory::Secret;
use rosenpass_util::mio::ReadWithFileDescriptors;
use rosenpass_util::{
io::{IoResultKindHintExt, TryIoResultKindHintExt},
length_prefix_encoding::{
decoder::{self as lpe_decoder, LengthPrefixDecoder},
encoder::{self as lpe_encoder, LengthPrefixEncoder},
},
mio::interest::RW as MIO_RW,
};
use zeroize::Zeroize;
use crate::api::MAX_REQUEST_FDS;
use crate::{api::Server, app_server::AppServer};
use super::super::{ApiHandler, ApiHandlerContext};
#[derive(Debug)]
struct SecretBuffer<const N: usize>(pub Secret<N>);
impl<const N: usize> SecretBuffer<N> {
fn new() -> Self {
Self(Secret::zero())
}
}
impl<const N: usize> Borrow<[u8]> for SecretBuffer<N> {
fn borrow(&self) -> &[u8] {
self.0.secret()
}
}
impl<const N: usize> BorrowMut<[u8]> for SecretBuffer<N> {
fn borrow_mut(&mut self) -> &mut [u8] {
self.0.secret_mut()
}
}
// TODO: Unfortunately, zerocopy is quite particular about alignment, hence the 4096
type ReadBuffer = LengthPrefixDecoder<SecretBuffer<4096>>;
type WriteBuffer = LengthPrefixEncoder<SecretBuffer<4096>>;
type ReadFdBuffer = VecDeque<OwnedFd>;
#[derive(Debug)]
struct MioConnectionBuffers {
read_buffer: ReadBuffer,
write_buffer: WriteBuffer,
read_fd_buffer: ReadFdBuffer,
}
#[derive(Debug)]
pub struct MioConnection {
io: UnixStream,
mio_token: mio::Token,
invalid_read: bool,
buffers: Option<MioConnectionBuffers>,
api_handler: ApiHandler,
}
impl MioConnection {
pub fn new(app_server: &mut AppServer, mut io: UnixStream) -> std::io::Result<Self> {
let mio_token = app_server.mio_token_dispenser.dispense();
app_server
.mio_poll
.registry()
.register(&mut io, mio_token, MIO_RW)?;
let invalid_read = false;
let read_buffer = LengthPrefixDecoder::new(SecretBuffer::new());
let write_buffer = LengthPrefixEncoder::from_buffer(SecretBuffer::new());
let read_fd_buffer = VecDeque::new();
let buffers = Some(MioConnectionBuffers {
read_buffer,
write_buffer,
read_fd_buffer,
});
let api_state = ApiHandler::new();
Ok(Self {
io,
mio_token,
invalid_read,
buffers,
api_handler: api_state,
})
}
pub fn should_close(&self) -> bool {
let exhausted = self
.buffers
.as_ref()
.map(|b| b.write_buffer.exhausted())
.unwrap_or(false);
self.invalid_read && exhausted
}
pub fn close(mut self, app_server: &mut AppServer) -> anyhow::Result<()> {
app_server.mio_poll.registry().deregister(&mut self.io)?;
Ok(())
}
pub fn mio_token(&self) -> mio::Token {
self.mio_token
}
}
pub trait MioConnectionContext {
fn mio_connection(&self) -> &MioConnection;
fn app_server(&self) -> &AppServer;
fn mio_connection_mut(&mut self) -> &mut MioConnection;
fn app_server_mut(&mut self) -> &mut AppServer;
fn poll(&mut self) -> anyhow::Result<()> {
macro_rules! short {
($e:expr) => {
match $e {
None => return Ok(()),
Some(()) => {}
}
};
}
// All of these functions return an error, None ("operation incomplete"),
// or Some(()) ("operation complete, keep processing")
short!(self.flush_write_buffer()?); // Flush last message
short!(self.recv()?); // Receive new message
short!(self.handle_incoming_message()?); // Process new message with API
short!(self.flush_write_buffer()?); // Begin flushing response
Ok(())
}
fn handle_incoming_message(&mut self) -> anyhow::Result<Option<()>> {
self.with_buffers_stolen(|this, bufs| {
// Acquire request & response. Caller is responsible to make sure
// that read buffer holds a message and that write buffer is cleared.
// Hence the unwraps and assertions
assert!(bufs.write_buffer.exhausted());
let req = bufs.read_buffer.message().unwrap().unwrap();
let req_fds = &mut bufs.read_fd_buffer;
let res = bufs.write_buffer.buffer_bytes_mut();
// Call API handler
// Transitive trait implementations: MioConnectionContext -> ApiHandlerContext -> as ApiServer
let response_len = this.handle_message(req, req_fds, res)?;
bufs.write_buffer
.restart_write_with_new_message(response_len)?;
bufs.read_buffer.zeroize(); // clear for new message to read
bufs.read_fd_buffer.clear();
Ok(Some(()))
})
}
fn flush_write_buffer(&mut self) -> anyhow::Result<Option<()>> {
if self.write_buf_mut().exhausted() {
return Ok(Some(()));
}
use lpe_encoder::WriteToIoReturn as Ret;
use std::io::ErrorKind as K;
loop {
let conn = self.mio_connection_mut();
let bufs = conn.buffers.as_mut().unwrap();
let sock = &conn.io;
let write_buf = &mut bufs.write_buffer;
match write_buf.write_to_stdio(sock).io_err_kind_hint() {
// Done
Ok(Ret { done: true, .. }) => {
write_buf.zeroize(); // clear for new message to write
break Ok(Some(()));
}
// Would block
Ok(Ret {
bytes_written: 0, ..
}) => break Ok(None),
Err((_e, K::WouldBlock)) => break Ok(None),
// Just continue
Ok(_) => continue, /* Ret { bytes_written > 0, done = false } acc. to previous cases*/
Err((_e, K::Interrupted)) => continue,
// Other errors
Err((e, _ek)) => Err(e)?,
}
}
}
fn recv(&mut self) -> anyhow::Result<Option<()>> {
if !self.write_buf_mut().exhausted() || self.mio_connection().invalid_read {
return Ok(None);
}
use lpe_decoder::{ReadFromIoError as E, ReadFromIoReturn as Ret};
use std::io::ErrorKind as K;
loop {
let conn = self.mio_connection_mut();
let bufs = conn.buffers.as_mut().unwrap();
let read_buf = &mut bufs.read_buffer;
let read_fd_buf = &mut bufs.read_fd_buffer;
let sock = &conn.io;
let fd_passing_sock = ReadWithFileDescriptors::<MAX_REQUEST_FDS, UnixStream, _, _>::new(
sock,
read_fd_buf,
);
match read_buf
.read_from_stdio(fd_passing_sock)
.try_io_err_kind_hint()
{
// We actually received a proper message
// (Impl below match to appease borrow checker)
Ok(Ret {
message: Some(_msg),
..
}) => break Ok(Some(())),
// Message does not fit in buffer
Err((e @ E::MessageTooLargeError { .. }, _)) => {
log::warn!("Received message on API that was too big to fit in our buffers; \
looks like the client is broken. No longer processing messages from this client.\n\
Error: {e:?}");
conn.invalid_read = true; // closed later by mio_manager
break Ok(None);
}
// Would block
Ok(Ret { bytes_read: 0, .. }) => break Ok(None),
Err((_, Some(K::WouldBlock))) => break Ok(None),
// Just keep going
Ok(Ret { bytes_read: _, .. }) => continue,
Err((_, Some(K::Interrupted))) => continue,
// Other IO Error (just pass on to the caller)
Err((E::IoError(e), _)) => {
log::warn!(
"IO error while trying to read message from API socket. \
The connection is broken. No longer processing messages from this client.\n\
Error: {e:?}"
);
conn.invalid_read = true; // closed later by mio_manager
break Err(e.into());
}
};
}
}
fn mio_token(&self) -> mio::Token {
self.mio_connection().mio_token()
}
fn should_close(&self) -> bool {
self.mio_connection().should_close()
}
}
trait MioConnectionContextPrivate: MioConnectionContext {
fn steal_buffers(&mut self) -> MioConnectionBuffers {
self.mio_connection_mut().buffers.take().unwrap()
}
fn return_buffers(&mut self, buffers: MioConnectionBuffers) {
let opt = &mut self.mio_connection_mut().buffers;
assert!(opt.is_none());
let _ = opt.insert(buffers);
}
fn with_buffers_stolen<R, F: FnOnce(&mut Self, &mut MioConnectionBuffers) -> R>(
&mut self,
f: F,
) -> R {
let mut bufs = self.steal_buffers();
let res = f(self, &mut bufs);
self.return_buffers(bufs);
res
}
fn write_buf_mut(&mut self) -> &mut WriteBuffer {
self.mio_connection_mut()
.buffers
.as_mut()
.unwrap()
.write_buffer
.borrow_mut()
}
}
impl<T> MioConnectionContextPrivate for T where T: ?Sized + MioConnectionContext {}
impl<T> ApiHandlerContext for T
where
T: ?Sized + MioConnectionContext,
{
fn api_handler(&self) -> &ApiHandler {
&self.mio_connection().api_handler
}
fn app_server(&self) -> &AppServer {
MioConnectionContext::app_server(self)
}
fn api_handler_mut(&mut self) -> &mut ApiHandler {
&mut self.mio_connection_mut().api_handler
}
fn app_server_mut(&mut self) -> &mut AppServer {
MioConnectionContext::app_server_mut(self)
}
}

View File

@@ -1,173 +0,0 @@
use std::{borrow::BorrowMut, io};
use mio::net::{UnixListener, UnixStream};
use rosenpass_util::{
functional::ApplyExt, io::nonblocking_handle_io_errors, mio::interest::RW as MIO_RW,
};
use crate::app_server::{AppServer, AppServerIoSource};
use super::{MioConnection, MioConnectionContext};
#[derive(Default, Debug)]
pub struct MioManager {
listeners: Vec<UnixListener>,
connections: Vec<Option<MioConnection>>,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MioManagerIoSource {
Listener(usize),
Connection(usize),
}
impl MioManager {
pub fn new() -> Self {
Self::default()
}
}
struct MioConnectionFocus<'a, T: ?Sized + MioManagerContext> {
ctx: &'a mut T,
conn_idx: usize,
}
impl<'a, T: ?Sized + MioManagerContext> MioConnectionFocus<'a, T> {
fn new(ctx: &'a mut T, conn_idx: usize) -> Self {
Self { ctx, conn_idx }
}
}
pub trait MioManagerContext {
fn mio_manager(&self) -> &MioManager;
fn mio_manager_mut(&mut self) -> &mut MioManager;
fn app_server(&self) -> &AppServer;
fn app_server_mut(&mut self) -> &mut AppServer;
fn add_listener(&mut self, mut listener: UnixListener) -> io::Result<()> {
let srv = self.app_server_mut();
let mio_token = srv.mio_token_dispenser.dispense();
srv.mio_poll
.registry()
.register(&mut listener, mio_token, MIO_RW)?;
let io_source = self
.mio_manager()
.listeners
.len()
.apply(MioManagerIoSource::Listener)
.apply(AppServerIoSource::MioManager);
self.mio_manager_mut().listeners.push(listener);
self.app_server_mut()
.register_io_source(mio_token, io_source);
Ok(())
}
fn add_connection(&mut self, connection: UnixStream) -> io::Result<()> {
let connection = MioConnection::new(self.app_server_mut(), connection)?;
let mio_token = connection.mio_token();
let conns: &mut Vec<Option<MioConnection>> =
self.mio_manager_mut().connections.borrow_mut();
// Reuse a free connection slot if one exists, otherwise append at the end,
// so that indices of existing connections remain stable
let idx = conns
.iter()
.position(|slot| slot.is_none())
.unwrap_or(conns.len());
if idx == conns.len() {
conns.push(Some(connection));
} else {
conns[idx] = Some(connection);
}
let io_source = idx
.apply(MioManagerIoSource::Connection)
.apply(AppServerIoSource::MioManager);
self.app_server_mut()
.register_io_source(mio_token, io_source);
Ok(())
}
fn poll_particular(&mut self, io_source: MioManagerIoSource) -> anyhow::Result<()> {
use MioManagerIoSource as S;
match io_source {
S::Listener(idx) => self.accept_from(idx)?,
S::Connection(idx) => self.poll_particular_connection(idx)?,
};
Ok(())
}
fn poll(&mut self) -> anyhow::Result<()> {
self.accept_connections()?;
self.poll_connections()?;
Ok(())
}
fn accept_connections(&mut self) -> io::Result<()> {
for idx in 0..self.mio_manager_mut().listeners.len() {
self.accept_from(idx)?;
}
Ok(())
}
fn accept_from(&mut self, idx: usize) -> io::Result<()> {
// Accept connection until the socket would block or returns another error
// TODO: This currently only adds connections--we eventually need the ability to remove
// them as well, see the note in connection.rs
loop {
match nonblocking_handle_io_errors(|| self.mio_manager().listeners[idx].accept())? {
None => break,
Some((conn, _addr)) => {
self.add_connection(conn)?;
}
};
}
Ok(())
}
fn poll_connections(&mut self) -> anyhow::Result<()> {
for idx in 0..self.mio_manager().connections.len() {
self.poll_particular_connection(idx)?;
}
Ok(())
}
fn poll_particular_connection(&mut self, idx: usize) -> anyhow::Result<()> {
if self.mio_manager().connections[idx].is_none() {
return Ok(());
}
let mut conn = MioConnectionFocus::new(self, idx);
conn.poll()?;
if conn.should_close() {
let conn = self.mio_manager_mut().connections[idx].take().unwrap();
let mio_token = conn.mio_token();
if let Err(e) = conn.close(self.app_server_mut()) {
log::warn!("Error while closing API connection {e:?}");
};
self.app_server_mut().unregister_io_source(mio_token);
}
Ok(())
}
}
impl<T: ?Sized + MioManagerContext> MioConnectionContext for MioConnectionFocus<'_, T> {
fn mio_connection(&self) -> &MioConnection {
self.ctx.mio_manager().connections[self.conn_idx]
.as_ref()
.unwrap()
}
fn app_server(&self) -> &AppServer {
self.ctx.app_server()
}
fn mio_connection_mut(&mut self) -> &mut MioConnection {
self.ctx.mio_manager_mut().connections[self.conn_idx]
.as_mut()
.unwrap()
}
fn app_server_mut(&mut self) -> &mut AppServer {
self.ctx.app_server_mut()
}
}

View File

@@ -1,5 +0,0 @@
mod connection;
mod manager;
pub use connection::*;
pub use manager::*;

View File

@@ -1,11 +0,0 @@
//! The bulk of the code relating to the Rosenpass unix socket API
mod api_handler;
mod boilerplate;
pub use api_handler::*;
pub use boilerplate::*;
pub mod cli;
pub mod config;
pub mod mio;

File diff suppressed because it is too large

View File

@@ -1,97 +0,0 @@
use anyhow::{Context, Result};
use heck::ToShoutySnakeCase;
use rosenpass_ciphers::{hash_domain::HashDomain, KEY_LEN};
/// Recursively calculate a concrete hash value for an API message type
fn calculate_hash_value(hd: HashDomain, values: &[&str]) -> Result<[u8; KEY_LEN]> {
match values.split_first() {
Some((head, tail)) => calculate_hash_value(hd.mix(head.as_bytes())?, tail),
None => Ok(hd.into_value()),
}
}
/// Print a hash literal for pasting into the Rosenpass source code
fn print_literal(path: &[&str]) -> Result<()> {
let val = calculate_hash_value(HashDomain::zero(), path)?;
let (last, prefix) = path.split_last().context("developer error!")?;
let var_name = last.to_shouty_snake_case();
print!("// hash domain hash of: ");
for n in prefix.iter() {
print!("{n} -> ");
}
println!("{last}");
let c = hex::encode(val)
.chars()
.collect::<Vec<char>>()
.chunks_exact(4)
.map(|chunk| chunk.iter().collect::<String>())
.collect::<Vec<_>>();
println!("const {var_name} : RawMsgType = RawMsgType::from_le_bytes(hex!(\"{} {} {} {} {} {} {} {}\"));",
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
Ok(())
}
/// Tree of domain separators where each leaf represents
/// an API message ID
#[derive(Debug, Clone)]
enum Tree {
Branch(String, Vec<Tree>),
Leaf(String),
}
impl Tree {
fn name(&self) -> &str {
match self {
Self::Branch(name, _) => name,
Self::Leaf(name) => name,
}
}
fn gen_code_inner(&self, prefix: &[&str]) -> Result<()> {
let mut path = prefix.to_owned();
path.push(self.name());
match self {
Self::Branch(_, ref children) => {
for c in children.iter() {
c.gen_code_inner(&path)?
}
}
Self::Leaf(_) => print_literal(&path)?,
};
Ok(())
}
fn gen_code(&self) -> Result<()> {
self.gen_code_inner(&[])
}
}
/// Helper for generating hash-based message IDs for the IPC API
fn main() -> Result<()> {
let tree = Tree::Branch(
"Rosenpass IPC API".to_owned(),
vec![Tree::Branch(
"Rosenpass Protocol Server".to_owned(),
vec![
Tree::Leaf("Ping Request".to_owned()),
Tree::Leaf("Ping Response".to_owned()),
Tree::Leaf("Supply Keypair Request".to_owned()),
Tree::Leaf("Supply Keypair Response".to_owned()),
Tree::Leaf("Add Listen Socket Request".to_owned()),
Tree::Leaf("Add Listen Socket Response".to_owned()),
Tree::Leaf("Add Psk Broker Request".to_owned()),
Tree::Leaf("Add Psk Broker Response".to_owned()),
],
)],
);
println!("type RawMsgType = u128;");
println!();
tree.gen_code()
}

View File

@@ -1,109 +1,38 @@
use anyhow::{bail, ensure, Context};
use anyhow::{bail, ensure};
use clap::{Parser, Subcommand};
use rosenpass_cipher_traits::Kem;
use rosenpass_ciphers::kem::StaticKem;
use rosenpass_secret_memory::file::StoreSecret;
use rosenpass_util::file::{LoadValue, LoadValueB64, StoreValue};
use rosenpass_wireguard_broker::brokers::native_unix::{
NativeUnixBroker, NativeUnixBrokerConfigBaseBuilder, NativeUnixBrokerConfigBaseBuilderError,
};
use std::ops::DerefMut;
use rosenpass_util::file::{LoadValue, LoadValueB64};
use std::path::PathBuf;
use crate::app_server::AppServerTest;
use crate::app_server::{AppServer, BrokerPeer};
use crate::app_server;
use crate::app_server::AppServer;
use crate::protocol::{SPk, SSk, SymKey};
use super::config;
#[cfg(feature = "experiment_api")]
use {
command_fds::{CommandFdExt, FdMapping},
log::{error, info},
mio::net::UnixStream,
rosenpass_util::fd::claim_fd,
rosenpass_wireguard_broker::brokers::mio_client::MioBrokerClient,
rosenpass_wireguard_broker::WireguardBrokerMio,
rustix::net::{socketpair, AddressFamily, SocketFlags, SocketType},
std::os::fd::AsRawFd,
std::os::unix::net,
std::process::Command,
std::thread,
};
/// enum representing a choice of interface to a WireGuard broker
#[derive(Debug)]
pub enum BrokerInterface {
Socket(PathBuf),
FileDescriptor(i32),
SocketPair,
}
/// struct holding all CLI arguments for `clap` crate to parse
#[derive(Parser, Debug)]
#[command(author, version, about, long_about, arg_required_else_help = true)]
#[command(author, version, about, long_about)]
pub struct CliArgs {
/// Lowest log level to show
/// lowest log level to show log messages at higher levels will be omitted
#[arg(long = "log-level", value_name = "LOG_LEVEL", group = "log-level")]
log_level: Option<log::LevelFilter>,
/// Show verbose log output sets log level to "debug"
/// show verbose log output sets log level to "debug"
#[arg(short, long, group = "log-level")]
verbose: bool,
/// Show no log output sets log level to "error"
/// show no log output sets log level to "error"
#[arg(short, long, group = "log-level")]
quiet: bool,
#[command(flatten)]
#[cfg(feature = "experiment_api")]
api: crate::api::cli::ApiCli,
/// Path of the `wireguard_psk` broker socket to connect to
#[cfg(feature = "experiment_api")]
#[arg(long, group = "psk-broker-specs")]
psk_broker_path: Option<PathBuf>,
/// File descriptor of the `wireguard_psk` broker socket to connect to
///
/// When this command is called from another process, the other process can
/// open and bind the Unix socket for the PSK broker connection to use
/// themselves, passing it to this process - in Rust this can be achieved
/// using the [command-fds](https://docs.rs/command-fds/latest/command_fds/)
/// crate
#[cfg(feature = "experiment_api")]
#[arg(long, group = "psk-broker-specs")]
psk_broker_fd: Option<i32>,
/// Spawn a PSK broker locally using a socket pair
#[cfg(feature = "experiment_api")]
#[arg(short, long, group = "psk-broker-specs")]
psk_broker_spawn: bool,
#[command(subcommand)]
pub command: Option<CliCommand>,
/// Generate man pages for the CLI
///
/// This option is used to generate man pages for Rosenpass in the specified
/// directory and exit.
#[clap(long, value_name = "out_dir")]
pub generate_manpage: Option<PathBuf>,
/// Generate completion file for a shell
///
/// This option is used to generate completion files for the specified shell
#[clap(long, value_name = "shell")]
pub print_completions: Option<clap_complete::Shell>,
pub command: CliCommand,
}
impl CliArgs {
pub fn apply_to_config(&self, _cfg: &mut config::Rosenpass) -> anyhow::Result<()> {
#[cfg(feature = "experiment_api")]
self.api.apply_to_config(_cfg)?;
Ok(())
}
/// returns the log level filter set by CLI args
/// returns `None` if the user did not specify any log level filter via CLI
///
@@ -115,54 +44,32 @@ impl CliArgs {
return Some(log::LevelFilter::Info);
}
if self.quiet {
return Some(log::LevelFilter::Warn);
return Some(log::LevelFilter::Error);
}
if let Some(level_filter) = self.log_level {
return Some(level_filter);
}
None
}
#[cfg(feature = "experiment_api")]
/// returns the broker interface set by CLI args
/// returns `None` if the `experiment_api` feature isn't enabled
pub fn get_broker_interface(&self) -> Option<BrokerInterface> {
if let Some(path_ref) = self.psk_broker_path.as_ref() {
Some(BrokerInterface::Socket(path_ref.to_path_buf()))
} else if let Some(fd) = self.psk_broker_fd {
Some(BrokerInterface::FileDescriptor(fd))
} else if self.psk_broker_spawn {
Some(BrokerInterface::SocketPair)
} else {
None
}
}
#[cfg(not(feature = "experiment_api"))]
/// returns the broker interface set by CLI args
/// returns `None` if the `experiment_api` feature isn't enabled
pub fn get_broker_interface(&self) -> Option<BrokerInterface> {
None
}
}
/// represents a command specified via CLI
#[derive(Subcommand, Debug)]
pub enum CliCommand {
/// Start Rosenpass key exchanges based on a configuration file
/// Start Rosenpass in server mode and carry on with the key exchange
///
/// This will parse the configuration file and perform key exchanges with
/// the specified peers. If a peer's endpoint is specified, this Rosenpass
/// instance will try to initiate a key exchange with the peer; otherwise,
/// only initiation attempts from other peers will be responded to.
/// This will parse the configuration file and perform the key exchange
/// with the specified peers. If a peer's endpoint is specified, this
/// Rosenpass instance will try to initiate a key exchange with the peer,
/// otherwise only initiation attempts from the peer will be responded to.
ExchangeConfig { config_file: PathBuf },
/// Start Rosenpass key exchanges based on command line arguments
/// Start in daemon mode, performing key exchanges
///
/// The configuration is read from the command line. The `peer` token always
/// separates multiple peers, e.g., if the token `peer` appears in the
/// WIREGUARD_EXTRA_ARGS, it is not put into the WireGuard arguments but
/// instead a new peer is created.
/// The configuration is read from the command line. The `peer` token
/// always separates multiple peers, e. g. if the token `peer` appears
/// in the WIREGUARD_EXTRA_ARGS it is not put into the WireGuard arguments
/// but instead a new peer is created.
/* Explanation: `first_arg` and `rest_of_args` are combined into one
* `Vec<String>`. They are only used to trick clap into displaying some
* guidance on the CLI usage.
@@ -191,10 +98,7 @@ pub enum CliCommand {
config_file: Option<PathBuf>,
},
/// Generate a demo config file for Rosenpass
///
/// The generated config file will contain a single peer and all common
/// options.
/// Generate a demo config file
GenConfig {
config_file: PathBuf,
@@ -203,19 +107,19 @@ pub enum CliCommand {
force: bool,
},
/// Generate secret & public key for Rosenpass
/// Generate the keys mentioned in a configFile
///
/// Generates secret & public key to their destination. If a config file is
/// provided then the key file destination is taken from there, otherwise
/// the destination is taken from the CLI arguments.
/// Generates secret- & public-key to their destination. If a config file
/// is provided then the key file destination is taken from there.
/// Otherwise the
GenKeys {
config_file: Option<PathBuf>,
/// Where to write public key to
/// where to write public-key to
#[clap(short, long)]
public_key: Option<PathBuf>,
/// Where to write secret key to
/// where to write secret-key to
#[clap(short, long)]
secret_key: Option<PathBuf>,
@@ -224,57 +128,60 @@ pub enum CliCommand {
force: bool,
},
/// Validate a configuration file
///
/// This command will validate the configuration file and print any errors
/// it finds. If the configuration file is valid, it will print a success.
/// Defined secret & public keys are checked for existence and validity.
Validate { config_files: Vec<PathBuf> },
/// DEPRECATED - use the gen-keys command instead
/// Deprecated - use gen-keys instead
#[allow(rustdoc::broken_intra_doc_links)]
#[allow(rustdoc::invalid_html_tags)]
#[command(hide = true)]
Keygen {
// NOTE yes, the legacy keygen argument initially really accepted
// "private-key", not "secret-key"!
// NOTE yes, the legacy keygen argument initially really accepted "privet-key", not "secret-key"!
/// public-key <PATH> private-key <PATH>
args: Vec<String>,
},
/// Validate a configuration
Validate { config_files: Vec<PathBuf> },
/// Show the rosenpass manpage
// TODO make this the default, but only after the manpage has been adjusted once the CLI stabilizes
Man,
}
impl CliArgs {
/// Runs the command specified via CLI
impl CliCommand {
/// runs the command specified via CLI
///
/// ## TODO
/// - This method consumes the [`CliCommand`] value. It might be wise to use a reference...
pub fn run(
self,
broker_interface: Option<BrokerInterface>,
test_helpers: Option<AppServerTest>,
) -> anyhow::Result<()> {
pub fn run(self) -> anyhow::Result<()> {
use CliCommand::*;
match &self.command {
Some(GenConfig { config_file, force }) => {
match self {
Man => {
let man_cmd = std::process::Command::new("man")
.args(["1", "rosenpass"])
.status();
if !(man_cmd.is_ok() && man_cmd.unwrap().success()) {
println!(include_str!(env!("ROSENPASS_MAN")));
}
}
GenConfig { config_file, force } => {
ensure!(
*force || !config_file.exists(),
force || !config_file.exists(),
"config file {config_file:?} already exists"
);
std::fs::write(config_file, config::EXAMPLE_CONFIG)?;
config::Rosenpass::example_config().store(config_file)?;
}
// Deprecated - use gen-keys instead
Some(Keygen { args }) => {
Keygen { args } => {
log::warn!("The 'keygen' command is deprecated. Please use the 'gen-keys' command instead.");
let mut public_key: Option<PathBuf> = None;
let mut secret_key: Option<PathBuf> = None;
// Manual arg parsing, since clap wants to prefix flags with "--"
let mut args = args.iter();
let mut args = args.into_iter();
loop {
match (args.next().map(|x| x.as_str()), args.next()) {
match (args.next().as_deref(), args.next()) {
(Some("private-key"), Some(opt)) | (Some("secret-key"), Some(opt)) => {
secret_key = Some(opt.into());
}
@@ -298,12 +205,12 @@ impl CliArgs {
generate_and_save_keypair(secret_key.unwrap(), public_key.unwrap())?;
}
Some(GenKeys {
GenKeys {
config_file,
public_key,
secret_key,
force,
}) => {
} => {
// figure out where the key file is specified, in the config file or directly as flag?
let (pkf, skf) = match (config_file, public_key, secret_key) {
(Some(config_file), _, _) => {
@@ -313,13 +220,10 @@ impl CliArgs {
);
let config = config::Rosenpass::load(config_file)?;
let keypair = config
.keypair
.context("Config file present, but no keypair is specified.")?;
(keypair.public_key, keypair.secret_key)
(config.public_key, config.secret_key)
}
(_, Some(pkf), Some(skf)) => (pkf.clone(), skf.clone()),
(_, Some(pkf), Some(skf)) => (pkf, skf),
_ => {
bail!("either a config-file or both public-key and secret-key file are required")
}
@@ -329,14 +233,12 @@ impl CliArgs {
let mut problems = vec![];
if !force && pkf.is_file() {
problems.push(format!(
"public-key file {:?} exists, refusing to overwrite",
std::fs::canonicalize(&pkf)?,
"public-key file {pkf:?} exist, refusing to overwrite it"
));
}
if !force && skf.is_file() {
problems.push(format!(
"secret-key file {:?} exists, refusing to overwrite",
std::fs::canonicalize(&skf)?,
"secret-key file {skf:?} exists, refusing to overwrite it"
));
}
if !problems.is_empty() {
@@ -347,224 +249,90 @@ impl CliArgs {
generate_and_save_keypair(skf, pkf)?;
}
Some(ExchangeConfig { config_file }) => {
ExchangeConfig { config_file } => {
ensure!(
config_file.exists(),
"config file '{config_file:?}' does not exist"
);
let mut config = config::Rosenpass::load(config_file)?;
let config = config::Rosenpass::load(config_file)?;
config.validate()?;
self.apply_to_config(&mut config)?;
config.check_usefullness()?;
Self::event_loop(config, broker_interface, test_helpers)?;
Self::event_loop(config)?;
}
Some(Exchange {
Exchange {
first_arg,
rest_of_args,
mut rest_of_args,
config_file,
}) => {
let mut rest_of_args = rest_of_args.clone();
rest_of_args.insert(0, first_arg.clone());
} => {
rest_of_args.insert(0, first_arg);
let args = rest_of_args;
let mut config = config::Rosenpass::parse_args(args)?;
if let Some(p) = config_file {
config.store(p)?;
config.config_file_path.clone_from(p);
config.store(&p)?;
config.config_file_path = p;
}
config.validate()?;
self.apply_to_config(&mut config)?;
config.check_usefullness()?;
Self::event_loop(config, broker_interface, test_helpers)?;
Self::event_loop(config)?;
}
Some(Validate { config_files }) => {
Validate { config_files } => {
for file in config_files {
match config::Rosenpass::load(file) {
match config::Rosenpass::load(&file) {
Ok(config) => {
eprintln!("{file:?} is valid TOML and conforms to the expected schema");
match config.validate() {
Ok(_) => eprintln!("{file:?} has passed all logical checks"),
Err(err) => eprintln!("{file:?} contains logical errors: '{err}'"),
Err(_) => eprintln!("{file:?} contains logical errors"),
}
}
Err(e) => eprintln!("{file:?} is not valid: {e}"),
}
}
}
&None => {} // clap prints help if no command is given
}
Ok(())
}
fn event_loop(
config: config::Rosenpass,
broker_interface: Option<BrokerInterface>,
test_helpers: Option<AppServerTest>,
) -> anyhow::Result<()> {
const MAX_PSK_SIZE: usize = 1000;
fn event_loop(config: config::Rosenpass) -> anyhow::Result<()> {
// load own keys
let keypair = config
.keypair
.as_ref()
.map(|kp| -> anyhow::Result<_> {
let sk = SSk::load(&kp.secret_key)?;
let pk = SPk::load(&kp.public_key)?;
Ok((sk, pk))
})
.transpose()?;
let sk = SSk::load(&config.secret_key)?;
let pk = SPk::load(&config.public_key)?;
// start an application server
let mut srv = std::boxed::Box::<AppServer>::new(AppServer::new(
keypair,
config.listen.clone(),
sk,
pk,
config.listen,
config.verbosity,
test_helpers,
)?);
config.apply_to_app_server(&mut srv)?;
let broker = Self::create_broker(broker_interface)?;
let broker_store_ptr = srv.register_broker(broker)?;
fn cfg_err_map(e: NativeUnixBrokerConfigBaseBuilderError) -> anyhow::Error {
anyhow::Error::msg(format!("NativeUnixBrokerConfigBaseBuilderError: {:?}", e))
}
for cfg_peer in config.peers {
let broker_peer = if let Some(wg) = &cfg_peer.wg {
let peer_cfg = NativeUnixBrokerConfigBaseBuilder::default()
.peer_id_b64(&wg.peer)?
.interface(wg.device.clone())
.extra_params_ser(&wg.extra_params)?
.build()
.map_err(cfg_err_map)?;
let broker_peer = BrokerPeer::new(broker_store_ptr.clone(), Box::new(peer_cfg));
Some(broker_peer)
} else {
None
};
srv.add_peer(
// psk, pk, outfile, outwg, tx_addr
cfg_peer
.pre_shared_key
.map(SymKey::load_b64::<MAX_PSK_SIZE, _>)
.transpose()?,
cfg_peer.pre_shared_key.map(SymKey::load_b64).transpose()?,
SPk::load(&cfg_peer.public_key)?,
cfg_peer.key_out,
broker_peer,
cfg_peer.wg.map(|cfg| app_server::WireguardOut {
dev: cfg.device,
pk: cfg.peer,
extra_params: cfg.extra_params,
}),
cfg_peer.endpoint.clone(),
)?;
}
srv.event_loop()
}
#[cfg(feature = "experiment_api")]
fn create_broker(
broker_interface: Option<BrokerInterface>,
) -> Result<
Box<dyn WireguardBrokerMio<MioError = anyhow::Error, Error = anyhow::Error>>,
anyhow::Error,
> {
if let Some(interface) = broker_interface {
let socket = Self::get_broker_socket(interface)?;
Ok(Box::new(MioBrokerClient::new(socket)))
} else {
Ok(Box::new(NativeUnixBroker::new()))
}
}
#[cfg(not(feature = "experiment_api"))]
fn create_broker(
_broker_interface: Option<BrokerInterface>,
) -> Result<Box<NativeUnixBroker>, anyhow::Error> {
Ok(Box::new(NativeUnixBroker::new()))
}
#[cfg(feature = "experiment_api")]
fn get_broker_socket(broker_interface: BrokerInterface) -> Result<UnixStream, anyhow::Error> {
// Connect to the psk broker unix socket if one was specified
// OR OTHERWISE spawn the psk broker and use socketpair(2) to connect with them
match broker_interface {
BrokerInterface::Socket(broker_path) => Ok(UnixStream::connect(broker_path)?),
BrokerInterface::FileDescriptor(broker_fd) => {
// mio::net::UnixStream doesn't implement From<OwnedFd>, so we have to go through std
let sock = net::UnixStream::from(claim_fd(broker_fd)?);
sock.set_nonblocking(true)?;
Ok(UnixStream::from_std(sock))
}
BrokerInterface::SocketPair => {
// Form a socketpair for communicating to the broker
let (ours, theirs) = socketpair(
AddressFamily::UNIX,
SocketType::STREAM,
SocketFlags::empty(),
None,
)?;
// Setup our end of the socketpair
let ours = net::UnixStream::from(ours);
ours.set_nonblocking(true)?;
// Start the PSK broker
let mut child = Command::new("rosenpass-wireguard-broker-socket-handler")
.args(["--stream-fd", "3"])
.fd_mappings(vec![FdMapping {
parent_fd: theirs.as_raw_fd(),
child_fd: 3,
}])?
.spawn()?;
// Handle the PSK broker crashing
thread::spawn(move || {
let status = child.wait();
if let Ok(status) = status {
if status.success() {
// Maybe they are doing double forking?
info!("PSK broker exited.");
} else {
error!("PSK broker exited with an error ({status:?})");
}
} else {
error!("Wait on PSK broker process failed ({status:?})");
}
});
Ok(UnixStream::from_std(ours))
}
}
}
}
/// generate secret and public keys, store in files according to the paths passed as arguments
fn generate_and_save_keypair(secret_key: PathBuf, public_key: PathBuf) -> anyhow::Result<()> {
let mut ssk = crate::protocol::SSk::random();
let mut spk = crate::protocol::SPk::random();
StaticKem::keygen(ssk.secret_mut(), spk.deref_mut())?;
StaticKem::keygen(ssk.secret_mut(), spk.secret_mut())?;
ssk.store_secret(secret_key)?;
spk.store(public_key)
}
#[cfg(feature = "internal_testing")]
pub mod testing {
use super::*;
pub fn generate_and_save_keypair(
secret_key: PathBuf,
public_key: PathBuf,
) -> anyhow::Result<()> {
super::generate_and_save_keypair(secret_key, public_key)
}
spk.store_secret(public_key)
}
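The `Validate` arm above can also be driven programmatically. Below is a minimal sketch, assuming the rosenpass and anyhow crates as dependencies; it only uses `config::Rosenpass::load` and `validate` as they appear in this diff, and the path is illustrative.

use std::path::Path;

fn check_config(path: &Path) -> anyhow::Result<()> {
    // Parse the TOML file into a Rosenpass config, then run its logical checks.
    let config = rosenpass::config::Rosenpass::load(path)?;
    config.validate()?;
    Ok(())
}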

View File

@@ -6,8 +6,7 @@
//! ## TODO
//! - support `~` in <https://github.com/rosenpass/rosenpass/issues/237>
//! - provide tooling to create config file from shell <https://github.com/rosenpass/rosenpass/issues/247>
use crate::protocol::{SPk, SSk};
use rosenpass_util::file::LoadValue;
use std::{
collections::HashSet,
fs,
@@ -17,31 +16,16 @@ use std::{
};
use anyhow::{bail, ensure};
use rosenpass_util::file::{fopen_w, Visibility};
use rosenpass_util::file::fopen_w;
use serde::{Deserialize, Serialize};
use crate::app_server::AppServer;
#[cfg(feature = "experiment_api")]
fn empty_api_config() -> crate::api::config::ApiConfig {
crate::api::config::ApiConfig {
listen_path: Vec::new(),
listen_fd: Vec::new(),
stream_fd: Vec::new(),
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Rosenpass {
// TODO: Raise error if secret key or public key alone is set during deserialization
// SEE: https://github.com/serde-rs/serde/issues/2793
#[serde(flatten)]
pub keypair: Option<Keypair>,
/// path to the public key file
pub public_key: PathBuf,
/// Location of the API listen sockets
#[cfg(feature = "experiment_api")]
#[serde(default = "empty_api_config")]
pub api: crate::api::config::ApiConfig,
/// path to the secret key file
pub secret_key: PathBuf,
/// list of [`SocketAddr`] to listen on
///
@@ -68,29 +52,9 @@ pub struct Rosenpass {
pub config_file_path: PathBuf,
}
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)]
pub struct Keypair {
/// path to the public key file
pub public_key: PathBuf,
/// path to the secret key file
pub secret_key: PathBuf,
}
impl Keypair {
pub fn new<Pk: AsRef<Path>, Sk: AsRef<Path>>(public_key: Pk, secret_key: Sk) -> Self {
let public_key = public_key.as_ref().to_path_buf();
let secret_key = secret_key.as_ref().to_path_buf();
Self {
public_key,
secret_key,
}
}
}
/// ## TODO
/// - replace this type with [`log::LevelFilter`], also see <https://github.com/rosenpass/rosenpass/pull/246>
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Copy, Clone)]
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Verbosity {
Quiet,
Verbose,
@@ -143,12 +107,6 @@ pub struct WireGuard {
pub extra_params: Vec<String>,
}
impl Default for Rosenpass {
fn default() -> Self {
Self::empty()
}
}
impl Rosenpass {
/// load configuration from a TOML file
///
@@ -164,10 +122,8 @@ impl Rosenpass {
// resolve `~` (see https://github.com/rosenpass/rosenpass/issues/237)
use util::resolve_path_with_tilde;
if let Some(ref mut keypair) = config.keypair {
resolve_path_with_tilde(&mut keypair.public_key);
resolve_path_with_tilde(&mut keypair.secret_key);
}
resolve_path_with_tilde(&mut config.public_key);
resolve_path_with_tilde(&mut config.secret_key);
for peer in config.peers.iter_mut() {
resolve_path_with_tilde(&mut peer.public_key);
if let Some(ref mut psk) = &mut peer.pre_shared_key {
@@ -179,7 +135,7 @@ impl Rosenpass {
}
// add path to "self"
p.as_ref().clone_into(&mut config.config_file_path);
config.config_file_path = p.as_ref().to_owned();
// return
Ok(config)
@@ -195,49 +151,31 @@ impl Rosenpass {
/// Commit the configuration to where it came from, overwriting the original file
pub fn commit(&self) -> anyhow::Result<()> {
let mut f = fopen_w(&self.config_file_path, Visibility::Public)?;
let mut f = fopen_w(&self.config_file_path)?;
f.write_all(toml::to_string_pretty(&self)?.as_bytes())?;
self.store(&self.config_file_path)
}
pub fn apply_to_app_server(&self, _srv: &mut AppServer) -> anyhow::Result<()> {
#[cfg(feature = "experiment_api")]
self.api.apply_to_app_server(_srv)?;
Ok(())
}
/// Validate a configuration
///
/// ## TODO
/// - check that files do not just exist but are also readable
/// - warn if neither out_key nor exchange_command of a peer is defined (v.i.)
pub fn validate(&self) -> anyhow::Result<()> {
if let Some(ref keypair) = self.keypair {
// check the public key file exists
ensure!(
keypair.public_key.is_file(),
"could not find public-key file {:?}: no such file. Consider running `rosenpass gen-keys` to generate a new keypair.",
keypair.public_key
);
// check the public key file exists
ensure!(
self.public_key.is_file(),
"could not find public-key file {:?}: no such file",
self.public_key
);
// check the public-key file is a valid key
ensure!(
SPk::load(&keypair.public_key).is_ok(),
"could not load public-key file {:?}: invalid key",
keypair.public_key
);
// check the secret-key file exists
ensure!(
keypair.secret_key.is_file(),
"could not find secret-key file {:?}: no such file. Consider running `rosenpass gen-keys` to generate a new keypair.",
keypair.secret_key
);
// check the secret-key file is a valid key
ensure!(
SSk::load(&keypair.secret_key).is_ok(),
"could not load secret-key file {:?}: invalid key",
keypair.secret_key
);
}
// check the secret-key file exists
ensure!(
self.secret_key.is_file(),
"could not find secret-key file {:?}: no such file",
self.secret_key
);
for (i, peer) in self.peers.iter().enumerate() {
// check peer's public-key file exists
@@ -247,13 +185,6 @@ impl Rosenpass {
peer.public_key
);
// check peer's public-key file is a valid key
ensure!(
SPk::load(&peer.public_key).is_ok(),
"peer {i} public-key file {:?} is invalid",
peer.public_key
);
// check endpoint is usable
if let Some(addr) = peer.endpoint.as_ref() {
ensure!(
@@ -263,57 +194,18 @@ impl Rosenpass {
);
}
// check if `key_out` or `device` and `peer` are defined
if peer.key_out.is_none() {
if let Some(wg) = &peer.wg {
if wg.device.is_empty() || wg.peer.is_empty() {
ensure!(
false,
"peer {i} has neither `key_out` nor valid wireguard config defined"
);
}
} else {
ensure!(
false,
"peer {i} has neither `key_out` nor valid wireguard config defined"
);
}
}
// TODO warn if neither out_key nor exchange_command is defined
}
Ok(())
}
pub fn check_usefullness(&self) -> anyhow::Result<()> {
#[cfg(not(feature = "experiment_api"))]
ensure!(self.keypair.is_some(), "Server keypair missing.");
#[cfg(feature = "experiment_api")]
ensure!(
self.keypair.is_some() || self.api.has_api_sources(),
"{}{}",
"Specify a server keypair or some API connections to configure the keypair with.",
"Without a keypair, rosenpass can not operate."
);
Ok(())
}
pub fn empty() -> Self {
Self::new(None)
}
pub fn from_sk_pk<Sk: AsRef<Path>, Pk: AsRef<Path>>(sk: Sk, pk: Pk) -> Self {
Self::new(Some(Keypair::new(pk, sk)))
}
/// Creates a new configuration
pub fn new(keypair: Option<Keypair>) -> Self {
pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(public_key: P1, secret_key: P2) -> Self {
Self {
keypair,
public_key: PathBuf::from(public_key.as_ref()),
secret_key: PathBuf::from(secret_key.as_ref()),
listen: vec![],
#[cfg(feature = "experiment_api")]
api: crate::api::config::ApiConfig::default(),
verbosity: Verbosity::Quiet,
peers: vec![],
config_file_path: PathBuf::new(),
@@ -336,7 +228,7 @@ impl Rosenpass {
/// from chaotic args
/// Question: the grammar is undecidable, what do we do here?
pub fn parse_args(args: Vec<String>) -> anyhow::Result<Self> {
let mut config = Self::new(Some(Keypair::new("", "")));
let mut config = Self::new("", "");
#[derive(Debug, Hash, PartialEq, Eq)]
enum State {
@@ -397,7 +289,7 @@ impl Rosenpass {
already_set.insert(OwnPublicKey),
"public-key was already set"
);
config.keypair.as_mut().unwrap().public_key = pk.into();
config.public_key = pk.into();
Own
}
(OwnSecretKey, sk, None) => {
@@ -405,7 +297,7 @@ impl Rosenpass {
already_set.insert(OwnSecretKey),
"secret-key was already set"
);
config.keypair.as_mut().unwrap().secret_key = sk.into();
config.secret_key = sk.into();
Own
}
(OwnListen, l, None) => {
@@ -524,146 +416,46 @@ impl Rosenpass {
}
}
impl Rosenpass {
/// Generate an example configuration
pub fn example_config() -> Self {
let peer = RosenpassPeer {
public_key: "/path/to/rp-peer-public-key".into(),
endpoint: Some("my-peer.test:9999".into()),
key_out: Some("/path/to/rp-key-out.txt".into()),
pre_shared_key: Some("additional pre shared key".into()),
wg: Some(WireGuard {
device: "wireguard device e.g. wg0".into(),
peer: "wireguard public key".into(),
extra_params: vec!["passed to".into(), "wg set".into()],
}),
};
Self {
public_key: "/path/to/rp-public-key".into(),
secret_key: "/path/to/rp-secret-key".into(),
peers: vec![peer],
..Self::new("", "")
}
}
}
impl Default for Verbosity {
fn default() -> Self {
Self::Quiet
}
}
pub static EXAMPLE_CONFIG: &str = r###"public_key = "/path/to/rp-public-key"
secret_key = "/path/to/rp-secret-key"
listen = []
verbosity = "Verbose"
[[peers]]
# Commented out fields are optional
public_key = "/path/to/rp-peer-public-key"
endpoint = "127.0.0.1:9998"
# pre_shared_key = "/path/to/preshared-key"
# Choose to store the key in a file via `key_out` or pass it to WireGuard by
# defining `device` and `peer`. You may choose to do both.
key_out = "/path/to/rp-key-out.txt" # path to store the key
# device = "wg0" # WireGuard interface
# peer = "RULdRAtUw7SFfVfGD..." # WireGuard public key
# extra_params = [] # passed to WireGuard `wg set`
"###;
#[cfg(test)]
mod test {
use std::net::IpAddr;
use super::*;
use std::{borrow::Borrow, net::IpAddr};
fn toml_des<S: Borrow<str>>(s: S) -> Result<toml::Table, toml::de::Error> {
toml::from_str(s.borrow())
}
fn toml_ser<S: Serialize>(s: S) -> Result<toml::Table, toml::ser::Error> {
toml::Table::try_from(s)
}
fn assert_toml<L: Serialize, R: Borrow<str>>(l: L, r: R, info: &str) -> anyhow::Result<()> {
fn lines_prepend(prefix: &str, s: &str) -> anyhow::Result<String> {
use std::fmt::Write;
let mut buf = String::new();
for line in s.lines() {
writeln!(&mut buf, "{prefix}{line}")?;
}
Ok(buf)
}
let l = toml_ser(l)?;
let r = toml_des(r.borrow())?;
ensure!(
l == r,
"{}{}TOML value mismatch.\n Have:\n{}\n Expected:\n{}",
info,
if info.is_empty() { "" } else { ": " },
lines_prepend(" ", &toml::to_string_pretty(&l)?)?,
lines_prepend(" ", &toml::to_string_pretty(&r)?)?
);
Ok(())
}
fn assert_toml_round<'de, L: Serialize + Deserialize<'de>, R: Borrow<str>>(
l: L,
r: R,
) -> anyhow::Result<()> {
let l = toml_ser(l)?;
assert_toml(&l, r.borrow(), "Straight deserialization")?;
let l: L = l.try_into().unwrap();
let l = toml_ser(l).unwrap();
assert_toml(l, r.borrow(), "Roundtrip deserialization")?;
Ok(())
}
fn split_str(s: &str) -> Vec<String> {
s.split(' ').map(|s| s.to_string()).collect()
}
#[test]
fn toml_serialization() -> anyhow::Result<()> {
#[cfg(feature = "experiment_api")]
assert_toml_round(
Rosenpass::empty(),
r#"
listen = []
verbosity = "Quiet"
peers = []
[api]
listen_path = []
listen_fd = []
stream_fd = []
"#,
)?;
#[cfg(not(feature = "experiment_api"))]
assert_toml_round(
Rosenpass::empty(),
r#"
listen = []
verbosity = "Quiet"
peers = []
"#,
)?;
#[cfg(feature = "experiment_api")]
assert_toml_round(
Rosenpass::from_sk_pk("/my/sk", "/my/pk"),
r#"
public_key = "/my/pk"
secret_key = "/my/sk"
listen = []
verbosity = "Quiet"
peers = []
[api]
listen_path = []
listen_fd = []
stream_fd = []
"#,
)?;
#[cfg(not(feature = "experiment_api"))]
assert_toml_round(
Rosenpass::from_sk_pk("/my/sk", "/my/pk"),
r#"
public_key = "/my/pk"
secret_key = "/my/sk"
listen = []
verbosity = "Quiet"
peers = []
"#,
)?;
Ok(())
}
#[test]
fn test_simple_cli_parse() {
let args = split_str(
@@ -674,10 +466,8 @@ mod test {
let config = Rosenpass::parse_args(args).unwrap();
assert_eq!(
config.keypair,
Some(Keypair::new("/my/public-key", "/my/secret-key"))
);
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
assert_eq!(config.verbosity, Verbosity::Verbose);
assert_eq!(
&config.listen,
@@ -706,10 +496,8 @@ mod test {
let config = Rosenpass::parse_args(args).unwrap();
assert_eq!(
config.keypair,
Some(Keypair::new("/my/public-key", "/my/secret-key"))
);
assert_eq!(config.public_key, PathBuf::from("/my/public-key"));
assert_eq!(config.secret_key, PathBuf::from("/my/secret-key"));
assert_eq!(config.verbosity, Verbosity::Verbose);
assert!(&config.listen.is_empty());
assert_eq!(
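For comparison with the tests above, here is a hypothetical sketch of building a config from `exchange`-style arguments; the `public-key` and `secret-key` keywords are taken from the parser states shown in this diff, and the paths are placeholders.

fn config_from_args() -> anyhow::Result<rosenpass::config::Rosenpass> {
    // Split a flat argument string the same way the tests above do.
    let args: Vec<String> = "public-key /my/public-key secret-key /my/secret-key"
        .split(' ')
        .map(|s| s.to_string())
        .collect();
    // parse_args fills a Rosenpass config from the exchange-style word list.
    rosenpass::config::Rosenpass::parse_args(args)
}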

View File

@@ -1,68 +1,13 @@
//! Pseudo Random Functions (PRFs) with a tree-like label scheme which
//! ensures their uniqueness.
//!
//! This ensures [domain separation](https://en.wikipedia.org/wiki/Domain_separation) is used
//! across the Rosenpass protocol.
//!
//! There is a chart containing all hash domains used in Rosenpass in the
//! [whitepaper](https://rosenpass.eu/whitepaper.pdf) ([/papers/whitepaper.md] in this repository).
//!
//! # Tutorial
//!
//! ```
//! use rosenpass::{hash_domain, hash_domain_ns};
//! use rosenpass::hash_domains::protocol;
//!
//! // Declaring a custom hash domain
//! hash_domain_ns!(protocol, custom_domain, "my custom hash domain label");
//!
//! // Declaring custom hashers
//! hash_domain_ns!(custom_domain, hashers, "hashers");
//! hash_domain_ns!(hashers, hasher1, "1");
//! hash_domain_ns!(hashers, hasher2, "2");
//!
//! // Declaring specific domain separators
//! hash_domain_ns!(custom_domain, domain_separators, "domain separators");
//! hash_domain!(domain_separators, sep1, "1");
//! hash_domain!(domain_separators, sep2, "2");
//!
//! // Generating values under hasher1 with both domain separators
//! let h1 = hasher1()?.mix(b"some data")?.dup();
//! let h1v1 = h1.mix(&sep1()?)?.mix(b"More data")?.into_value();
//! let h1v2 = h1.mix(&sep2()?)?.mix(b"More data")?.into_value();
//!
//! // Generating values under hasher2 with both domain separators
//! let h2 = hasher2()?.mix(b"some data")?.dup();
//! let h2v1 = h2.mix(&sep1()?)?.mix(b"More data")?.into_value();
//! let h2v2 = h2.mix(&sep2()?)?.mix(b"More data")?.into_value();
//!
//! // All of the domain separators are now different, random strings
//! let values = [h1v1, h1v2, h2v1, h2v2];
//! for i in 0..values.len() {
//! for j in (i+1)..values.len() {
//! assert_ne!(values[i], values[j]);
//! }
//! }
//!
//! Ok::<(), anyhow::Error>(())
//! ```
//! ensures their uniqueness
use anyhow::Result;
use rosenpass_ciphers::hash_domain::HashDomain;
use rosenpass_ciphers::{hash_domain::HashDomain, KEY_LEN};
/// Declare a hash function
///
/// # Examples
///
/// See the source file for details about how this is used concretely.
///
/// See the [module](self) documentation on how to use the hash domains in general
// TODO Use labels that can serve as identifiers
#[macro_export]
macro_rules! hash_domain_ns {
($(#[$($attrss:tt)*])* $base:ident, $name:ident, $($lbl:expr),+ ) => {
$(#[$($attrss)*])*
pub fn $name() -> ::anyhow::Result<::rosenpass_ciphers::hash_domain::HashDomain> {
($base:ident, $name:ident, $($lbl:expr),* ) => {
pub fn $name() -> Result<HashDomain> {
let t = $base()?;
$( let t = t.mix($lbl.as_bytes())?; )*
Ok(t)
@@ -70,18 +15,9 @@ macro_rules! hash_domain_ns {
}
}
/// Declare a concrete hash value
///
/// # Examples
///
/// See the source file for details about how this is used concretely.
///
/// See the [module](self) documentation on how to use the hash domains in general
#[macro_export]
macro_rules! hash_domain {
($(#[$($attrss:tt)*])* $base:ident, $name:ident, $($lbl:expr),+ ) => {
$(#[$($attrss)*])*
pub fn $name() -> ::anyhow::Result<[u8; ::rosenpass_ciphers::KEY_LEN]> {
($base:ident, $name:ident, $($lbl:expr),* ) => {
pub fn $name() -> Result<[u8; KEY_LEN]> {
let t = $base()?;
$( let t = t.mix($lbl.as_bytes())?; )*
Ok(t.into_value())
@@ -89,227 +25,22 @@ macro_rules! hash_domain {
}
}
/// The hash domain containing the protocol string.
///
/// This serves as a global [domain separator](https://en.wikipedia.org/wiki/Domain_separation)
/// used in various places in the rosenpass protocol.
///
/// This is generally used to create further hash-domains for specific purposes.
///
/// # Examples
///
/// See the source file for details about how this is used concretely.
///
/// See the [module](self) documentation on how to use the hash domains in general
pub fn protocol() -> Result<HashDomain> {
HashDomain::zero().mix("Rosenpass v1 mceliece460896 Kyber512 ChaChaPoly1305 BLAKE2s".as_bytes())
}
hash_domain_ns!(
/// Hash domain based on [protocol] for calculating [crate::msgs::Envelope::mac].
///
/// # Examples
///
/// See the source of [crate::msgs::Envelope::seal] and [crate::msgs::Envelope::check_seal]
/// to figure out how this is concretely used.
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, mac, "mac");
hash_domain_ns!(
/// Hash domain based on [protocol] involved in calculating [crate::msgs::Envelope::cookie].
///
/// # Examples
///
/// See the source of [crate::msgs::Envelope::seal_cookie],
/// [crate::protocol::CryptoServer::handle_msg_under_load], and
/// [crate::protocol::CryptoServer::handle_cookie_reply]
/// to figure out how this is concretely used.
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, cookie, "cookie");
hash_domain_ns!(
/// Hash domain based on [protocol] involved in calculating [crate::msgs::Envelope::cookie].
///
/// # Examples
///
/// See the source of [crate::msgs::Envelope::seal_cookie],
/// [crate::protocol::CryptoServer::handle_msg_under_load], and
/// [crate::protocol::CryptoServer::handle_cookie_reply]
/// to figure out how this is concretely used.
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, cookie_value, "cookie-value");
hash_domain_ns!(
/// Hash domain based on [protocol] involved in calculating [crate::msgs::Envelope::cookie].
///
/// # Examples
///
/// See the source of [crate::msgs::Envelope::seal_cookie],
/// [crate::protocol::CryptoServer::handle_msg_under_load], and
/// [crate::protocol::CryptoServer::handle_cookie_reply]
/// to figure out how this is concretely used.
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, cookie_key, "cookie-key");
hash_domain_ns!(
/// Hash domain based on [protocol] for calculating the peer id as transmitted (encrypted)
/// in [crate::msgs::InitHello::pidic].
///
/// # Examples
///
/// See the source of [crate::protocol::CryptoServer::pidm] and
/// [crate::protocol::Peer::pidt]
/// to figure out how this is concretely used.
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, peerid, "peer id");
hash_domain_ns!(
/// Hash domain based on [protocol] for calculating the additional data
/// during [crate::msgs::Biscuit] encryption, storing the biscuit into
/// [crate::msgs::RespHello::biscuit].
///
/// # Examples
///
/// To understand how the biscuit is used, it is best to read
/// the code of [crate::protocol::HandshakeState::store_biscuit] and
/// [crate::protocol::HandshakeState::load_biscuit]
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, biscuit_ad, "biscuit additional data");
hash_domain_ns!(
/// This hash domain begins our actual handshake procedure, initializing the
/// chaining key [crate::protocol::HandshakeState::ck].
///
/// # Examples
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, ckinit, "chaining key init");
hash_domain_ns!(
/// Namespace for chaining key usage domain separators.
///
/// During the execution of the Rosenpass protocol, we use the chaining key for multiple
/// purposes, so to make sure that we have unique value domains, we mix a domain separator
/// into the chaining key before using it for any particular purpose.
///
/// We could use the full domain separation strings, but using a hash value here is nice
/// because it does not lead to any constraints about domain separator format and we can
/// even allow third parties to define their own separators by claiming a namespace.
///
/// # Examples
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
protocol, _ckextract, "chaining key extract");
hash_domain_ns!(protocol, mac, "mac");
hash_domain_ns!(protocol, cookie, "cookie");
hash_domain_ns!(protocol, peerid, "peer id");
hash_domain_ns!(protocol, biscuit_ad, "biscuit additional data");
hash_domain_ns!(protocol, ckinit, "chaining key init");
hash_domain_ns!(protocol, _ckextract, "chaining key extract");
hash_domain!(
/// Used to mix in further values into the chaining key during the handshake.
///
/// See [_ckextract].
///
/// # Examples
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
_ckextract, mix, "mix");
hash_domain!(
/// Chaining key domain separator for generating encryption keys that can
/// encrypt parts of the handshake.
///
/// See [_ckextract].
///
/// # Examples
///
/// Encryption of data during the handshake happens in
/// [crate::protocol::HandshakeState::encrypt_and_mix] and decryption happens in
/// [crate::protocol::HandshakeState::decrypt_and_mix]. See their source code
/// for details.
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
_ckextract, hs_enc, "handshake encryption");
hash_domain!(
/// Chaining key domain separator for live data encryption.
/// Live data encryption is only used to send confirmation of handshake
/// done in [crate::msgs::EmptyData].
///
/// See [_ckextract].
///
/// # Examples
///
/// This domain separator finds use in [crate::protocol::HandshakeState::enter_live].
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
_ckextract, ini_enc, "initiator handshake encryption");
hash_domain!(
/// Chaining key domain separator for live data encryption.
/// Live data encryption is only used to send confirmation of handshake
/// done in [crate::msgs::EmptyData].
///
/// See [_ckextract].
///
/// # Examples
///
/// This domain separator finds use in [crate::protocol::HandshakeState::enter_live].
/// Check out its source code!
///
/// To understand how the chaining key is used, study
/// [crate::protocol::HandshakeState], especially [crate::protocol::HandshakeState::init]
/// and [crate::protocol::HandshakeState::mix].
///
/// See the [module](self) documentation on how to use the hash domains in general.
_ckextract, res_enc, "responder handshake encryption");
hash_domain!(_ckextract, mix, "mix");
hash_domain!(_ckextract, hs_enc, "handshake encryption");
hash_domain!(_ckextract, ini_enc, "initiator handshake encryption");
hash_domain!(_ckextract, res_enc, "responder handshake encryption");
hash_domain_ns!(
/// Chaining key domain separator for any usage specific purposes.
///
/// We do recommend that third parties base their specific domain separators
/// on an internet domain and/or mix in much more specific information.
///
/// We only really use this to derive an output key for wireguard; see [osk].
///
/// See [_ckextract].
///
/// # Examples
///
/// See the [module](self) documentation on how to use the hash domains in general.
_ckextract, _user, "user");
hash_domain_ns!(
/// Chaining key domain separator for any rosenpass specific purposes.
///
/// We only really use this to derive an output key for wireguard; see [osk].
///
/// See [_ckextract].
///
/// # Examples
///
/// See the [module](self) documentation on how to use the hash domains in general.
_user, _rp, "rosenpass.eu");
hash_domain!(
/// Chaining key domain separator for deriving the key sent to WireGuard.
///
/// See [_ckextract].
///
/// # Examples
///
/// This domain separator finds use in [crate::protocol::CryptoServer::osk].
/// Check out its source code!
///
/// See the [module](self) documentation on how to use the hash domains in general.
_rp, osk, "wireguard psk");
hash_domain_ns!(_ckextract, _user, "user");
hash_domain_ns!(_user, _rp, "rosenpass.eu");
hash_domain!(_rp, osk, "wireguard psk");
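As the `_user` doc comment above suggests, third parties can claim their own separator namespace. A minimal sketch, following the pattern from the module tutorial; the domain string and labels are purely illustrative, and with one of the two macro variants shown in this diff (the one using bare `Result`/`KEY_LEN`), those names additionally need to be in scope at the call site.

use rosenpass::{hash_domain, hash_domain_ns};
use rosenpass::hash_domains::_user;

// Claim a separator namespace under the "user" chaining-key domain...
hash_domain_ns!(_user, _example_org, "example.org");
// ...and derive a concrete, domain-separated label beneath it.
hash_domain!(_example_org, example_output_key, "output key");

fn demo() -> anyhow::Result<()> {
    // Distinct label chains yield distinct values.
    let ours = example_output_key()?;
    let wg_psk = rosenpass::hash_domains::osk()?;
    assert_ne!(ours, wg_psk);
    Ok(())
}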

View File

@@ -1,20 +1,3 @@
//! This is the central rosenpass crate implementing the rosenpass protocol.
//!
//! - [crate::app_server] contains the business logic of rosenpass, handling networking
//! - [crate::cli] contains the cli parsing logic and contains quite a bit of startup logic; the
//! main function quickly hands over to [crate::cli::CliArgs::run] which contains quite a bit
//! of our startup logic
//! - [crate::config] has the code to parse and generate configuration files
//! - [crate::hash_domains] lists the different hash function domains used in the Rosenpass
//! protocol
//! - [crate::msgs] provides declarations of the Rosenpass protocol network messages and facilities
//! to parse those messages through the [::zerocopy] crate
//! - [crate::protocol] this is where the bulk of our code lives; this module contains the actual
//! cryptographic protocol logic
//! - crate::api implements the Rosenpass unix socket API, if feature "experiment_api" is active
#[cfg(feature = "experiment_api")]
pub mod api;
pub mod app_server;
pub mod cli;
pub mod config;
@@ -22,25 +5,10 @@ pub mod hash_domains;
pub mod msgs;
pub mod protocol;
/// Error types used in diverse places across Rosenpass
#[derive(thiserror::Error, Debug)]
pub enum RosenpassError {
/// Usually indicates that parsing a struct through the
/// [::zerocopy] crate failed
#[error("buffer size mismatch")]
BufferSizeMismatch,
/// Mostly raised by the `TryFrom<u8>` implementation for [crate::msgs::MsgType]
/// to indicate that a message type is not defined
#[error("invalid message type")]
InvalidMessageType(
/// The message type that could not be parsed
u8,
),
/// Raised by the `TryFrom<RawMsgType>` (crate::api::RawMsgType) implementation for crate::api::RequestMsgType
/// and crate::api::RequestMsgType to indicate that a message type is not defined
#[error("invalid API message type")]
InvalidApiMessageType(
/// The message type that could not be parsed
u128,
),
InvalidMessageType(u8),
}
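The doc comment above notes that `InvalidMessageType` is mostly raised by the `TryFrom<u8>` implementation for `crate::msgs::MsgType`. A small hedged sketch of handling that conversion, assuming the conversion described there; the function name is hypothetical.

use rosenpass::msgs::MsgType;

fn classify_first_byte(byte: u8) {
    // The TryFrom<u8> impl yields RosenpassError::InvalidMessageType for undefined type bytes.
    match MsgType::try_from(byte) {
        Ok(_) => println!("recognized message type"),
        Err(e) => eprintln!("rejecting packet: {e}"), // Display prints "invalid message type"
    }
}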

Some files were not shown because too many files have changed in this diff.