Compare commits


No commits in common. "main" and "1.30.1" have entirely different histories.
main...1.30.1

126 changed files with 14261 additions and 14965 deletions

.dockerignore

@@ -1,15 +1,40 @@
// Ignore everything
*
# Local build artifacts
target
// Allow what is needed
!.git
# Data folder
data
# Misc
.env
.env.template
.gitattributes
.gitignore
rustfmt.toml
# IDE files
.vscode
.idea
.editorconfig
*.iml
# Documentation
.github
*.md
*.txt
*.yml
*.yaml
# Docker
hooks
tools
Dockerfile
.dockerignore
docker/**
!docker/healthcheck.sh
!docker/start.sh
!migrations
!src
!build.rs
!Cargo.lock
!Cargo.toml
!rustfmt.toml
!rust-toolchain.toml
# Web vault
web-vault
# Vaultwarden Resources
resources
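One way to sanity-check what this allow-list actually leaves in the build context is a throwaway build that copies the context and lists it. A minimal sketch, assuming BuildKit (which accepts a Dockerfile on stdin); nothing here is part of the repository:

docker build --no-cache --progress=plain -f - . <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx
EOF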

.env.template

@@ -10,67 +10,30 @@
## variable ENV_FILE can be set to the location of this file prior to starting
## Vaultwarden.
####################
### Data folders ###
####################
## Main data folder
# DATA_FOLDER=data
## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
# TMP_FOLDER=data/tmp
## Templates data folder, by default uses embedded templates
## Check source code to see the format
# TEMPLATES_FOLDER=data/templates
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false
## Web vault settings
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true
#########################
### Database settings ###
#########################
## Database URL
## When using SQLite, this is the path to the DB file, defaults to %DATA_FOLDER%/db.sqlite3
# DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
# DATABASE_URL=mysql://user:password@host[:port]/database_name
## When using PostgreSQL, specify an appropriate connection URI (recommended)
## or keyword/value connection string.
## Details:
## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
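The keyword/value connection string mentioned above would look roughly as follows; a sketch only, with host, credentials, and database name as hypothetical placeholders:

# DATABASE_URL='host=localhost port=5432 user=vaultwarden password=secret dbname=vaultwarden'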
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents Vaultwarden from automatically enabling it on start.
## Please read the project wiki page about this setting before changing the value, as it can
## cause performance degradation or render the service unable to start.
# ENABLE_DB_WAL=true
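As the note above implies, turning WAL off for an existing database has to happen in the DB itself. A minimal sketch with the sqlite3 CLI, run while Vaultwarden is stopped and assuming the default SQLite path:

sqlite3 data/db.sqlite3 'PRAGMA journal_mode=delete;'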
## Database connection retries
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
## Database timeout
## Timeout when acquiring database connection
# DATABASE_TIMEOUT=30
## Database max connections
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10
## Database timeout
## Timeout when acquiring database connection
# DATABASE_TIMEOUT=30
## Database connection initialization
## Allows SQL statements to be run whenever a new database connection is created.
## This is mainly useful for connection-scoped pragmas.
@@ -80,36 +43,74 @@
## - PostgreSQL: ""
# DATABASE_CONN_INIT=""
#################
### WebSocket ###
#################
## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
# TMP_FOLDER=data/tmp
## Enable websocket notifications
# ENABLE_WEBSOCKET=true
## Templates data folder, by default uses embedded templates
## Check source code to see the format
# TEMPLATES_FOLDER=/path/to/templates
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false
##########################
### Push notifications ###
##########################
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
# IP_HEADER=X-Real-IP
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
# ICON_CACHE_TTL=2592000
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
# ICON_CACHE_NEGTTL=259200
## Web vault settings
# WEB_VAULT_FOLDER=web-vault/
# WEB_VAULT_ENABLED=true
## Enables websocket notifications
# WEBSOCKET_ENABLED=false
## Controls the WebSocket server address and port
# WEBSOCKET_ADDRESS=0.0.0.0
# WEBSOCKET_PORT=3012
## Enables push notifications (requires key and id from https://bitwarden.com/host)
## Details about mobile client push notification:
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
# PUSH_ENABLED=false
# PUSH_ENABLED=true
# PUSH_INSTALLATION_ID=CHANGEME
# PUSH_INSTALLATION_KEY=CHANGEME
# WARNING: Do not modify the following settings unless you fully understand their implications!
# Default Push Relay and Identity URIs
## Don't change this unless you know what you're doing.
# PUSH_RELAY_URI=https://push.bitwarden.com
# PUSH_IDENTITY_URI=https://identity.bitwarden.com
# European Union Data Region Settings
# If you have selected "European Union" as your data region, use the following URIs instead.
# PUSH_RELAY_URI=https://api.bitwarden.eu
# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
#####################
### Schedule jobs ###
#####################
## Controls whether users are allowed to create Bitwarden Sends.
## This setting applies globally to all users.
## To control this on a per-org basis instead, use the "Disable Send" org policy.
# SENDS_ALLOWED=true
## Controls whether users can enable emergency access to their accounts.
## This setting applies globally to all users.
# EMERGENCY_ACCESS_ALLOWED=true
## Controls whether event logging is enabled for organizations
## This setting applies to organizations.
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
# ORG_EVENTS_ENABLED=false
## Controls whether users can change their email.
## This setting applies globally to all users
# EMAIL_CHANGE_ALLOWED=true
## Number of days to retain events stored in the database.
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
# EVENTS_DAYS_RETAIN=
## BETA FEATURE: Groups
## Controls whether group support is enabled for organizations
## This setting applies to organizations.
## Disabled by default because this is a beta feature, it contains known issues!
## KNOW WHAT YOU ARE DOING!
# ORG_GROUPS_ENABLED=false
## Job scheduler settings
##
@@ -150,151 +151,39 @@
## Cron schedule of the job that cleans old events from the event table.
## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
## Number of days to retain events stored in the database.
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
# EVENTS_DAYS_RETAIN=
##
## Cron schedule of the job that cleans old auth requests from the auth request table.
## Defaults to every minute. Set blank to disable this job.
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
## Defaults to every minute. Set blank to disable this job.
# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
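The schedules above appear to use six-field cron expressions with a leading seconds field; that is an inference from the examples, not a documented guarantee. Reading EVENT_CLEANUP_SCHEDULE that way:

# sec  min  hour  day-of-month  month  day-of-week
#  0    10    0        *          *         *
# "0 10 0 * * *" therefore fires once per day, at 00:10:00.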
########################
### General settings ###
########################
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true
## Domain settings
## The domain must match the address from where you access the server
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS; you can use Let's Encrypt for free certs
## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
## Details:
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
## For development
# DOMAIN=http://localhost
## For public server
# DOMAIN=https://vw.domain.tld
## For public server (URL with port number)
# DOMAIN=https://vw.domain.tld:8443
## For public server (URL with path)
# DOMAIN=https://domain.tld/vw
## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
## Controls whether users are allowed to create Bitwarden Sends.
## This setting applies globally to all users.
## To control this on a per-org basis instead, use the "Disable Send" org policy.
# SENDS_ALLOWED=true
## Logging to file
# LOG_FILE=/path/to/log
## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=
## Logging to Syslog
## This requires extended logging
# USE_SYSLOG=false
## Per-organization attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per organization.
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further attachments.
# USER_ATTACHMENT_LIMIT=
## Per-user send storage limit (KB)
## Max kilobytes of send storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further sends.
# USER_SEND_LIMIT=
## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## Setting it to "trace" or "debug" would also show logs for mounted
## routes and static file, websocket and alive requests
# LOG_LEVEL=Info
## Number of days to wait before auto-deleting a trashed item.
## If unset (the default), trashed items are not auto-deleted.
## This setting applies globally, so make sure to inform all users of any changes to this setting.
# TRASH_AUTO_DELETE_DAYS=
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents Vaultwarden from automatically enabling it on start.
## Please read the project wiki page about this setting before changing the value, as it can
## cause performance degradation or render the service unable to start.
# ENABLE_DB_WAL=true
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
## resulting in an email notification. An incomplete 2FA login is one where the correct
## master password was provided but the required 2FA step was not completed, which
## potentially indicates a master password compromise. Set to 0 to disable this check.
## This setting applies globally to all users.
# INCOMPLETE_2FA_TIME_LIMIT=3
## Disable icon downloading
## Set to true to disable icon downloading in the internal icon service.
## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
## will be deleted eventually, but won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false
## Controls if new users can register
# SIGNUPS_ALLOWED=true
## Controls if new users need to verify their email address upon registration
## Note that setting this option to true prevents logins until the email address has been verified!
## The welcome email will include a verification link, and login attempts will periodically
## trigger another verification email to be sent.
# SIGNUPS_VERIFY=false
## If SIGNUPS_VERIFY is set to true, this limits how many seconds must pass since the last
## email verification link was sent before another verification email will be sent
# SIGNUPS_VERIFY_RESEND_TIME=3600
## If SIGNUPS_VERIFY is set to true, this limits how many times a verification email
## will be re-sent upon an attempted login.
# SIGNUPS_VERIFY_RESEND_LIMIT=6
## Controls if new users from a list of comma-separated domains can register
## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
## Controls whether event logging is enabled for organizations
## This setting applies to organizations.
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
# ORG_EVENTS_ENABLED=false
## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
## Allow org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden
## The number of hours after which an organization invite token, emergency access invite token,
## email verification token and deletion request token will expire (must be at least 1)
# INVITATION_EXPIRATION_HOURS=120
## Controls whether users can enable emergency access to their accounts.
## This setting applies globally to all users.
# EMERGENCY_ACCESS_ALLOWED=true
## Controls whether users can change their email.
## This setting applies globally to all users
# EMAIL_CHANGE_ALLOWED=true
## Number of server-side password hashing iterations for the password hash.
## The default for new users. If changed, it will be updated during login for existing users.
# PASSWORD_ITERATIONS=600000
## Controls whether users can set password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true
## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.
# SHOW_PASSWORD_HINT=false
#########################
### Advanced settings ###
#########################
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
# IP_HEADER=X-Real-IP
## Database connection retries
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
## Icon service
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
@@ -317,64 +206,73 @@
## are currently better supported by the Bitwarden clients.
# ICON_REDIRECT_CODE=302
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
## Default: 2592000 (30 days)
# ICON_CACHE_TTL=2592000
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
## Default: 259200 (3 days)
# ICON_CACHE_NEGTTL=259200
## Disable icon downloading
## Set to true to disable icon downloading in the internal icon service.
## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
## will be deleted eventually, but won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false
## Icon download timeout
## Configure the timeout value when downloading the favicons.
## The default is 10 seconds, but this could be too low on slower network connections
# ICON_DOWNLOAD_TIMEOUT=10
## Block HTTP domains/IPs by Regex
## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
## Icon blacklist Regex
## Any domains or IPs that match this regex won't be fetched by the icon service.
## Useful to hide other servers in the local network. Check the WIKI for more details
## NOTE: Always enclose this regex within single quotes!
# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
## Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
## Any IP which is not defined as a global IP will be blacklisted.
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
# ICON_BLACKLIST_NON_GLOBAL_IPS=true
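If the two-/24 example above needed to cover all RFC 1918 private ranges instead, a sketch (shown under the newer HTTP_REQUEST_BLOCK_REGEX name; the older ICON_BLACKLIST_REGEX on the right-hand side of this diff would take the same value):

# HTTP_REQUEST_BLOCK_REGEX='^(10\.|172\.(1[6-9]|2[0-9]|3[01])\.|192\.168\.)'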
## Client Settings
## Enable experimental feature flags for clients.
## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
##
## The following flags are available:
## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
## - "autofill-v2": Use the new autofill implementation.
## - "browser-fileless-import": Directly import credentials from other providers without a file.
## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
## Disable 2FA remember
## Enabling this would force the users to use a second factor to login every time.
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false
## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
# REQUIRE_DEVICE_EMAIL=false
## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true
## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open the email client and copy the token.
# EMAIL_EXPIRATION_TIME=600
## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
## Email token size
## Number of digits in an email 2FA token (min: 6, max: 255).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
## Logging to Syslog
## This requires extended logging
# USE_SYSLOG=false
## Controls if new users can register
# SIGNUPS_ALLOWED=true
## Logging to file
# LOG_FILE=/path/to/log
## Controls if new users need to verify their email address upon registration
## Note that setting this option to true prevents logins until the email address has been verified!
## The welcome email will include a verification link, and login attempts will periodically
## trigger another verification email to be sent.
# SIGNUPS_VERIFY=false
## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
## For a specific module append a comma separated `path::to::module=log_level`
## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
# LOG_LEVEL=info
## If SIGNUPS_VERIFY is set to true, this limits how many seconds must pass since the last
## email verification link was sent before another verification email will be sent
# SIGNUPS_VERIFY_RESEND_TIME=3600
## If SIGNUPS_VERIFY is set to true, this limits how many times a verification email
## will be re-sent upon an attempted login.
# SIGNUPS_VERIFY_RESEND_LIMIT=6
## Controls if new users from a list of comma-separated domains can register
## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
## Token for the admin interface, preferably an Argon2 PCH string
## Vaultwarden has a built-in generator by calling `vaultwarden hash`
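A sketch of using that built-in generator; the single-quoting advice is an assumption about how .env parsers treat the $ characters in a PHC string, and the token shown is a truncated placeholder:

# Prompts for the admin password and prints an Argon2 PHC string:
vaultwarden hash
# Store the result single-quoted so the $ signs survive:
# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$...'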
@@ -391,13 +289,54 @@
## meant to be used with the use of a separate auth layer in front
# DISABLE_ADMIN_TOKEN=false
## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
# ADMIN_RATELIMIT_SECONDS=300
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
# ADMIN_RATELIMIT_MAX_BURST=3
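Read together, and assuming the token-bucket semantics the averaging comment suggests, these two knobs mean: with ADMIN_RATELIMIT_SECONDS=300 and ADMIN_RATELIMIT_MAX_BURST=3, an IP can make up to three admin login attempts back to back, then regains one attempt roughly every 300 seconds.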
## Allow org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden
## Set the lifetime of admin sessions to this value (in minutes).
# ADMIN_SESSION_LIFETIME=20
## The number of hours after which an organization invite token, emergency access invite token,
## email verification token and deletion request token will expire (must be at least 1)
# INVITATION_EXPIRATION_HOURS=120
## Per-organization attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per organization.
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further attachments.
# USER_ATTACHMENT_LIMIT=
## Number of days to wait before auto-deleting a trashed item.
## If unset (the default), trashed items are not auto-deleted.
## This setting applies globally, so make sure to inform all users of any changes to this setting.
# TRASH_AUTO_DELETE_DAYS=
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
## resulting in an email notification. An incomplete 2FA login is one where the correct
## master password was provided but the required 2FA step was not completed, which
## potentially indicates a master password compromise. Set to 0 to disable this check.
## This setting applies globally to all users.
# INCOMPLETE_2FA_TIME_LIMIT=3
## Number of server-side password hashing iterations for the password hash.
## The default for new users. If changed, it will be updated during login for existing users.
# PASSWORD_ITERATIONS=350000
## Controls whether users can set password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true
## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.
# SHOW_PASSWORD_HINT=false
## Domain settings
## The domain must match the address from where you access the server
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS; you can use Let's Encrypt for free certs
# DOMAIN=https://vw.domain.tld:8443
## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
@@ -412,28 +351,13 @@
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
# LOGIN_RATELIMIT_MAX_BURST=10
## BETA FEATURE: Groups
## Controls whether group support is enabled for organizations
## This setting applies to organizations.
## Disabled by default because this is a beta feature, it contains known issues!
## KNOW WHAT YOU ARE DOING!
# ORG_GROUPS_ENABLED=false
## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
# ADMIN_RATELIMIT_SECONDS=300
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
# ADMIN_RATELIMIT_MAX_BURST=3
## Increase secure note size limit (Know the risks!)
## Sets the secure note size limit to 100_000 instead of the default 10_000.
## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
## KNOW WHAT YOU ARE DOING!
# INCREASE_NOTE_SIZE_LIMIT=false
## Enforce Single Org with Reset Password Policy
## Enforce that the Single Org policy is enabled before setting the Reset Password policy
## Bitwarden enforces this by default. In Vaultwarden we encouraged using multiple organizations because groups were not available.
## Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
# ENFORCE_SINGLE_ORG_WITH_RESET_PW_POLICY=false
########################
### MFA/2FA settings ###
########################
## Set the lifetime of admin sessions to this value (in minutes).
# ADMIN_SESSION_LIFETIME=20
## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
@@ -444,46 +368,16 @@
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
## Duo Settings
## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
## Otherwise users will need to configure it themselves.
## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
## Then set the following options, based on the values obtained from the last step:
# DUO_IKEY=<Client ID>
# DUO_SKEY=<Client Secret>
# DUO_IKEY=<Integration Key>
# DUO_SKEY=<Secret Key>
# DUO_HOST=<API Hostname>
## After that, you should be able to follow the rest of the guide linked above,
## ignoring the fields that ask for the values that you already configured beforehand.
##
## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
## Duo no longer supports this, but it still works for some integrations.
## If you aren't sure, leave this alone.
# DUO_USE_IFRAME=false
## Email 2FA settings
## Email token size
## Number of digits in an email 2FA token (min: 6, max: 255).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
##
## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open the email client and copy the token.
# EMAIL_EXPIRATION_TIME=600
##
## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3
##
## Setup email 2FA regardless of any organization policy
# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
## Automatically setup email 2FA as fallback provider when needed
# EMAIL_2FA_AUTO_FALLBACK=false
## Other MFA/2FA settings
## Disable 2FA remember
## Enabling this would force the users to use a second factor to login every time.
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false
##
## Authenticator Settings
## Disable authenticator time drifted codes to be valid.
## TOTP codes of the previous and next 30 seconds will be invalid
@@ -496,9 +390,12 @@
## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
###########################
### SMTP Email settings ###
###########################
## Rocket specific settings
## See https://rocket.rs/v0.4/guide/configuration/ for more details.
# ROCKET_ADDRESS=0.0.0.0
# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
# ROCKET_WORKERS=10
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
@@ -506,19 +403,12 @@
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=vaultwarden@domain.tld
# SMTP_FROM_NAME=Vaultwarden
# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_TIMEOUT=15
## Choose the type of secure connection for SMTP. The default is "starttls".
## The available options are:
## - "starttls": The default port is 587.
## - "force_tls": The default port is 465.
## - "off": The default port is 25.
## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
# SMTP_SECURITY=starttls
# SMTP_PORT=587
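Per the list above, the implicit-TLS variant pairs the non-default security mode with its matching port; a sketch, leaving the other SMTP values unchanged:

# SMTP_SECURITY=force_tls
# SMTP_PORT=465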
# Whether to send mail via the `sendmail` command
# USE_SENDMAIL=false
# Which sendmail command to use. The one found in the $PATH is used if not specified.
@@ -527,7 +417,7 @@
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
## Possible values: ["Plain", "Login", "Xoauth2"].
## Multiple options need to be separated by a comma ','.
# SMTP_AUTH_MECHANISM=
# SMTP_AUTH_MECHANISM="Plain"
## Server name sent during the SMTP HELO
## By default this value should be the machine's hostname,
@@ -535,34 +425,30 @@
# HELO_NAME=
## Embed images as email attachments
# SMTP_EMBED_IMAGES=true
# SMTP_EMBED_IMAGES=false
## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
# SMTP_DEBUG=false
## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
## Accept Invalid Certificates
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
# SMTP_ACCEPT_INVALID_CERTS=false
## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
#######################
### Rocket settings ###
#######################
## Rocket specific settings
## See https://rocket.rs/v0.5/guide/configuration/ for more details.
# ROCKET_ADDRESS=0.0.0.0
## The default port is 8000, unless running in a Docker container, in which case it is 80.
# ROCKET_PORT=8000
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
# REQUIRE_DEVICE_EMAIL=false
## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=
# vim: syntax=ini

.github/CODEOWNERS vendored

@@ -1,3 +0,0 @@
/.github @dani-garcia @BlackDex
/.github/CODEOWNERS @dani-garcia @BlackDex
/.github/workflows/** @dani-garcia @BlackDex

.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,66 @@
---
name: Bug report
about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
title: ''
labels: ''
assignees: ''
---
<!--
# ###
NOTE: Please update to the latest version of vaultwarden before reporting an issue!
This saves you and us a lot of time and troubleshooting.
See:
* https://github.com/dani-garcia/vaultwarden/issues/1180
* https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
# ###
-->
<!--
Please fill out the following template to make solving your problem easier and faster for us.
This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
Remember to hide/redact personal or confidential information,
such as passwords, IP addresses, and DNS names as appropriate.
-->
### Subject of the issue
<!-- Describe your issue here. -->
### Deployment environment
<!--
=========================================================================================
Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
That will auto-generate most of the info requested in this section.
=========================================================================================
-->
<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
<!-- Remember to check if your issue exists on the latest version first! -->
* vaultwarden version:
<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
* Install method:
* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
* Reverse proxy and version: <!-- if applicable -->
* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
* Other relevant details:
### Steps to reproduce
<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
and how did you start vaultwarden? -->
### Expected behaviour
<!-- Tell us what you expected to happen -->
### Actual behaviour
<!-- Tell us what actually happened -->
### Troubleshooting data
<!-- Share any log files, screenshots, or other relevant troubleshooting data -->

.github/ISSUE_TEMPLATE/bug_report.yml

@@ -1,167 +0,0 @@
name: Bug Report
description: File a bug report
labels: ["bug"]
body:
#
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Please *do not* submit feature requests or ask for help on how to configure Vaultwarden here.
The [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions/) has sections for Questions and Ideas.
Also, make sure you are running [![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/releases/latest) of Vaultwarden!
And search for existing open or closed issues or discussions regarding your topic before posting.
Be sure to check and validate the Vaultwarden Admin Diagnostics (`/admin/diagnostics`) page for any errors!
See here [how to enable the admin page](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).
#
- id: support-string
type: textarea
attributes:
label: Vaultwarden Support String
description: Output of the **Generate Support String** from the `/admin/diagnostics` page.
placeholder: |
1. Go to the Vaultwarden Admin of your instance https://example.domain.tld/admin/diagnostics
2. Click on `Generate Support String`
3. Click on `Copy To Clipboard`
4. Replace this text by pasting it into this textarea without any modifications
validations:
required: true
#
- id: version
type: input
attributes:
label: Vaultwarden Build Version
description: What version of Vaultwarden are you running?
placeholder: ex. v1.31.0 or v1.32.0-3466a804
validations:
required: true
#
- id: deployment
type: dropdown
attributes:
label: Deployment method
description: How did you deploy Vaultwarden?
multiple: false
options:
- Official Container Image
- Build from source
- OS Package (apt, yum/dnf, pacman, apk, nix, ...)
- Manually Extracted from Container Image
- Downloaded from GitHub Actions Release Workflow
- Other method
validations:
required: true
#
- id: deployment-other
type: textarea
attributes:
label: Custom deployment method
description: If you deployed Vaultwarden via any other method, please describe how.
#
- id: reverse-proxy
type: input
attributes:
label: Reverse Proxy
description: Are you using a reverse proxy, if so which and what version?
placeholder: ex. nginx 1.26.2, caddy 2.8.4, traefik 3.1.2, haproxy 3.0
validations:
required: true
#
- id: os
type: dropdown
attributes:
label: Host/Server Operating System
description: On what operating system are you running the Vaultwarden server?
multiple: false
options:
- Linux
- NAS/SAN
- Cloud
- Windows
- macOS
- Other
validations:
required: true
#
- id: os-version
type: input
attributes:
label: Operating System Version
description: What version of the operating system(s) are you seeing the problem on?
placeholder: ex. Arch Linux, Ubuntu 24.04, Kubernetes, Synology DSM 7.x, Windows 11
#
- id: clients
type: dropdown
attributes:
label: Clients
description: What client(s) are you seeing the problem on?
multiple: true
options:
- Web Vault
- Browser Extension
- CLI
- Desktop
- Android
- iOS
validations:
required: true
#
- id: client-version
type: input
attributes:
label: Client Version
description: What version(s) of the client(s) are you seeing the problem on?
placeholder: ex. CLI v2024.7.2, Firefox 130 - v2024.7.0
#
- id: reproduce
type: textarea
attributes:
label: Steps To Reproduce
description: How can we reproduce the behavior?
value: |
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. Click on '...'
5. Etc '...'
validations:
required: true
#
- id: expected
type: textarea
attributes:
label: Expected Result
description: A clear and concise description of what you expected to happen.
validations:
required: true
#
- id: actual
type: textarea
attributes:
label: Actual Result
description: A clear and concise description of what is happening.
validations:
required: true
#
- id: logs
type: textarea
attributes:
label: Logs
description: Provide the logs generated by Vaultwarden during the time this issue occurs.
render: text
#
- id: screenshots
type: textarea
attributes:
label: Screenshots or Videos
description: If applicable, add screenshots and/or a short video to help explain your problem.
#
- id: additional-context
type: textarea
attributes:
label: Additional Context
description: Add any other context about the problem here.

.github/ISSUE_TEMPLATE/config.yml

@@ -1,8 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: GitHub Discussions for Vaultwarden
url: https://github.com/dani-garcia/vaultwarden/discussions
about: Use the discussions to request features or get help with usage/configuration.
- name: Discourse forum for Vaultwarden
- name: Discourse forum for vaultwarden
url: https://vaultwarden.discourse.group/
about: An alternative to the GitHub Discussions, if this is easier for you.
about: Use this forum to request features or get help with usage/configuration.
- name: GitHub Discussions for vaultwarden
url: https://github.com/dani-garcia/vaultwarden/discussions
about: An alternative to the Discourse forum, if this is easier for you.

.github/workflows/build.yml

@@ -28,7 +28,6 @@ on:
jobs:
build:
# We use Ubuntu 22.04 here because this matches the library versions used within the Debian docker containers
runs-on: ubuntu-22.04
timeout-minutes: 120
# Make warnings errors, this is to prevent warnings slipping through.
@@ -47,7 +46,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
# End Checkout the repo
@@ -75,7 +74,7 @@ jobs:
# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -85,7 +84,7 @@ jobs:
# Install any other channel to be used, for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -107,7 +106,7 @@ jobs:
# End Show environment
# Enable Rust Caching
- uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
# Like changing the build host from Ubuntu 20.04 to 22.04 for example.

.github/workflows/hadolint.yml

@@ -8,26 +8,14 @@ on: [
jobs:
hadolint:
name: Validate Dockerfile syntax
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
timeout-minutes: 30
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
# End Checkout the repo
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
network=host
# Download hadolint - https://github.com/hadolint/hadolint/releases
- name: Download hadolint
shell: bash
@@ -38,18 +26,8 @@ jobs:
HADOLINT_VERSION: 2.12.0
# End Download hadolint
# Test Dockerfiles with hadolint
# Test Dockerfiles
- name: Run hadolint
shell: bash
run: hadolint docker/Dockerfile.{debian,alpine}
# End Test Dockerfiles with hadolint
# Test Dockerfiles with docker build checks
- name: Run docker build check
shell: bash
run: |
echo "Checking docker/Dockerfile.debian"
docker build --check . -f docker/Dockerfile.debian
echo "Checking docker/Dockerfile.alpine"
docker build --check . -f docker/Dockerfile.alpine
# End Test Dockerfiles with docker build checks
# End Test Dockerfiles

.github/workflows/release.yml

@@ -2,10 +2,21 @@ name: Release
on:
push:
branches:
- main
paths:
- ".github/workflows/release.yml"
- "src/**"
- "migrations/**"
- "docker/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain.toml"
tags:
branches: # Only on paths above
- main
- release-build-revision
tags: # Always, regardless of paths above
- '*'
jobs:
@@ -13,30 +24,30 @@ jobs:
# Some checks to determine if we need to continue with building a new docker image.
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
skip_check:
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- name: Skip Duplicates Actions
id: skip_check
uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
with:
cancel_others: 'true'
# Only run this when not creating a tag
if: ${{ github.ref_type == 'branch' }}
docker-build:
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
timeout-minutes: 120
needs: skip_check
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
# Start a local docker registry to extract the final Alpine static build binaries
services:
registry:
image: registry:2
ports:
- 5000:5000
# TODO: Start a local docker registry to be used to extract the final Alpine static build images
# services:
# registry:
# image: registry:2
# ports:
# - 5000:5000
env:
SOURCE_COMMIT: ${{ github.sha }}
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
@@ -58,22 +69,22 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
with:
fetch-depth: 0
- name: Initialize QEMU binfmt support
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
with:
platforms: "arm64,arm"
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
with:
buildkitd-config-inline: |
config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
@@ -102,7 +113,7 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -116,7 +127,7 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -131,7 +142,7 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
@@ -144,28 +155,8 @@ jobs:
run: |
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
- name: Configure build cache from/to
shell: bash
run: |
#
# Check if there is a GitHub Container Registry Login and use it for caching
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
else
echo "BAKE_CACHE_FROM="
echo "BAKE_CACHE_TO="
fi
#
- name: Add localhost registry
if: ${{ matrix.base_image == 'alpine' }}
shell: bash
run: |
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
- name: Bake ${{ matrix.base_image }} containers
uses: docker/bake-action@76cc8060bdff6d632a465001e4cf300684c5472c # v5.7.0
uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -177,76 +168,3 @@ jobs:
push: true
files: docker/docker-bake.hcl
targets: "${{ matrix.base_image }}-multi"
set: |
*.cache-from=${{ env.BAKE_CACHE_FROM }}
*.cache-to=${{ env.BAKE_CACHE_TO }}
# Extract the Alpine binaries from the containers
- name: Extract binaries
if: ${{ matrix.base_image == 'alpine' }}
shell: bash
run: |
# Check which main tag we are going to build determined by github.ref_type
if [[ "${{ github.ref_type }}" == "tag" ]]; then
EXTRACT_TAG="latest"
elif [[ "${{ github.ref_type }}" == "branch" ]]; then
EXTRACT_TAG="testing"
fi
# After each extraction the image is removed.
# This is needed because using different platforms doesn't trigger a new pull/download
# Extract amd64 binary
docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp amd64:/vaultwarden vaultwarden-amd64
docker rm --force amd64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
# Extract arm64 binary
docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp arm64:/vaultwarden vaultwarden-arm64
docker rm --force arm64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
# Extract armv7 binary
docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp armv7:/vaultwarden vaultwarden-armv7
docker rm --force armv7
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
# Extract armv6 binary
docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp armv6:/vaultwarden vaultwarden-armv6
docker rm --force armv6
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
# Upload artifacts to Github Actions
- name: "Upload amd64 artifact"
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
path: vaultwarden-amd64
- name: "Upload arm64 artifact"
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
path: vaultwarden-arm64
- name: "Upload armv7 artifact"
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
path: vaultwarden-armv7
- name: "Upload armv6 artifact"
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
path: vaultwarden-armv6
# End Upload artifacts to Github Actions

.github/workflows/releasecache-cleanup.yml

@@ -1,26 +0,0 @@
on:
workflow_dispatch:
inputs:
manual_trigger:
description: "Manual trigger buildcache cleanup"
required: false
default: ""
schedule:
- cron: '0 1 * * FRI'
name: Cleanup
jobs:
releasecache-cleanup:
name: Releasecache Cleanup
runs-on: ubuntu-24.04
continue-on-error: true
timeout-minutes: 30
steps:
- name: Delete vaultwarden-buildcache containers
uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0
with:
package-name: 'vaultwarden-buildcache'
package-type: 'container'
min-versions-to-keep: 0
delete-only-untagged-versions: 'false'

.github/workflows/trivy.yml

@@ -4,6 +4,7 @@ on:
push:
branches:
- main
- release-build-revision
tags:
- '*'
pull_request:
@@ -17,7 +18,7 @@ permissions:
jobs:
trivy-scan:
name: Check
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
timeout-minutes: 30
permissions:
contents: read
@@ -25,10 +26,10 @@ jobs:
actions: read
steps:
- name: Checkout code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0
uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
with:
scan-type: repo
ignore-unfixed: true
@@ -37,6 +38,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.10
uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
with:
sarif_file: 'trivy-results.sarif'

.pre-commit-config.yaml

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v4.5.0
hooks:
- id: check-yaml
- id: check-json

Cargo.lock generated

File diff suppressed because it is too large.

Cargo.toml

@@ -3,7 +3,7 @@ name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2021"
rust-version = "1.79.0"
rust-version = "1.71.1"
resolver = "2"
repository = "https://github.com/dani-garcia/vaultwarden"
@@ -18,17 +18,17 @@ build = "build.rs"
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
# This can improve performance for Alpine builds
enable_mimalloc = ["dep:mimalloc"]
enable_mimalloc = ["mimalloc"]
# This is a development dependency, and should only be used during development!
# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
# if you want to turn off the logging for a specific run.
query_logger = ["dep:diesel_logger"]
query_logger = ["diesel_logger"]
# Enable unstable features, requires nightly
# Currently only used to enable rust's official ip support
@@ -36,11 +36,11 @@ unstable = []
[target."cfg(not(windows))".dependencies]
# Logging
syslog = "6.1.1"
syslog = "6.1.0"
[dependencies]
# Logging
log = "0.4.22"
log = "0.4.20"
fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
@@ -48,59 +48,59 @@ tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and
dotenvy = { version = "0.15.7", default-features = false }
# Lazy initialization
once_cell = "1.19.0"
once_cell = "1.18.0"
# Numerical libraries
num-traits = "0.2.19"
num-derive = "0.4.2"
bigdecimal = "0.4.5"
num-traits = "0.2.17"
num-derive = "0.4.1"
# Web framework
rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
rocket_ws = { version ="0.1.1" }
rocket = { version = "0.5.0-rc.4", features = ["tls", "json"], default-features = false }
rocket_ws = { version ="0.1.0-rc.4" }
# WebSockets libraries
rmpv = "1.3.0" # MessagePack library
tokio-tungstenite = "0.20.1"
rmpv = "1.0.1" # MessagePack library
# Concurrent HashMap used for WebSocket messaging and favicons
dashmap = "6.1.0"
dashmap = "5.5.3"
# Async futures
futures = "0.3.30"
tokio = { version = "1.40.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
futures = "0.3.29"
tokio = { version = "1.34.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
# A generic serialization/deserialization framework
serde = { version = "1.0.210", features = ["derive"] }
serde_json = "1.0.128"
serde = { version = "1.0.192", features = ["derive"] }
serde_json = "1.0.108"
# A safe, extensible ORM and Query builder
diesel = { version = "2.2.4", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.2.0"
diesel = { version = "2.1.4", features = ["chrono", "r2d2"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }
# Bundled/Static SQLite
libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true }
libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }
# Crypto-related libraries
rand = { version = "0.8.5", features = ["small_rng"] }
ring = "0.17.8"
ring = "0.17.5"
# UUID generation
uuid = { version = "1.10.0", features = ["v4"] }
uuid = { version = "1.5.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.10.0"
time = "0.3.36"
chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.8.4"
time = "0.3.30"
# Job scheduler
job_scheduler_ng = "2.0.5"
job_scheduler_ng = "2.0.4"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.6.0"
data-encoding = "2.4.0"
# JWT library
jsonwebtoken = "9.3.0"
jsonwebtoken = "9.1.0"
# TOTP library
totp-lite = "2.0.1"
@ -112,64 +112,66 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
webauthn-rs = "0.3.2"
# Handling of URLs for WebAuthn and favicons
url = "2.5.2"
url = "2.4.1"
# Email libraries
lettre = { version = "0.11.9", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.9"
lettre = { version = "0.11.1", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
email_address = "0.2.4"
# HTML Template library
handlebars = { version = "6.1.0", features = ["dir_source"] }
handlebars = { version = "4.5.0", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.7", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.1"
reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
# Favicon extraction libraries
html5gum = "0.5.7"
regex = { version = "1.10.6", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.1"
bytes = "1.7.2"
regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.0"
bytes = "1.5.0"
# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.53.1", features = ["async"] }
cached = { version = "0.46.1", features = ["async"] }
# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
cookie_store = "0.21.0"
cookie = "0.16.2"
cookie_store = "0.19.1"
# Used by U2F, JWT and PostgreSQL
openssl = "0.10.66"
openssl = "=0.10.57"
# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width
# Newer versions would force-add a dynamically linked library, which prevents the build from being static
openssl-sys = "=0.9.92"
# CLI argument parsing
pico-args = "0.5.0"
# Macro ident concatenation
paste = "1.0.15"
governor = "0.6.3"
paste = "1.0.14"
governor = "0.6.0"
# Check client versions for specific features.
semver = "1.0.23"
semver = "1.0.20"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
which = "6.0.3"
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
which = "5.0.0"
# Argon2 library with support for the PHC format
argon2 = "0.5.3"
argon2 = "0.5.2"
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
rpassword = "7.3.1"
# Strip debuginfo from the release builds
# The symbols are there to provide better panic traces
# Also enable fat LTO and use 1 codegen unit for optimizations
# Also enable thin LTO for some optimizations
[profile.release]
strip = "debuginfo"
lto = "fat"
codegen-units = 1
lto = "thin"
# A little bit of a speedup
[profile.dev]
@ -179,86 +181,3 @@ split-debuginfo = "unpacked"
# This is a huge speed improvement during testing
[profile.dev.package.argon2]
opt-level = 3
# Optimize for size
[profile.release-micro]
inherits = "release"
opt-level = "z"
strip = "symbols"
lto = "fat"
codegen-units = 1
panic = "abort"
# Profile for systems with low resources
# It will use fewer resources during the build
[profile.release-low]
inherits = "release"
strip = "symbols"
lto = "thin"
codegen-units = 16
# Linting config
# https://doc.rust-lang.org/rustc/lints/groups.html
[lints.rust]
# Forbid
unsafe_code = "forbid"
non_ascii_idents = "forbid"
# Deny
deprecated_in_future = "deny"
future_incompatible = { level = "deny", priority = -1 }
keyword_idents = { level = "deny", priority = -1 }
let_underscore = { level = "deny", priority = -1 }
noop_method_call = "deny"
refining_impl_trait = { level = "deny", priority = -1 }
rust_2018_idioms = { level = "deny", priority = -1 }
rust_2021_compatibility = { level = "deny", priority = -1 }
# rust_2024_compatibility = { level = "deny", priority = -1 } # Enable once we are at MSRV 1.81.0
single_use_lifetimes = "deny"
trivial_casts = "deny"
trivial_numeric_casts = "deny"
unused = { level = "deny", priority = -1 }
unused_import_braces = "deny"
unused_lifetimes = "deny"
unused_qualifications = "deny"
variant_size_differences = "deny"
# The lints below are part of the rust_2024_compatibility group
static-mut-refs = "deny"
unsafe-op-in-unsafe-fn = "deny"
# https://rust-lang.github.io/rust-clippy/stable/index.html
[lints.clippy]
# Warn
dbg_macro = "warn"
todo = "warn"
# Deny
case_sensitive_file_extension_comparisons = "deny"
cast_lossless = "deny"
clone_on_ref_ptr = "deny"
equatable_if_let = "deny"
filter_map_next = "deny"
float_cmp_const = "deny"
inefficient_to_string = "deny"
iter_on_empty_collections = "deny"
iter_on_single_items = "deny"
linkedlist = "deny"
macro_use_imports = "deny"
manual_assert = "deny"
manual_instant_elapsed = "deny"
manual_string_new = "deny"
match_on_vec_items = "deny"
match_wildcard_for_single_variants = "deny"
mem_forget = "deny"
needless_continue = "deny"
needless_lifetimes = "deny"
option_option = "deny"
string_add_assign = "deny"
string_to_string = "deny"
unnecessary_join = "deny"
unnecessary_self_imports = "deny"
unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
verbose_file_reads = "deny"
zero_sized_map_values = "deny"

View File

@ -92,11 +92,4 @@ Thanks for your contribution to the project!
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/IQ333777" style="width: 75px">
<sub><b>IQ333777</b></sub>
</a>
</td>
</tr>
</table>

View File

@ -39,11 +39,7 @@ Thank you for helping keep Vaultwarden and our users safe!
# How to contact us
- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (users: `@danig:matrix.org` and/or `@blackdex:matrix.org`)
- You can send an ![security-contact](/.github/security-contact.gif) to report a security issue.<br>
If you want to send an encrypted email you can use the following GPG key: 13BB3A34C9E380258CE43D595CB150B31F6426BC<br>
It can be found on several public GPG key servers.<br>
* https://keys.openpgp.org/search?q=security%40vaultwarden.org
* https://keys.mailvelope.com/pks/lookup?op=get&search=security%40vaultwarden.org
* https://pgpkeys.eu/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
* https://keyserver.ubuntu.com/pks/lookup?search=security%40vaultwarden.org&fingerprint=on&op=index
- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
- You can send an ![security-contact](/.github/security-contact.gif) to report a security issue.
- If you want to send an encrypted email you can use the following GPG key:<br>
https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index

View File

@ -17,20 +17,6 @@ fn main() {
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
);
// Use check-cfg to let cargo know which cfg's we define,
// and avoid warnings when they are used in the code.
println!("cargo::rustc-check-cfg=cfg(sqlite)");
println!("cargo::rustc-check-cfg=cfg(mysql)");
println!("cargo::rustc-check-cfg=cfg(postgresql)");
println!("cargo::rustc-check-cfg=cfg(query_logger)");
// Rerun when these paths are changed.
// Someone could have checked out a tag or specific commit, but no other files changed.
println!("cargo:rerun-if-changed=.git");
println!("cargo:rerun-if-changed=.git/HEAD");
println!("cargo:rerun-if-changed=.git/index");
println!("cargo:rerun-if-changed=.git/refs/tags");
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
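The pattern those check-cfg lines support looks roughly like this in a build script (a simplified sketch, not the full build.rs):

fn main() {
    // Declare the custom cfg so the compiler accepts `#[cfg(sqlite)]` without warnings.
    println!("cargo::rustc-check-cfg=cfg(sqlite)");
    // Cargo exposes enabled features to build scripts as CARGO_FEATURE_* env vars.
    if std::env::var("CARGO_FEATURE_SQLITE").is_ok() {
        println!("cargo:rustc-cfg=sqlite");
    }
}

// Elsewhere in the crate:
// #[cfg(sqlite)]
// mod sqlite_support;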
@ -56,11 +42,11 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
/// This method reads info from Git, namely tags, branch, and revision
/// To access these values, use:
/// - `env!("GIT_EXACT_TAG")`
/// - `env!("GIT_LAST_TAG")`
/// - `env!("GIT_BRANCH")`
/// - `env!("GIT_REV")`
/// - `env!("VW_VERSION")`
/// - env!("GIT_EXACT_TAG")
/// - env!("GIT_LAST_TAG")
/// - env!("GIT_BRANCH")
/// - env!("GIT_REV")
/// - env!("VW_VERSION")
fn version_from_git_info() -> Result<String, std::io::Error> {
// The exact tag for the current commit, can be empty when
// the current commit doesn't have an associated tag
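The mechanism behind those `env!` calls: the build script exports each value with `cargo:rustc-env=...`, and the crate reads it at compile time. A minimal sketch with the Git plumbing elided (the literal version is a placeholder):

// build.rs (sketch)
fn main() {
    let version = "0.0.0-git"; // in reality derived from the Git info described above
    println!("cargo:rustc-env=VW_VERSION={version}");
}

// src/main.rs (sketch)
// pub const VERSION: &str = env!("VW_VERSION");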

View File

@ -1,13 +1,12 @@
---
vault_version: "v2024.6.2c"
vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b"
# Cross Compile Docker Helper Scripts v1.5.0
vault_version: "v2023.10.0"
vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935"
# Cross Compile Docker Helper Scripts v1.3.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa"
rust_version: 1.81.0 # Rust version to be used
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.73.0 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: "3.20" # Alpine version to be used
alpine_version: 3.18 # Alpine version to be used
# For which platforms/architectures will we try to build images
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
# Determine the build images per OS/Arch

View File

@ -1,5 +1,4 @@
# syntax=docker/dockerfile:1
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@ -19,27 +18,27 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
# [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
# [docker.io/vaultwarden/web-vault:v2024.6.2c]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
# [docker.io/vaultwarden/web-vault:v2023.10.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here; they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.81.0 AS build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.81.0 AS build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.81.0 AS build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.81.0 AS build_armv6
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build
ARG TARGETARCH
ARG TARGETVARIANT
ARG TARGETPLATFORM
@ -59,29 +58,33 @@ ENV DEBIAN_FRONTEND=noninteractive \
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" && \
rustup set profile minimal
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for Cargo on Alpine based builds
# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
RUN source /env-cargo && \
rustup target add "${CARGO_TARGET}"
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
ARG CARGO_PROFILE=release
ARG VW_VERSION
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
@ -94,13 +97,10 @@ RUN source /env-cargo && \
# To avoid copying unneeded files, use .dockerignore
COPY . .
ARG VW_VERSION
# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
# Make sure that we actually build the project by updating the src/main.rs timestamp
# Also do this for build.rs to ensure the version is rechecked
touch build.rs src/main.rs && \
touch src/main.rs && \
# Create a symlink to the binary target folder to easily copy the binary in the final stage
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@ -126,7 +126,7 @@ RUN source /env-cargo && \
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
@ -143,12 +143,14 @@ RUN mkdir /data && \
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY docker/healthcheck.sh docker/start.sh /
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

View File

@ -1,5 +1,4 @@
# syntax=docker/dockerfile:1
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@ -19,24 +18,24 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
# $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
# [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
# [docker.io/vaultwarden/web-vault:v2024.6.2c]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
# [docker.io/vaultwarden/web-vault:v2023.10.0]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 image no matter which Build Platform, since these are all bash scripts
## and the scripts do not differ significantly, if at all, between platforms
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.81.0-slim-bookworm AS build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
@ -65,40 +64,32 @@ RUN apt-get update && \
"libc6-$(xx-info debian-arch)-cross" \
"libc6-dev-$(xx-info debian-arch)-cross" \
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
xx-apt-get install -y \
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
RUN xx-apt-get install -y \
--no-install-recommends \
gcc \
libmariadb3 \
libpq-dev \
libpq5 \
libssl-dev \
zlib1g-dev && \
libssl-dev && \
# Force install arch-dependent mariadb dev packages
# Installing them the normal way breaks several other packages (again)
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
dpkg --force-all -i ./libmariadb-dev*.deb && \
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
dpkg --force-all -i ./libmariadb-dev*.deb
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" && \
rustup set profile minimal
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Set some special variables, if needed, to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@ -111,16 +102,19 @@ RUN source /env-cargo && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
RUN source /env-cargo && \
rustup target add "${CARGO_TARGET}"
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
ARG CARGO_PROFILE=release
ARG VW_VERSION
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
@ -133,13 +127,10 @@ RUN source /env-cargo && \
# To avoid copying unneeded files, use .dockerignore
COPY . .
ARG VW_VERSION
# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
# Make sure that we actually build the project by updating the src/main.rs timestamp
# Also do this for build.rs to ensure the version is rechecked
touch build.rs src/main.rs && \
touch src/main.rs && \
# Create a symlink to the binary target folder to easily copy the binary in the final stage
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@ -186,12 +177,14 @@ RUN mkdir /data && \
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY docker/healthcheck.sh docker/start.sh /
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

View File

@ -1,5 +1,4 @@
# syntax=docker/dockerfile:1
# check=skip=FromPlatformFlagConstDisallowed,RedundantTargetPlatform
# This file was generated using a Jinja2 template.
# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make`
@ -27,7 +26,7 @@
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }}
# [docker.io/vaultwarden/web-vault:{{ vault_version }}]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault
{% if base == "debian" %}
########################## Cross Compile Docker Helper Scripts ##########################
@ -39,13 +38,13 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here; they will only be loaded when actually used
{% for arch in build_stage_image[base].arch_image %}
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }}
{% endfor %}
{% endif %}
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build
{% if base == "debian" %}
COPY --from=xx / /
{% endif %}
@ -83,42 +82,34 @@ RUN apt-get update && \
"libc6-$(xx-info debian-arch)-cross" \
"libc6-dev-$(xx-info debian-arch)-cross" \
"linux-libc-dev-$(xx-info debian-arch)-cross" && \
xx-apt-get install -y \
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
RUN xx-apt-get install -y \
--no-install-recommends \
gcc \
libmariadb3 \
libpq-dev \
libpq5 \
libssl-dev \
zlib1g-dev && \
libssl-dev && \
# Force install arch-dependent mariadb dev packages
# Installing them the normal way breaks several other packages (again)
apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
dpkg --force-all -i ./libmariadb-dev*.deb && \
# Run xx-cargo early, since it sometimes seems to break when run at a later stage
echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
dpkg --force-all -i ./libmariadb-dev*.deb
{% endif %}
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" && \
rustup set profile minimal
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
{% if base == "debian" %}
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Set some special variables, if needed, to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@ -131,29 +122,31 @@ RUN source /env-cargo && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Environment variables for Cargo on Alpine based builds
# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
{% endif %}
RUN source /env-cargo && \
rustup target add "${CARGO_TARGET}"
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
ARG CARGO_PROFILE=release
# Configure the DB ARG as late as possible to not invalidate the cached layers above
{% if base == "debian" %}
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% endif %}
RUN source /env-cargo && \
rustup target add "${CARGO_TARGET}"
ARG CARGO_PROFILE=release
ARG VW_VERSION
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain.toml ./rust-toolchain.toml
COPY ./build.rs ./build.rs
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@ -165,13 +158,10 @@ RUN source /env-cargo && \
# To avoid copying unneeded files, use .dockerignore
COPY . .
ARG VW_VERSION
# Builds again, this time it will be the actual source files being built
RUN source /env-cargo && \
# Make sure that we actually build the project by updating the src/main.rs timestamp
# Also do this for build.rs to ensure the version is rechecked
touch build.rs src/main.rs && \
touch src/main.rs && \
# Create a symlink to the binary target folder to easily copy the binary in the final stage
cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@ -230,12 +220,14 @@ RUN mkdir /data && \
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY docker/healthcheck.sh docker/start.sh /
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/final/vaultwarden .

View File

@ -11,11 +11,6 @@ With just these two files we can build both Debian and Alpine images for the fol
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)
There are also some unsupported platforms for Debian-based images. These are not built and tested by default; they are only provided to make it easier for users to build for these architectures.
- 386 (linux/386)
- ppc64le (linux/ppc64le)
- s390x (linux/s390x)
To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

View File

@ -125,40 +125,6 @@ target "debian-armv6" {
tags = generate_tags("", "-armv6")
}
// ==== Start of unsupported Debian architecture targets ===
// These are provided just to help users build for these rare platforms
// They will not be built by default
target "debian-386" {
inherits = ["debian"]
platforms = ["linux/386"]
tags = generate_tags("", "-386")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
}
}
target "debian-ppc64le" {
inherits = ["debian"]
platforms = ["linux/ppc64le"]
tags = generate_tags("", "-ppc64le")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
}
}
target "debian-s390x" {
inherits = ["debian"]
platforms = ["linux/s390x"]
tags = generate_tags("", "-s390x")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
}
}
// ==== End of unsupported Debian architecture targets ===
// A Group to build all platforms individually for local testing
group "debian-all" {
targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

View File

@ -1,20 +1,12 @@
#!/usr/bin/env sh
#!/bin/sh
# Use the value of the corresponding env var (if present),
# or a default value otherwise.
: "${DATA_FOLDER:="/data"}"
: "${DATA_FOLDER:="data"}"
: "${ROCKET_PORT:="80"}"
: "${ENV_FILE:="/.env"}"
CONFIG_FILE="${DATA_FOLDER}"/config.json
# Check if the $ENV_FILE file exists and is readable
# If that is the case, load it into the environment before running any check
if [ -r "${ENV_FILE}" ]; then
# shellcheck disable=SC1090
. "${ENV_FILE}"
fi
# Given a config key, return the corresponding config value from the
# config file. If the key doesn't exist, return an empty string.
get_config_val() {

View File

@ -1,9 +1,5 @@
#!/bin/sh
if [ -n "${UMASK}" ]; then
umask "${UMASK}"
fi
if [ -r /etc/vaultwarden.sh ]; then
. /etc/vaultwarden.sh
elif [ -r /etc/bitwarden_rs.sh ]; then

View File

@ -1 +0,0 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;

View File

@ -1 +0,0 @@
ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;

View File

@ -1 +0,0 @@
DROP TABLE twofactor_duo_ctx;

View File

@ -1,8 +0,0 @@
CREATE TABLE twofactor_duo_ctx (
state VARCHAR(64) NOT NULL,
user_email VARCHAR(255) NOT NULL,
nonce VARCHAR(64) NOT NULL,
exp BIGINT NOT NULL,
PRIMARY KEY (state)
);

View File

@ -1 +0,0 @@
ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;

View File

@ -1 +0,0 @@
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

View File

@ -1,3 +0,0 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;

View File

@ -1,3 +0,0 @@
ALTER TABLE twofactor
ALTER COLUMN last_used TYPE BIGINT,
ALTER COLUMN last_used SET NOT NULL;

View File

@ -1 +0,0 @@
DROP TABLE twofactor_duo_ctx;

View File

@ -1,8 +0,0 @@
CREATE TABLE twofactor_duo_ctx (
state VARCHAR(64) NOT NULL,
user_email VARCHAR(255) NOT NULL,
nonce VARCHAR(64) NOT NULL,
exp BIGINT NOT NULL,
PRIMARY KEY (state)
);

View File

@ -1 +0,0 @@
ALTER TABLE twofactor_incomplete DROP COLUMN device_type;

View File

@ -1 +0,0 @@
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

View File

@ -1 +0,0 @@
-- Integer size in SQLite is already i64, so we don't need to do anything

View File

@ -1 +0,0 @@
-- Integer size in SQLite is already i64, so we don't need to do anything

View File

@ -1 +0,0 @@
DROP TABLE twofactor_duo_ctx;

View File

@ -1,8 +0,0 @@
CREATE TABLE twofactor_duo_ctx (
state TEXT NOT NULL,
user_email TEXT NOT NULL,
nonce TEXT NOT NULL,
exp INTEGER NOT NULL,
PRIMARY KEY (state)
);

View File

@ -1 +0,0 @@
ALTER TABLE `twofactor_incomplete` DROP COLUMN `device_type`;

View File

@ -1 +0,0 @@
ALTER TABLE twofactor_incomplete ADD COLUMN device_type INTEGER NOT NULL DEFAULT 14; -- 14 = Unknown Browser

View File

@ -1,4 +1,4 @@
[toolchain]
channel = "1.81.0"
channel = "1.73.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"

View File

@ -1,5 +1,4 @@
use once_cell::sync::Lazy;
use reqwest::Method;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::env;
@ -14,19 +13,14 @@ use rocket::{
};
use crate::{
api::{
core::{log_event, two_factor},
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
},
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure},
api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
config::ConfigBuilder,
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
error::{Error, MapResult},
http_client::make_http_request,
mail,
util::{
container_base_image, format_naive_datetime_local, get_display_size, get_web_vault_version,
is_running_in_container, NumberOrString,
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
},
CONFIG, VERSION,
};
@ -170,12 +164,7 @@ struct LoginForm {
}
#[post("/", data = "<data>")]
fn post_admin_login(
data: Form<LoginForm>,
cookies: &CookieJar<'_>,
ip: ClientIp,
secure: Secure,
) -> Result<Redirect, AdminResponse> {
fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
let data = data.into_inner();
let redirect = data.redirect;
@ -197,10 +186,9 @@ fn post_admin_login(
let cookie = Cookie::build((COOKIE_NAME, jwt))
.path(admin_path())
.max_age(time::Duration::minutes(CONFIG.admin_session_lifetime()))
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
.same_site(SameSite::Strict)
.http_only(true)
.secure(secure.https);
.http_only(true);
cookies.add(cookie);
if let Some(redirect) = redirect {
@ -273,8 +261,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
render_admin_login(None, None)
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
email: String,
}
@ -298,7 +286,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
if CONFIG.mail_enabled() {
mail::send_invite(user, None, None, &CONFIG.invitation_org_name(), None).await
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
} else {
let invitation = Invitation::new(&user.email);
invitation.save(conn).await
@ -334,9 +322,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
let mut users_json = Vec::with_capacity(users.len());
for u in users {
let mut usr = u.to_json(&mut conn).await;
usr["userEnabled"] = json!(u.enabled);
usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["lastActive"] = match u.last_active(&mut conn).await {
usr["UserEnabled"] = json!(u.enabled);
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["LastActive"] = match u.last_active(&mut conn).await {
Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
None => json!(None::<String>),
};
@ -354,7 +342,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
let mut usr = u.to_json(&mut conn).await;
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
usr["user_enabled"] = json!(u.enabled);
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["last_active"] = match u.last_active(&mut conn).await {
@ -372,8 +360,8 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
if let Some(u) = User::find_by_mail(mail, &mut conn).await {
let mut usr = u.to_json(&mut conn).await;
usr["userEnabled"] = json!(u.enabled);
usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["UserEnabled"] = json!(u.enabled);
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
Ok(Json(usr))
} else {
err_code!("User doesn't exist", Status::NotFound.code);
@ -384,8 +372,8 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
let u = get_user_or_404(uuid, &mut conn).await?;
let mut usr = u.to_json(&mut conn).await;
usr["userEnabled"] = json!(u.enabled);
usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["UserEnabled"] = json!(u.enabled);
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
Ok(Json(usr))
}
@ -402,7 +390,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
EventType::OrganizationUserRemoved as i32,
&user_org.uuid,
&user_org.org_uuid,
ACTING_ADMIN_USER,
String::from(ACTING_ADMIN_USER),
14, // Use UnknownBrowser type
&token.ip.ip,
&mut conn,
@ -421,7 +409,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
if CONFIG.push_enabled() {
for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
match unregister_push_device(device.push_uuid).await {
match unregister_push_device(device.uuid).await {
Ok(r) => r,
Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
};
@ -457,10 +445,9 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
}
#[post("/users/<uuid>/remove-2fa")]
async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(uuid, &mut conn).await?;
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
user.totp_recover = None;
user.save(&mut conn).await
}
@ -474,7 +461,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
}
if CONFIG.mail_enabled() {
mail::send_invite(&user, None, None, &CONFIG.invitation_org_name(), None).await
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
} else {
Ok(())
}
@ -483,7 +470,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
}
}
#[derive(Debug, Deserialize)]
#[derive(Deserialize, Debug)]
struct UserOrgTypeData {
user_type: NumberOrString,
user_uuid: String,
@ -518,11 +505,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
Ok(_) => {}
Err(OrgPolicyErr::TwoFactorMissing) => {
if CONFIG.email_2fa_auto_fallback() {
two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
} else {
err!("You cannot modify this user to this type because they have not setup 2FA");
}
err!("You cannot modify this user to this type because it has no two-step login method activated");
}
Err(OrgPolicyErr::SingleOrgEnforced) => {
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
@ -534,7 +517,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
EventType::OrganizationUserUpdated as i32,
&user_to_edit.uuid,
&data.org_uuid,
ACTING_ADMIN_USER,
String::from(ACTING_ADMIN_USER),
14, // Use UnknownBrowser type
&token.ip.ip,
&mut conn,
@ -562,7 +545,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
organizations_json.push(org);
}
@ -576,6 +559,11 @@ async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -
org.delete(&mut conn).await
}
#[derive(Deserialize)]
struct WebVaultVersion {
version: String,
}
#[derive(Deserialize)]
struct GitRelease {
tag_name: String,
@ -597,15 +585,15 @@ struct TimeApi {
}
async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
Ok(make_http_request(Method::GET, url)?.send().await?.error_for_status()?.json::<T>().await?)
let json_api = get_reqwest_client();
Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
}
async fn has_http_access() -> bool {
let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
Ok(r) => r,
Err(_) => return false,
};
match req.send().await {
let http_access = get_reqwest_client();
match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
Ok(r) => r.status().is_success(),
_ => false,
}
@ -615,7 +603,7 @@ use cached::proc_macro::cached;
/// Cache this function to avoid hitting the API call rate limit. GitHub only allows 60 requests per hour, and we already use 3 of them here.
/// It will cache this function for 300 seconds (5 minutes), which should prevent exhaustion of the rate limit.
#[cached(time = 300, sync_writes = true)]
async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
// If the HTTP check failed, do not even attempt to check for new versions since we were not able to connect to github.com anyway.
if has_http_access {
(
@ -632,9 +620,9 @@ async fn get_release_info(has_http_access: bool, running_within_container: bool)
}
_ => "-".to_string(),
},
// Do not fetch the web-vault version when running within a container.
// Do not fetch the web-vault version when running within Docker.
// The web-vault version is embedded within the container itself, and should not be updated manually
if running_within_container {
if running_within_docker {
"-".to_string()
} else {
match get_json_api::<GitRelease>(
@ -675,8 +663,20 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
use chrono::prelude::*;
use std::net::ToSocketAddrs;
// Get current running versions
let web_vault_version: WebVaultVersion =
match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => WebVaultVersion {
version: String::from("Version file missing"),
},
},
};
// Execute some environment checks
let running_within_container = is_running_in_container();
let running_within_docker = is_running_in_docker();
let has_http_access = has_http_access().await;
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|| env::var_os("http_proxy").is_some()
@ -690,12 +690,12 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
};
let (latest_release, latest_commit, latest_web_build) =
get_release_info(has_http_access, running_within_container).await;
get_release_info(has_http_access, running_within_docker).await;
let ip_header_name = &ip_header.0.unwrap_or_default();
// Get current running versions
let web_vault_version = get_web_vault_version();
let ip_header_name = match &ip_header.0 {
Some(h) => h,
_ => "",
};
let diagnostics_json = json!({
"dns_resolved": dns_resolved,
@ -703,13 +703,13 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
"latest_release": latest_release,
"latest_commit": latest_commit,
"web_vault_enabled": &CONFIG.web_vault_enabled(),
"web_vault_version": web_vault_version,
"web_vault_version": web_vault_version.version.trim_start_matches('v'),
"latest_web_build": latest_web_build,
"running_within_container": running_within_container,
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
"running_within_docker": running_within_docker,
"docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
"has_http_access": has_http_access,
"ip_header_exists": !ip_header_name.is_empty(),
"ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
"ip_header_exists": &ip_header.0.is_some(),
"ip_header_match": ip_header_name == CONFIG.ip_header(),
"ip_header_name": ip_header_name,
"ip_header_config": &CONFIG.ip_header(),
"uses_proxy": uses_proxy,
@ -717,8 +717,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
"db_version": get_sql_server_version(&mut conn).await,
"admin_url": format!("{}/diagnostics", admin_url()),
"overrides": &CONFIG.get_overrides().join(", "),
"host_arch": env::consts::ARCH,
"host_os": env::consts::OS,
"host_arch": std::env::consts::ARCH,
"host_os": std::env::consts::OS,
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
"ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
@ -737,27 +737,18 @@ fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
#[post("/config", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
let data: ConfigBuilder = data.into_inner();
if let Err(e) = CONFIG.update_config(data) {
err!(format!("Unable to save config: {e:?}"))
}
Ok(())
CONFIG.update_config(data)
}
#[post("/config/delete")]
fn delete_config(_token: AdminToken) -> EmptyResult {
if let Err(e) = CONFIG.delete_user_config() {
err!(format!("Unable to delete config: {e:?}"))
}
Ok(())
CONFIG.delete_user_config()
}
#[post("/config/backup_db")]
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult<String> {
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
if *CAN_BACKUP {
match backup_database(&mut conn).await {
Ok(f) => Ok(format!("Backup to '{f}' was successful")),
Err(e) => err!(format!("Backup was unsuccessful {e}")),
}
backup_database(&mut conn).await
} else {
err!("Can't back up current DB (Only SQLite supports this feature)");
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,17 +1,15 @@
use chrono::{TimeDelta, Utc};
use chrono::{Duration, Utc};
use rocket::{serde::json::Json, Route};
use serde_json::Value;
use crate::{
api::{
core::{CipherSyncData, CipherSyncType},
EmptyResult, JsonResult,
EmptyResult, JsonResult, JsonUpcase, NumberOrString,
},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail,
util::NumberOrString,
CONFIG,
mail, CONFIG,
};
pub fn routes() -> Vec<Route> {
@ -20,7 +18,6 @@ pub fn routes() -> Vec<Route> {
get_grantees,
get_emergency_access,
put_emergency_access,
post_emergency_access,
delete_emergency_access,
post_delete_emergency_access,
send_invite,
@ -40,66 +37,45 @@ pub fn routes() -> Vec<Route> {
// region get
#[get("/emergency-access/trusted")]
async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
if !CONFIG.emergency_access_allowed() {
return Json(json!({
"data": [{
"id": "",
"status": 2,
"type": 0,
"waitTimeDays": 0,
"granteeId": "",
"email": "",
"name": "NOTE: Emergency Access is disabled!",
"object": "emergencyAccessGranteeDetails",
async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
}],
"object": "list",
"continuationToken": null
}));
}
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
emergency_access_list_json.push(grantee)
}
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
}
Json(json!({
"data": emergency_access_list_json,
"object": "list",
"continuationToken": null
}))
Ok(Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/emergency-access/granted")]
async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
let emergency_access_list = if CONFIG.emergency_access_allowed() {
EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
} else {
Vec::new()
};
async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
}
Json(json!({
"data": emergency_access_list_json,
"object": "list",
"continuationToken": null
}))
Ok(Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/emergency-access/<emer_id>")]
async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
Some(emergency_access) => Ok(Json(
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
)),
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
None => err!("Emergency access not valid."),
}
}
@ -109,49 +85,42 @@ async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn)
// region put/post
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct EmergencyAccessUpdateData {
r#type: NumberOrString,
wait_time_days: i32,
key_encrypted: Option<String>,
Type: NumberOrString,
WaitTimeDays: i32,
KeyEncrypted: Option<String>,
}
#[put("/emergency-access/<emer_id>", data = "<data>")]
async fn put_emergency_access(
emer_id: &str,
data: Json<EmergencyAccessUpdateData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
post_emergency_access(emer_id, data, headers, conn).await
async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
post_emergency_access(emer_id, data, conn).await
}
#[post("/emergency-access/<emer_id>", data = "<data>")]
async fn post_emergency_access(
emer_id: &str,
data: Json<EmergencyAccessUpdateData>,
headers: Headers,
data: JsonUpcase<EmergencyAccessUpdateData>,
mut conn: DbConn,
) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let data: EmergencyAccessUpdateData = data.into_inner();
let data: EmergencyAccessUpdateData = data.into_inner().data;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => emergency_access,
None => err!("Emergency access not valid."),
};
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
emergency_access.atype = new_type;
emergency_access.wait_time_days = data.wait_time_days;
if data.key_encrypted.is_some() {
emergency_access.key_encrypted = data.key_encrypted;
emergency_access.wait_time_days = data.WaitTimeDays;
if data.KeyEncrypted.is_some() {
emergency_access.key_encrypted = data.KeyEncrypted;
}
emergency_access.save(&mut conn).await?;
@ -164,23 +133,19 @@ async fn post_emergency_access(
#[delete("/emergency-access/<emer_id>")]
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let emergency_access = match (
EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
) {
(Some(grantor_emer), None) => {
info!("Grantor deleted emergency access {emer_id}");
grantor_emer
let grantor_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => {
if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
err!("Emergency access not valid.")
}
(None, Some(grantee_emer)) => {
info!("Grantee deleted emergency access {emer_id}");
grantee_emer
emer
}
_ => err!("Emergency access not valid."),
None => err!("Emergency access not valid."),
};
emergency_access.delete(&mut conn).await?;
Ok(())
}
@ -195,24 +160,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
// region invite
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct EmergencyAccessInviteData {
email: String,
r#type: NumberOrString,
wait_time_days: i32,
Email: String,
Type: NumberOrString,
WaitTimeDays: i32,
}
#[post("/emergency-access/invite", data = "<data>")]
async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let data: EmergencyAccessInviteData = data.into_inner();
let email = data.email.to_lowercase();
let wait_time_days = data.wait_time_days;
let data: EmergencyAccessInviteData = data.into_inner().data;
let email = data.Email.to_lowercase();
let wait_time_days = data.WaitTimeDays;
let emergency_access_status = EmergencyAccessStatus::Invited as i32;
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
@ -224,7 +189,7 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
err!("You can not set yourself as an emergency contact.")
}
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", &email))
@ -241,10 +206,9 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
let mut user = User::new(email.clone());
user.save(&mut conn).await?;
(user, true)
user
}
Some(user) if user.password_hash.is_empty() => (user, true),
Some(user) => (user, false),
Some(user) => user,
};
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@ -272,9 +236,15 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
&grantor_user.email,
)
.await?;
} else if !new_user {
// if mail is not enabled, immediately accept the invitation for existing users
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &mut conn).await {
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
},
None => err!("Grantee user not found."),
}
}
Ok(())
@ -282,14 +252,17 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
#[post("/emergency-access/<emer_id>/reinvite")]
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.grantor_uuid != headers.user.uuid {
err!("Emergency access not valid.");
}
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
err!("The grantee user is already accepted or confirmed to the organization");
}
@ -315,29 +288,34 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
&grantor_user.email,
)
.await?;
} else if !grantee_user.password_hash.is_empty() {
// accept the invitation for existing user
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
} else {
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
// Automatically mark user as accepted if no email invites
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
}
Ok(())
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct AcceptData {
token: String,
Token: String,
}
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let data: AcceptData = data.into_inner();
let token = &data.token;
let data: AcceptData = data.into_inner().data;
let token = &data.Token;
let claims = decode_emergency_access_invite(token)?;
// This can happen if the user who received the invite used a different email to sign up.
@ -354,10 +332,7 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
None => err!("Invited user not found"),
};
// We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
// The uuid of the grantee gets stored once accepted.
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -372,7 +347,10 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
&& grantor_user.name == claims.grantor_name
&& grantor_user.email == claims.grantor_email
{
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@ -384,27 +362,46 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
}
}
async fn accept_invite_process(
grantee_uuid: &str,
emergency_access: &mut EmergencyAccess,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
emergency_access.email = None;
emergency_access.save(conn).await
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct ConfirmData {
key: String,
Key: String,
}
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
async fn confirm_emergency_access(
emer_id: &str,
data: Json<ConfirmData>,
data: JsonUpcase<ConfirmData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let confirming_user = headers.user;
let data: ConfirmData = data.into_inner();
let key = data.key;
let data: ConfirmData = data.into_inner().data;
let key = data.Key;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -447,16 +444,17 @@ async fn confirm_emergency_access(
#[post("/emergency-access/<emer_id>/initiate")]
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let initiating_user = headers.user;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|| emergency_access.grantee_uuid != Some(initiating_user.uuid)
{
err!("Emergency access not valid.")
}
@ -486,15 +484,16 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
#[post("/emergency-access/<emer_id>/approve")]
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|| emergency_access.grantor_uuid != headers.user.uuid
{
err!("Emergency access not valid.")
}
@ -523,20 +522,25 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
#[post("/emergency-access/<emer_id>/reject")]
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let mut emergency_access =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|| emergency_access.grantor_uuid != headers.user.uuid
{
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
Some(user) => user,
@ -547,7 +551,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
emergency_access.save(&mut conn).await?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
}
Ok(Json(emergency_access.to_json()))
} else {
@ -561,10 +565,9 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
#[post("/emergency-access/<emer_id>/view")]
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -591,19 +594,18 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
}
Ok(Json(json!({
"ciphers": ciphers_json,
"keyEncrypted": &emergency_access.key_encrypted,
"object": "emergencyAccessView",
"Ciphers": ciphers_json,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessView",
})))
}
#[post("/emergency-access/<emer_id>/takeover")]
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let requesting_user = headers.user;
let emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -618,40 +620,39 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
};
let result = json!({
"kdf": grantor_user.client_kdf_type,
"kdfIterations": grantor_user.client_kdf_iter,
"kdfMemory": grantor_user.client_kdf_memory,
"kdfParallelism": grantor_user.client_kdf_parallelism,
"keyEncrypted": &emergency_access.key_encrypted,
"object": "emergencyAccessTakeover",
"Kdf": grantor_user.client_kdf_type,
"KdfIterations": grantor_user.client_kdf_iter,
"KdfMemory": grantor_user.client_kdf_memory,
"KdfParallelism": grantor_user.client_kdf_parallelism,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessTakeover",
});
Ok(Json(result))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct EmergencyAccessPasswordData {
new_master_password_hash: String,
key: String,
NewMasterPasswordHash: String,
Key: String,
}
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
async fn password_emergency_access(
emer_id: &str,
data: Json<EmergencyAccessPasswordData>,
data: JsonUpcase<EmergencyAccessPasswordData>,
headers: Headers,
mut conn: DbConn,
) -> EmptyResult {
check_emergency_access_enabled()?;
check_emergency_access_allowed()?;
let data: EmergencyAccessPasswordData = data.into_inner();
let new_master_password_hash = &data.new_master_password_hash;
let data: EmergencyAccessPasswordData = data.into_inner().data;
let new_master_password_hash = &data.NewMasterPasswordHash;
//let key = &data.Key;
let requesting_user = headers.user;
let emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -666,7 +667,7 @@ async fn password_emergency_access(
};
// change grantor_user password
grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
grantor_user.save(&mut conn).await?;
// Disable TwoFactor providers since they will otherwise block logins
@ -686,8 +687,7 @@ async fn password_emergency_access(
#[get("/emergency-access/<emer_id>/policies")]
async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
let requesting_user = headers.user;
let emergency_access =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@ -705,9 +705,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
"data": policies_json,
"object": "list",
"continuationToken": null
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
})))
}
@ -722,9 +722,9 @@ fn is_valid_request(
&& emergency_access.atype == requested_access_type as i32
}
fn check_emergency_access_enabled() -> EmptyResult {
fn check_emergency_access_allowed() -> EmptyResult {
if !CONFIG.emergency_access_allowed() {
err!("Emergency access is not enabled.")
err!("Emergency access is not allowed.")
}
Ok(())
}
@ -746,7 +746,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
for mut emer in emergency_access_list {
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
let recovery_allowed_at =
emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
if recovery_allowed_at.le(&now) {
// Only update the access status
// Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@ -802,10 +802,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
// Calculate the day before the recovery will become active
let final_recovery_reminder_at =
emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
// Calculate when the next reminder is due: a day after the previous notification, or now if none has been sent before
let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
last_notification_at + TimeDelta::try_days(1).unwrap()
last_notification_at + Duration::days(1)
} else {
now
};
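The scheduled-job hunks above also surface the chrono migration: chrono 0.4.32 renamed `Duration` to `TimeDelta` and introduced fallible constructors, which is why main writes `TimeDelta::try_days(n).unwrap()` where 1.30.1 still had the infallible `Duration::days(n)`. A minimal sketch of the computation, assuming chrono ≥ 0.4.32:

```rust
use chrono::{NaiveDateTime, TimeDelta};

// Sketch of the "recovery allowed at" calculation from the timeout job;
// try_days returns None on overflow, hence the unwrap for small inputs.
fn recovery_allowed_at(initiated_at: NaiveDateTime, wait_time_days: i32) -> NaiveDateTime {
    initiated_at + TimeDelta::try_days(i64::from(wait_time_days)).unwrap()
}
```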

View File

@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;
use crate::{
api::{EmptyResult, JsonResult},
api::{EmptyResult, JsonResult, JsonUpcaseVec},
auth::{AdminHeaders, Headers},
db::{
models::{Cipher, Event, UserOrganization},
@ -22,6 +22,7 @@ pub fn routes() -> Vec<Route> {
}
#[derive(FromForm)]
#[allow(non_snake_case)]
struct EventRange {
start: String,
end: String,
@ -52,9 +53,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
};
Ok(Json(json!({
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}
@ -84,9 +85,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
};
Ok(Json(json!({
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}
@ -118,13 +119,13 @@ async fn get_user_events(
};
Ok(Json(json!({
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}
fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
// When the length of the vec equals the max page_size, there is probably more data.
// When it is less, all events are loaded.
if events_json.len() as i64 == Event::PAGE_SIZE {
@ -144,33 +145,33 @@ pub fn main_routes() -> Vec<Route> {
routes![post_events_collect,]
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EventCollection {
// Mandatory
r#type: i32,
date: String,
Type: i32,
Date: String,
// Optional
cipher_id: Option<String>,
organization_id: Option<String>,
CipherId: Option<String>,
OrganizationId: Option<String>,
}
// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
if !CONFIG.org_events_enabled() {
return Ok(());
}
for event in data.iter() {
let event_date = parse_date(&event.date);
match event.r#type {
for event in data.iter().map(|d| &d.data) {
let event_date = parse_date(&event.Date);
match event.Type {
1000..=1099 => {
_log_user_event(
event.r#type,
event.Type,
&headers.user.uuid,
headers.device.atype,
Some(event_date),
@ -180,9 +181,9 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
.await;
}
1600..=1699 => {
if let Some(org_uuid) = &event.organization_id {
if let Some(org_uuid) = &event.OrganizationId {
_log_event(
event.r#type,
event.Type,
org_uuid,
org_uuid,
&headers.user.uuid,
@ -195,11 +196,11 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
}
}
_ => {
if let Some(cipher_uuid) = &event.cipher_id {
if let Some(cipher_uuid) = &event.CipherId {
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
if let Some(org_uuid) = cipher.organization_uuid {
_log_event(
event.r#type,
event.Type,
cipher_uuid,
&org_uuid,
&headers.user.uuid,
@ -262,7 +263,7 @@ pub async fn log_event(
event_type: i32,
source_uuid: &str,
org_uuid: &str,
act_user_uuid: &str,
act_user_uuid: String,
device_type: i32,
ip: &IpAddr,
conn: &mut DbConn,
@ -270,7 +271,7 @@ pub async fn log_event(
if !CONFIG.org_events_enabled() {
return;
}
_log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
_log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
}
#[allow(clippy::too_many_arguments)]
@ -288,7 +289,7 @@ async fn _log_event(
let mut event = Event::new(event_type, event_date);
match event_type {
// 1000..=1099 are user events; they need to be logged via log_user_event()
// Cipher Events
// Collection Events
1100..=1199 => {
event.cipher_uuid = Some(String::from(source_uuid));
}
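For orientation, the numeric ranges this collector and `_log_event` dispatch on follow upstream Bitwarden's `EventType` numbering, and the comment fix in main (1100..=1199 are cipher events, not collection events) matches the code, which stores a `cipher_uuid` for that range. A sketch of the mapping as it appears in the handlers above (the kind names are descriptive labels, not upstream identifiers):

```rust
// Range-to-kind mapping as used by the dispatch above.
fn event_kind(event_type: i32) -> &'static str {
    match event_type {
        1000..=1099 => "user",
        1100..=1199 => "cipher",
        1600..=1699 => "organization",
        _ => "other",
    }
}
```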

View File

@ -2,7 +2,7 @@ use rocket::serde::json::Json;
use serde_json::Value;
use crate::{
api::{EmptyResult, JsonResult, Notify, UpdateType},
api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
auth::Headers,
db::{models::*, DbConn},
};
@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
Json(json!({
"data": folders_json,
"object": "list",
"continuationToken": null,
"Data": folders_json,
"Object": "list",
"ContinuationToken": null,
}))
}
@ -38,17 +38,16 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct FolderData {
pub name: String,
pub id: Option<String>,
pub Name: String,
}
#[post("/folders", data = "<data>")]
async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let data: FolderData = data.into_inner();
async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let data: FolderData = data.into_inner().data;
let mut folder = Folder::new(headers.user.uuid, data.name);
let mut folder = Folder::new(headers.user.uuid, data.Name);
folder.save(&mut conn).await?;
nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
@ -57,19 +56,25 @@ async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn
}
#[post("/folders/<uuid>", data = "<data>")]
async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
async fn post_folder(
uuid: &str,
data: JsonUpcase<FolderData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
put_folder(uuid, data, headers, conn, nt).await
}
#[put("/folders/<uuid>", data = "<data>")]
async fn put_folder(
uuid: &str,
data: Json<FolderData>,
data: JsonUpcase<FolderData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: FolderData = data.into_inner();
let data: FolderData = data.into_inner().data;
let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
Some(folder) => folder,
@ -80,7 +85,7 @@ async fn put_folder(
err!("Folder belongs to another user")
}
folder.name = data.name;
folder.name = data.Name;
folder.save(&mut conn).await?;
nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;

View File

@ -12,8 +12,8 @@ pub use accounts::purge_auth_requests;
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use events::{event_cleanup_job, log_event, log_user_event};
use reqwest::Method;
pub use sends::purge_sends;
pub use two_factor::send_incomplete_2fa_notifications;
pub fn routes() -> Vec<Route> {
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
@ -47,23 +47,23 @@ pub fn events_routes() -> Vec<Route> {
//
// Move this somewhere else
//
use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};
use rocket::{serde::json::Json, Catcher, Route};
use serde_json::Value;
use crate::{
api::{JsonResult, Notify, UpdateType},
api::{JsonResult, JsonUpcase, Notify, UpdateType},
auth::Headers,
db::DbConn,
error::Error,
http_client::make_http_request,
util::parse_experimental_client_feature_flags,
util::get_reqwest_client,
};
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Serialize, Deserialize, Debug)]
#[allow(non_snake_case)]
struct GlobalDomain {
r#type: i32,
domains: Vec<String>,
excluded: bool,
Type: i32,
Domains: Vec<String>,
Excluded: bool,
}
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
@ -83,38 +83,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
for global in &mut globals {
global.excluded = excluded_globals.contains(&global.r#type);
global.Excluded = excluded_globals.contains(&global.Type);
}
if no_excluded {
globals.retain(|g| !g.excluded);
globals.retain(|g| !g.Excluded);
}
Json(json!({
"equivalentDomains": equivalent_domains,
"globalEquivalentDomains": globals,
"object": "domains",
"EquivalentDomains": equivalent_domains,
"GlobalEquivalentDomains": globals,
"Object": "domains",
}))
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EquivDomainData {
excluded_global_equivalent_domains: Option<Vec<i32>>,
equivalent_domains: Option<Vec<Vec<String>>>,
ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
EquivalentDomains: Option<Vec<Vec<String>>>,
}
#[post("/settings/domains", data = "<data>")]
async fn post_eq_domains(
data: Json<EquivDomainData>,
data: JsonUpcase<EquivDomainData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: EquivDomainData = data.into_inner();
let data: EquivDomainData = data.into_inner().data;
let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
let equivalent_domains = data.equivalent_domains.unwrap_or_default();
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
let mut user = headers.user;
use serde_json::to_string;
@ -130,7 +130,12 @@ async fn post_eq_domains(
}
#[put("/settings/domains", data = "<data>")]
async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
async fn put_eq_domains(
data: JsonUpcase<EquivDomainData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
post_eq_domains(data, headers, conn, nt).await
}
@ -141,7 +146,9 @@ async fn hibp_breach(username: &str) -> JsonResult {
);
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;
let hibp_client = get_reqwest_client();
let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
// If we get a 404, return a 404, it means no breached accounts
if res.status() == 404 {
@ -152,15 +159,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
Ok(Json(value))
} else {
Ok(Json(json!([{
"name": "HaveIBeenPwned",
"title": "Manual HIBP Check",
"domain": "haveibeenpwned.com",
"breachDate": "2019-08-18T00:00:00Z",
"addedDate": "2019-08-18T00:00:00Z",
"description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
"logoPath": "vw_static/hibp.png",
"pwnCount": 0,
"dataClasses": [
"Name": "HaveIBeenPwned",
"Title": "Manual HIBP Check",
"Domain": "haveibeenpwned.com",
"BreachDate": "2019-08-18T00:00:00Z",
"AddedDate": "2019-08-18T00:00:00Z",
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
"LogoPath": "vw_static/hibp.png",
"PwnCount": 0,
"DataClasses": [
"Error - No API key set!"
]
}])))
@ -186,26 +193,18 @@ fn version() -> Json<&'static str> {
#[get("/config")]
fn config() -> Json<Value> {
let domain = crate::CONFIG.domain();
let mut feature_states =
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
// Force the new key rotation feature
feature_states.insert("key-rotation-improvements".to_string(), true);
feature_states.insert("flexible-collections-v-1".to_string(), false);
Json(json!({
// Note: The clients use this version to handle backwards compatibility concerns
// This means they expect a version that closely matches the Bitwarden server version
// We should make sure that we keep this updated when we support the new server features
// Version history:
// - Individual cipher key encryption: 2023.9.1
"version": "2024.2.0",
"version": "2023.9.1",
"gitHash": option_env!("GIT_REV"),
"server": {
"name": "Vaultwarden",
"url": "https://github.com/dani-garcia/vaultwarden"
},
"settings": {
"disableUserRegistration": !crate::CONFIG.signups_allowed() && crate::CONFIG.signups_domains_whitelist().is_empty(),
"url": "https://github.com/dani-garcia/vaultwarden",
"version": crate::VERSION
},
"environment": {
"vault": domain,
@ -214,7 +213,13 @@ fn config() -> Json<Value> {
"notifications": format!("{domain}/notifications"),
"sso": "",
},
"featureStates": feature_states,
"featureStates": {
// Any feature flags that we want the clients to use
// Can check the enabled ones at:
// https://vault.bitwarden.com/api/config
"fido2-vault-credentials": true, // Passkey support
"autofill-v2": false, // Disabled because it is causing issues https://github.com/dani-garcia/vaultwarden/discussions/4052
},
"object": "config",
}))
}
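On main, `featureStates` is built from `parse_experimental_client_feature_flags` over a config string (with a couple of flags forced), replacing 1.30.1's hardcoded map. Assuming the config value is a comma-separated list of flag names — a guess at the helper's contract, which lives in `util` — a stand-in parser could look like:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for util::parse_experimental_client_feature_flags;
// assumes input such as "fido2-vault-credentials,autofill-v2".
fn parse_feature_flags(raw: &str) -> HashMap<String, bool> {
    raw.split(',')
        .map(str::trim)
        .filter(|flag| !flag.is_empty())
        .map(|flag| (flag.to_string(), true))
        .collect()
}
```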

File diff suppressed because it is too large

View File

@ -1,14 +1,13 @@
use chrono::Utc;
use rocket::{
request::{FromRequest, Outcome},
serde::json::Json,
request::{self, FromRequest, Outcome},
Request, Route,
};
use std::collections::HashSet;
use crate::{
api::EmptyResult,
api::{EmptyResult, JsonUpcase},
auth,
db::{models::*, DbConn},
mail, CONFIG,
@ -19,43 +18,43 @@ pub fn routes() -> Vec<Route> {
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct OrgImportGroupData {
name: String,
external_id: String,
member_external_ids: Vec<String>,
Name: String,
ExternalId: String,
MemberExternalIds: Vec<String>,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct OrgImportUserData {
email: String,
external_id: String,
deleted: bool,
Email: String,
ExternalId: String,
Deleted: bool,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct OrgImportData {
groups: Vec<OrgImportGroupData>,
members: Vec<OrgImportUserData>,
overwrite_existing: bool,
// largeImport: bool, // For now this will not be used; upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
Groups: Vec<OrgImportGroupData>,
Members: Vec<OrgImportUserData>,
OverwriteExisting: bool,
// LargeImport: bool, // For now this will not be used; upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
}
#[post("/public/organization/import", data = "<data>")]
async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
// Most of the logic for this function can be found here
// https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
let org_id = token.0;
let data = data.into_inner();
let data = data.into_inner().data;
for user_data in &data.members {
if user_data.deleted {
for user_data in &data.Members {
if user_data.Deleted {
// If user is marked for deletion and it exists, revoke it
if let Some(mut user_org) =
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
{
// Only revoke a user if it is not the last confirmed owner
let revoked = if user_org.atype == UserOrgType::Owner
@ -73,27 +72,27 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
user_org.revoke()
};
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
if revoked || ext_modified {
user_org.save(&mut conn).await?;
}
}
// If user is part of the organization, restore it
} else if let Some(mut user_org) =
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
{
let restored = user_org.restore();
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
if restored || ext_modified {
user_org.save(&mut conn).await?;
}
} else {
// If user is not part of the organization
let user = match User::find_by_mail(&user_data.email, &mut conn).await {
let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
Some(user) => user, // exists in vaultwarden
None => {
// User does not exist yet
let mut new_user = User::new(user_data.email.clone());
let mut new_user = User::new(user_data.Email.clone());
new_user.save(&mut conn).await?;
if !CONFIG.mail_enabled() {
@ -110,7 +109,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
new_org_user.set_external_id(Some(user_data.external_id.clone()));
new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
new_org_user.status = user_org_status;
@ -123,24 +122,26 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
None => err!("Error looking up organization"),
};
mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email))
mail::send_invite(
&user_data.Email,
&user.uuid,
Some(org_id.clone()),
Some(new_org_user.uuid),
&org_name,
Some(org_email),
)
.await?;
}
}
}
if CONFIG.org_groups_enabled() {
for group_data in &data.groups {
let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await
{
for group_data in &data.Groups {
let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
Some(group) => group.uuid,
None => {
let mut group = Group::new(
org_id.clone(),
group_data.name.clone(),
false,
Some(group_data.external_id.clone()),
);
let mut group =
Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
group.save(&mut conn).await?;
group.uuid
}
@ -148,7 +149,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
for ext_id in &group_data.member_external_ids {
for ext_id in &group_data.MemberExternalIds {
if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
{
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
@ -161,9 +162,9 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
}
// If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
if data.overwrite_existing {
if data.OverwriteExisting {
// Generate a HashSet to quickly verify if a member is listed or not.
let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
if let Some(ref user_external_id) = user_org.external_id {
if !sync_members.contains(user_external_id) {
@ -192,7 +193,7 @@ pub struct PublicToken(String);
impl<'r> FromRequest<'r> for PublicToken {
type Error = &'static str;
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
let headers = request.headers();
// Get access_token
let access_token: &str = match headers.get_one("Authorization") {
@ -208,15 +209,19 @@ impl<'r> FromRequest<'r> for PublicToken {
Err(_) => err_handler!("Invalid claim"),
};
// Check if time is between claims.nbf and claims.exp
let time_now = Utc::now().timestamp();
let time_now = Utc::now().naive_utc().timestamp();
if time_now < claims.nbf {
err_handler!("Token issued in the future");
}
if time_now > claims.exp {
err_handler!("Token expired");
}
// Check if claims.iss is domain|claims.scope[0]
let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
// Check if claims.iss is host|claims.scope[0]
let host = match auth::Host::from_request(request).await {
Outcome::Success(host) => host,
_ => err_handler!("Error getting Host"),
};
let complete_host = format!("{}|{}", host.host, claims.scope[0]);
if complete_host != claims.iss {
err_handler!("Token not issued by this server");
}
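The shape of the claim validation is unchanged between branches: reject tokens used before `nbf` or after `exp`, then require `iss` to match the `host|scope` (on main, `domain_origin|scope`) composite. A self-contained sketch of the time-window part, with claim values assumed to be Unix timestamps as in the code above:

```rust
use chrono::Utc;

// Sketch of the nbf/exp window check; nbf and exp come from the decoded
// JWT claims as Unix timestamps.
fn token_time_valid(nbf: i64, exp: i64) -> bool {
    let now = Utc::now().timestamp();
    nbf <= now && now <= exp
}
```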

View File

@ -1,7 +1,6 @@
use std::path::Path;
use chrono::{DateTime, TimeDelta, Utc};
use num_traits::ToPrimitive;
use chrono::{DateTime, Duration, Utc};
use rocket::form::Form;
use rocket::fs::NamedFile;
use rocket::fs::TempFile;
@ -9,17 +8,17 @@ use rocket::serde::json::Json;
use serde_json::Value;
use crate::{
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
auth::{ClientIp, Headers, Host},
db::{models::*, DbConn, DbPool},
util::{NumberOrString, SafeString},
util::SafeString,
CONFIG,
};
const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";
// The max file size allowed by Bitwarden clients, plus an extra 5% to avoid issues
const SIZE_525_MB: i64 = 550_502_400;
const SIZE_525_MB: u64 = 550_502_400;
pub fn routes() -> Vec<rocket::Route> {
routes![
@ -48,26 +47,23 @@ pub async fn purge_sends(pool: DbPool) {
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SendData {
r#type: i32,
key: String,
password: Option<String>,
max_access_count: Option<NumberOrString>,
expiration_date: Option<DateTime<Utc>>,
deletion_date: DateTime<Utc>,
disabled: bool,
hide_email: Option<bool>,
#[allow(non_snake_case)]
struct SendData {
Type: i32,
Key: String,
Password: Option<String>,
MaxAccessCount: Option<NumberOrString>,
ExpirationDate: Option<DateTime<Utc>>,
DeletionDate: DateTime<Utc>,
Disabled: bool,
HideEmail: Option<bool>,
// Data field
name: String,
notes: Option<String>,
text: Option<Value>,
file: Option<Value>,
file_length: Option<NumberOrString>,
// Used for key rotations
pub id: Option<String>,
Name: String,
Notes: Option<String>,
Text: Option<Value>,
File: Option<Value>,
FileLength: Option<NumberOrString>,
}
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@ -96,7 +92,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
/// Ref: https://bitwarden.com/help/article/policies/#send-options
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid;
let hide_email = data.hide_email.unwrap_or(false);
let hide_email = data.HideEmail.unwrap_or(false);
if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
err!(
"Due to an Enterprise Policy, you are not allowed to hide your email address \
@ -107,40 +103,40 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
}
fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
let data_val = if data.r#type == SendType::Text as i32 {
data.text
} else if data.r#type == SendType::File as i32 {
data.file
let data_val = if data.Type == SendType::Text as i32 {
data.Text
} else if data.Type == SendType::File as i32 {
data.File
} else {
err!("Invalid Send type")
};
let data_str = if let Some(mut d) = data_val {
d.as_object_mut().and_then(|o| o.remove("response"));
d.as_object_mut().and_then(|o| o.remove("Response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
};
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
if data.DeletionDate > Utc::now() + Duration::days(31) {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
send.user_uuid = Some(user_uuid);
send.notes = data.notes;
send.max_access_count = match data.max_access_count {
send.notes = data.Notes;
send.max_access_count = match data.MaxAccessCount {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
send.disabled = data.disabled;
send.hide_email = data.hide_email;
send.atype = data.r#type;
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.disabled = data.Disabled;
send.hide_email = data.HideEmail;
send.atype = data.Type;
send.set_password(data.password.as_deref());
send.set_password(data.Password.as_deref());
Ok(send)
}
@ -151,9 +147,9 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();
Json(json!({
"data": sends_json,
"object": "list",
"continuationToken": null
"Data": sends_json,
"Object": "list",
"ContinuationToken": null
}))
}
@ -172,13 +168,13 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult
}
#[post("/sends", data = "<data>")]
async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data: SendData = data.into_inner();
let data: SendData = data.into_inner().data;
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
if data.r#type == SendType::File as i32 {
if data.Type == SendType::File as i32 {
err!("File sends should use /api/sends/file")
}
@ -198,7 +194,7 @@ async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt:
#[derive(FromForm)]
struct UploadData<'f> {
model: Json<SendData>,
model: Json<crate::util::UpCase<SendData>>,
data: TempFile<'f>,
}
@ -218,43 +214,32 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
model,
mut data,
} = data.into_inner();
let model = model.into_inner();
let Some(size) = data.len().to_i64() else {
err!("Invalid send size");
};
if size < 0 {
err!("Send size can't be negative")
}
let model = model.into_inner().data;
enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;
let size_limit = match CONFIG.user_send_limit() {
let size_limit = match CONFIG.user_attachment_limit() {
Some(0) => err!("File uploads are disabled"),
Some(limit_kb) => {
let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
err!("Existing sends overflow")
};
let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
err!("Send size overflow");
};
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
if left <= 0 {
err!("Send storage limit reached! Delete some sends to free up space")
err!("Attachment storage limit reached! Delete some attachments to free up space")
}
i64::clamp(left, 0, SIZE_525_MB)
std::cmp::Ord::max(left as u64, SIZE_525_MB)
}
None => SIZE_525_MB,
};
if size > size_limit {
err!("Send storage limit exceeded with this file");
}
let mut send = create_send(model, headers.user.uuid)?;
if send.atype != SendType::File as i32 {
err!("Send content is not a file");
}
let size = data.len();
if size > size_limit {
err!("Attachment storage limit exceeded with this file");
}
let file_id = crate::crypto::generate_send_id();
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let file_path = folder_path.join(&file_id);
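Worth noting in the hunk above: main computes the remaining Send allowance with checked arithmetic, so an oversized configured limit or an inconsistent usage total becomes an error instead of wrapping, whereas 1.30.1 used plain `(limit_kb * 1024) - used` against the attachment total. The pattern in isolation:

```rust
// Sketch of overflow-safe quota math: remaining bytes for a user, or None
// when the multiply/subtract would overflow (surfaced as an error above).
fn remaining_send_quota(limit_kb: i64, already_used: i64) -> Option<i64> {
    limit_kb.checked_mul(1024)?.checked_sub(already_used)
}
```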
@ -266,9 +251,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
o.insert(String::from("id"), Value::String(file_id));
o.insert(String::from("size"), Value::Number(size.into()));
o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
o.insert(String::from("Id"), Value::String(file_id));
o.insert(String::from("Size"), Value::Number(size.into()));
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
}
send.data = serde_json::to_string(&data_value)?;
@ -288,44 +273,36 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
#[post("/sends/file/v2", data = "<data>")]
async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data = data.into_inner();
let data = data.into_inner().data;
if data.r#type != SendType::File as i32 {
if data.Type != SendType::File as i32 {
err!("Send content is not a file");
}
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
let file_length = match &data.file_length {
Some(m) => m.into_i64()?,
_ => err!("Invalid send length"),
let file_length = match &data.FileLength {
Some(m) => Some(m.into_i32()?),
_ => None,
};
if file_length < 0 {
err!("Send size can't be negative")
}
let size_limit = match CONFIG.user_send_limit() {
let size_limit = match CONFIG.user_attachment_limit() {
Some(0) => err!("File uploads are disabled"),
Some(limit_kb) => {
let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
err!("Existing sends overflow")
};
let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
err!("Send size overflow");
};
let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
if left <= 0 {
err!("Send storage limit reached! Delete some sends to free up space")
err!("Attachment storage limit reached! Delete some attachments to free up space")
}
i64::clamp(left, 0, SIZE_525_MB)
std::cmp::Ord::max(left as u64, SIZE_525_MB)
}
None => SIZE_525_MB,
};
if file_length > size_limit {
err!("Send storage limit exceeded with this file");
if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
err!("Attachment storage limit exceeded with this file");
}
let mut send = create_send(data, headers.user.uuid)?;
@ -334,9 +311,9 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
o.insert(String::from("id"), Value::String(file_id.clone()));
o.insert(String::from("size"), Value::Number(file_length.into()));
o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
o.insert(String::from("Id"), Value::String(file_id.clone()));
o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
}
send.data = serde_json::to_string(&data_value)?;
send.save(&mut conn).await?;
@ -349,15 +326,7 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendFileData {
id: String,
size: u64,
fileName: String,
}
// https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data(
send_uuid: &str,
@ -375,55 +344,15 @@ async fn post_send_file_v2_data(
err!("Send not found. Unable to save the file.")
};
if send.atype != SendType::File as i32 {
err!("Send is not a file type send.");
}
let Some(send_user_id) = &send.user_uuid else {
err!("Sends are only supported for users at the moment.")
err!("Sends are only supported for users at the moment")
};
if send_user_id != &headers.user.uuid {
err!("Send doesn't belong to user.");
}
let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
err!("Unable to decode send data as json.")
};
match data.data.raw_name() {
Some(raw_file_name) if raw_file_name.dangerous_unsafe_unsanitized_raw() == send_data.fileName => (),
Some(raw_file_name) => err!(
"Send file name does not match.",
format!(
"Expected file name '{}' got '{}'",
send_data.fileName,
raw_file_name.dangerous_unsafe_unsanitized_raw()
)
),
_ => err!("Send file name does not match or is not provided."),
}
if file_id != send_data.id {
err!("Send file does not match send data.", format!("Expected id {} got {file_id}", send_data.id));
}
let Some(size) = data.data.len().to_u64() else {
err!("Send file size overflow.");
};
if size != send_data.size {
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
err!("Send doesn't belong to user");
}
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
let file_path = folder_path.join(file_id);
// Check if the file already exists; if so, do not overwrite it
if tokio::fs::metadata(&file_path).await.is_ok() {
err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
}
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.data.persist_to(&file_path).await {
@ -443,15 +372,15 @@ async fn post_send_file_v2_data(
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct SendAccessData {
pub password: Option<String>,
pub Password: Option<String>,
}
#[post("/sends/access/<access_id>", data = "<data>")]
async fn post_access(
access_id: &str,
data: Json<SendAccessData>,
data: JsonUpcase<SendAccessData>,
mut conn: DbConn,
ip: ClientIp,
nt: Notify<'_>,
@ -482,7 +411,7 @@ async fn post_access(
}
if send.password_hash.is_some() {
match data.into_inner().password {
match data.into_inner().data.Password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
@ -512,7 +441,7 @@ async fn post_access(
async fn post_access_file(
send_id: &str,
file_id: &str,
data: Json<SendAccessData>,
data: JsonUpcase<SendAccessData>,
host: Host,
mut conn: DbConn,
nt: Notify<'_>,
@ -543,7 +472,7 @@ async fn post_access_file(
}
if send.password_hash.is_some() {
match data.into_inner().password {
match data.into_inner().data.Password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password."),
None => err_code!("Password not provided", 401),
@ -566,9 +495,9 @@ async fn post_access_file(
let token_claims = crate::auth::generate_send_claims(send_id, file_id);
let token = crate::auth::encode_jwt(&token_claims);
Ok(Json(json!({
"object": "send-fileDownload",
"id": file_id,
"url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
"Object": "send-fileDownload",
"Id": file_id,
"Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
})))
}
@ -583,10 +512,16 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
}
#[put("/sends/<id>", data = "<data>")]
async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
async fn put_send(
id: &str,
data: JsonUpcase<SendData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data: SendData = data.into_inner();
let data: SendData = data.into_inner().data;
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
let mut send = match Send::find_by_uuid(id, &mut conn).await {
@ -594,38 +529,19 @@ async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: Db
None => err!("Send not found"),
};
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
Ok(Json(send.to_json()))
}
pub async fn update_send_from_data(
send: &mut Send,
data: SendData,
headers: &Headers,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
) -> EmptyResult {
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
if send.atype != data.r#type {
if send.atype != data.Type {
err!("Sends can't change type")
}
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
// When updating a file Send, we receive nulls in the File field, as it's immutable,
// so we only need to update the data field in the Text case
if data.r#type == SendType::Text as i32 {
let data_str = if let Some(mut d) = data.text {
d.as_object_mut().and_then(|d| d.remove("response"));
if data.Type == SendType::Text as i32 {
let data_str = if let Some(mut d) = data.Text {
d.as_object_mut().and_then(|d| d.remove("Response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
@ -633,28 +549,39 @@ pub async fn update_send_from_data(
send.data = data_str;
}
send.name = data.name;
send.akey = data.key;
send.deletion_date = data.deletion_date.naive_utc();
send.notes = data.notes;
send.max_access_count = match data.max_access_count {
if data.DeletionDate > Utc::now() + Duration::days(31) {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
send.name = data.Name;
send.akey = data.Key;
send.deletion_date = data.DeletionDate.naive_utc();
send.notes = data.Notes;
send.max_access_count = match data.MaxAccessCount {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
send.hide_email = data.hide_email;
send.disabled = data.disabled;
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.hide_email = data.HideEmail;
send.disabled = data.Disabled;
// Only change the value if it's present
if let Some(password) = data.password {
if let Some(password) = data.Password {
send.set_password(Some(&password));
}
send.save(conn).await?;
if ut != UpdateType::None {
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
}
Ok(())
send.save(&mut conn).await?;
nt.send_send_update(
UpdateType::SyncSendUpdate,
&send,
&send.update_users_revision(&mut conn).await,
&headers.device.uuid,
&mut conn,
)
.await;
Ok(Json(send.to_json()))
}
#[delete("/sends/<id>")]

View File

@ -3,14 +3,16 @@ use rocket::serde::json::Json;
use rocket::Route;
use crate::{
api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
api::{
core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
NumberOrString, PasswordOrOtpData,
},
auth::{ClientIp, Headers},
crypto,
db::{
models::{EventType, TwoFactor, TwoFactorType},
DbConn,
},
util::NumberOrString,
};
pub use crate::config::CONFIG;
@ -20,8 +22,8 @@ pub fn routes() -> Vec<Route> {
}
#[post("/two-factor/get-authenticator", data = "<data>")]
async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -35,32 +37,36 @@ async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers,
};
Ok(Json(json!({
"enabled": enabled,
"key": key,
"object": "twoFactorAuthenticator"
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableAuthenticatorData {
key: String,
token: NumberOrString,
master_password_hash: Option<String>,
otp: Option<String>,
Key: String,
Token: NumberOrString,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
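The `Token: NumberOrString` field above relies on a small helper that accepts a value sent either as a JSON number or as a string. A minimal sketch of that pattern, assuming serde's untagged enums (the real type lives in this crate's util/api modules; names here are illustrative):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumOrStr {
    Num(i64),
    Str(String),
}

impl NumOrStr {
    // Mirrors the into_i32()/into_string() conversions used throughout these handlers.
    fn as_i32(&self) -> Option<i32> {
        match self {
            Self::Num(n) => i32::try_from(*n).ok(),
            Self::Str(s) => s.trim().parse().ok(),
        }
    }
}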
#[post("/two-factor/authenticator", data = "<data>")]
async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner();
let key = data.key;
let token = data.token.into_string();
async fn activate_authenticator(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let key = data.Key;
let token = data.Token.into_string();
let mut user = headers.user;
PasswordOrOtpData {
master_password_hash: data.master_password_hash,
otp: data.otp,
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -83,14 +89,18 @@ async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: He
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"enabled": true,
"key": key,
"object": "twoFactorAuthenticator"
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_authenticator_put(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
activate_authenticator(data, headers, conn).await
}
@ -145,8 +155,8 @@ pub async fn validate_totp_code(
let time = (current_timestamp + step * 30i64) as u64;
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
// Check that the given code equals the generated one and that the time_step is larger than the one last used.
if generated == totp_code && time_step > twofactor.last_used {
// Check that the given code equals the generated one and that the time_step is larger than the one last used.
if generated == totp_code && time_step > i64::from(twofactor.last_used) {
// If the step does not equal 0, the time has drifted on either the server or the client side.
if step != 0 {
warn!("TOTP Time drift detected. The step offset is {}", step);
@ -154,10 +164,10 @@ pub async fn validate_totp_code(
// Save the last used time step so only TOTP time steps higher than this one are allowed.
// This will also save a newly created twofactor if the code is correct.
twofactor.last_used = time_step;
twofactor.last_used = time_step as i32;
twofactor.save(conn).await?;
return Ok(());
} else if generated == totp_code && time_step <= twofactor.last_used {
} else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
err!(
format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),

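A minimal sketch of the windowed check in validate_totp_code above, assuming a base32-decoded secret and the same 30-second/6-digit parameters; the matched step is returned so the caller can persist it as last_used and reject replays (function name and signature are illustrative):

use totp_lite::{totp_custom, Sha1};

fn check_totp_window(secret: &[u8], code: &str, now_secs: i64, last_used_step: i64) -> Option<i64> {
    for step_offset in [-1i64, 0, 1] {
        let time = (now_secs + step_offset * 30) as u64;
        let step = (time / 30) as i64;
        // Accept the code only if it matches and its time step was never used before.
        if totp_custom::<Sha1>(30, 6, secret, time) == code && step > last_used_step {
            return Some(step);
        }
    }
    None
}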
View File

@ -5,7 +5,7 @@ use rocket::Route;
use crate::{
api::{
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
PasswordOrOtpData,
},
auth::Headers,
@ -15,7 +15,7 @@ use crate::{
DbConn,
},
error::MapResult,
http_client::make_http_request,
util::get_reqwest_client,
CONFIG,
};
@ -92,8 +92,8 @@ impl DuoStatus {
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
#[post("/two-factor/get-duo", data = "<data>")]
async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -109,16 +109,16 @@ async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbCo
let json = if let Some(data) = data {
json!({
"enabled": enabled,
"host": data.host,
"secretKey": data.sk,
"integrationKey": data.ik,
"object": "twoFactorDuo"
"Enabled": enabled,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})
} else {
json!({
"enabled": enabled,
"object": "twoFactorDuo"
"Enabled": enabled,
"Object": "twoFactorDuo"
})
};
@ -126,21 +126,21 @@ async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbCo
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case, dead_code)]
struct EnableDuoData {
host: String,
secret_key: String,
integration_key: String,
master_password_hash: Option<String>,
otp: Option<String>,
Host: String,
SecretKey: String,
IntegrationKey: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
impl From<EnableDuoData> for DuoData {
fn from(d: EnableDuoData) -> Self {
Self {
host: d.host,
ik: d.integration_key,
sk: d.secret_key,
host: d.Host,
ik: d.IntegrationKey,
sk: d.SecretKey,
}
}
}
@ -151,17 +151,17 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
}
!empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
!empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
}
#[post("/two-factor/duo", data = "<data>")]
async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner();
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner().data;
let mut user = headers.user;
PasswordOrOtpData {
master_password_hash: data.master_password_hash.clone(),
otp: data.otp.clone(),
MasterPasswordHash: data.MasterPasswordHash.clone(),
Otp: data.Otp.clone(),
}
.validate(&user, true, &mut conn)
.await?;
@ -184,16 +184,16 @@ async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbC
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"enabled": true,
"host": data.host,
"secretKey": data.sk,
"integrationKey": data.ik,
"object": "twoFactorDuo"
"Enabled": true,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
})))
}
#[put("/two-factor/duo", data = "<data>")]
async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn).await
}
@ -210,7 +210,10 @@ async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData)
let m = Method::from_str(method).unwrap_or_default();
make_http_request(m, &url)?
let client = get_reqwest_client();
client
.request(m, &url)
.basic_auth(username, Some(password))
.header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
.header(header::DATE, date)
@ -252,7 +255,7 @@ async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
}
// let (ik, sk, ak, host) = get_duo_keys();
pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
let data = match User::find_by_mail(email, conn).await {
Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
_ => DuoData::global(),
@ -281,6 +284,10 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
}
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
// email is as entered by the user, so it needs to be normalized before
// comparison with auth_user below.
let email = &email.to_lowercase();
let split: Vec<&str> = response.split(':').collect();
if split.len() != 2 {
err!(

View File

@ -1,498 +0,0 @@
use chrono::Utc;
use data_encoding::HEXLOWER;
use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation};
use reqwest::{header, StatusCode};
use ring::digest::{digest, Digest, SHA512_256};
use serde::Serialize;
use std::collections::HashMap;
use crate::{
api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
crypto,
db::{
models::{EventType, TwoFactorDuoContext},
DbConn, DbPool,
},
error::Error,
http_client::make_http_request,
CONFIG,
};
use url::Url;
// The location on this service that Duo should redirect users to. For us, this is a bridge
// built into the Bitwarden clients.
// See: https://github.com/bitwarden/clients/blob/main/apps/web/src/connectors/duo-redirect.ts
const DUO_REDIRECT_LOCATION: &str = "duo-redirect-connector.html";
// Number of seconds that a JWT we generate for Duo should be valid for.
const JWT_VALIDITY_SECS: i64 = 300;
// Number of seconds that a Duo context stored in the database should be valid for.
const CTX_VALIDITY_SECS: i64 = 300;
// Expected algorithm used by Duo to sign JWTs.
const DUO_RESP_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
// Signature algorithm we're using to sign JWTs for Duo. Must be either HS512 or HS256.
const JWT_SIGNATURE_ALG: Algorithm = Algorithm::HS512;
// Size of random strings for state and nonce. Must be at least 16 characters and at most 1024 characters.
// If increasing this above 64, also increase the size of the twofactor_duo_ctx.state and
// twofactor_duo_ctx.nonce database columns for postgres and mariadb.
const STATE_LENGTH: usize = 64;
// client_assertion payload for health checks and obtaining MFA results.
#[derive(Debug, Serialize, Deserialize)]
struct ClientAssertion {
pub iss: String,
pub sub: String,
pub aud: String,
pub exp: i64,
pub jti: String,
pub iat: i64,
}
// authorization request payload that clients carry to Duo for MFA
#[derive(Debug, Serialize, Deserialize)]
struct AuthorizationRequest {
pub response_type: String,
pub scope: String,
pub exp: i64,
pub client_id: String,
pub redirect_uri: String,
pub state: String,
pub duo_uname: String,
pub iss: String,
pub aud: String,
pub nonce: String,
}
// Duo service health check responses
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum HealthCheckResponse {
HealthOK {
stat: String,
},
HealthFail {
message: String,
message_detail: String,
},
}
// Outer structure of response when exchanging authz code for MFA results
#[derive(Debug, Serialize, Deserialize)]
struct IdTokenResponse {
id_token: String, // IdTokenClaims
access_token: String,
expires_in: i64,
token_type: String,
}
// Inner structure of IdTokenResponse.id_token
#[derive(Debug, Serialize, Deserialize)]
struct IdTokenClaims {
preferred_username: String,
nonce: String,
}
// Duo OIDC Authorization Client
// See https://duo.com/docs/oauthapi
struct DuoClient {
client_id: String, // Duo Client ID (DuoData.ik)
client_secret: String, // Duo Client Secret (DuoData.sk)
api_host: String, // Duo API hostname (DuoData.host)
redirect_uri: String, // URL in this application clients should call for MFA verification
}
impl DuoClient {
// Construct a new DuoClient
fn new(client_id: String, client_secret: String, api_host: String, redirect_uri: String) -> DuoClient {
DuoClient {
client_id,
client_secret,
api_host,
redirect_uri,
}
}
// Generate a client assertion for health checks and authorization code exchange.
fn new_client_assertion(&self, url: &str) -> ClientAssertion {
let now = Utc::now().timestamp();
let jwt_id = crypto::get_random_string_alphanum(STATE_LENGTH);
ClientAssertion {
iss: self.client_id.clone(),
sub: self.client_id.clone(),
aud: url.to_string(),
exp: now + JWT_VALIDITY_SECS,
jti: jwt_id,
iat: now,
}
}
// Given a serde-serializable struct, attempt to encode it as a JWT
fn encode_duo_jwt<T: Serialize>(&self, jwt_payload: T) -> Result<String, Error> {
match jsonwebtoken::encode(
&Header::new(JWT_SIGNATURE_ALG),
&jwt_payload,
&EncodingKey::from_secret(self.client_secret.as_bytes()),
) {
Ok(token) => Ok(token),
Err(e) => err!(format!("Error encoding Duo JWT: {e:?}")),
}
}
// "required" health check to verify the integration is configured and Duo's services
// are up.
// https://duo.com/docs/oauthapi#health-check
async fn health_check(&self) -> Result<(), Error> {
let health_check_url: String = format!("https://{}/oauth/v1/health_check", self.api_host);
let jwt_payload = self.new_client_assertion(&health_check_url);
let token = match self.encode_duo_jwt(jwt_payload) {
Ok(token) => token,
Err(e) => return Err(e),
};
let mut post_body = HashMap::new();
post_body.insert("client_assertion", token);
post_body.insert("client_id", self.client_id.clone());
let res = match make_http_request(reqwest::Method::POST, &health_check_url)?
.header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
.form(&post_body)
.send()
.await
{
Ok(r) => r,
Err(e) => err!(format!("Error requesting Duo health check: {e:?}")),
};
let response: HealthCheckResponse = match res.json::<HealthCheckResponse>().await {
Ok(r) => r,
Err(e) => err!(format!("Duo health check response decode error: {e:?}")),
};
let health_stat: String = match response {
HealthCheckResponse::HealthOK {
stat,
} => stat,
HealthCheckResponse::HealthFail {
message,
message_detail,
} => err!(format!("Duo health check FAIL response, msg: {}, detail: {}", message, message_detail)),
};
if health_stat != "OK" {
err!(format!("Duo health check failed, got OK-like body with stat {health_stat}"));
}
Ok(())
}
// Constructs the URL for the authorization request endpoint on Duo's service.
// Clients are sent here to continue authentication.
// https://duo.com/docs/oauthapi#authorization-request
fn make_authz_req_url(&self, duo_username: &str, state: String, nonce: String) -> Result<String, Error> {
let now = Utc::now().timestamp();
let jwt_payload = AuthorizationRequest {
response_type: String::from("code"),
scope: String::from("openid"),
exp: now + JWT_VALIDITY_SECS,
client_id: self.client_id.clone(),
redirect_uri: self.redirect_uri.clone(),
state,
duo_uname: String::from(duo_username),
iss: self.client_id.clone(),
aud: format!("https://{}", self.api_host),
nonce,
};
let token = match self.encode_duo_jwt(jwt_payload) {
Ok(token) => token,
Err(e) => return Err(e),
};
let authz_endpoint = format!("https://{}/oauth/v1/authorize", self.api_host);
let mut auth_url = match Url::parse(authz_endpoint.as_str()) {
Ok(url) => url,
Err(e) => err!(format!("Error parsing Duo authorization URL: {e:?}")),
};
{
let mut query_params = auth_url.query_pairs_mut();
query_params.append_pair("response_type", "code");
query_params.append_pair("client_id", self.client_id.as_str());
query_params.append_pair("request", token.as_str());
}
let final_auth_url = auth_url.to_string();
Ok(final_auth_url)
}
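For reference, the URL built above has this shape (host, client id, and request JWT are shortened, illustrative values):

// https://api-xxxxxxxx.duosecurity.com/oauth/v1/authorize?response_type=code&client_id=<client id>&request=<signed JWT>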
// Exchange the authorization code, obtained from the token provided by the user,
// for the MFA result, and validate it.
// See: https://duo.com/docs/oauthapi#access-token (under Response Format)
async fn exchange_authz_code_for_result(
&self,
duo_code: &str,
duo_username: &str,
nonce: &str,
) -> Result<(), Error> {
if duo_code.is_empty() {
err!("Empty Duo authorization code")
}
let token_url = format!("https://{}/oauth/v1/token", self.api_host);
let jwt_payload = self.new_client_assertion(&token_url);
let token = match self.encode_duo_jwt(jwt_payload) {
Ok(token) => token,
Err(e) => return Err(e),
};
let mut post_body = HashMap::new();
post_body.insert("grant_type", String::from("authorization_code"));
post_body.insert("code", String::from(duo_code));
// Must be the same URL that was supplied in the authorization request for the supplied duo_code
post_body.insert("redirect_uri", self.redirect_uri.clone());
post_body
.insert("client_assertion_type", String::from("urn:ietf:params:oauth:client-assertion-type:jwt-bearer"));
post_body.insert("client_assertion", token);
let res = match make_http_request(reqwest::Method::POST, &token_url)?
.header(header::USER_AGENT, "vaultwarden:Duo/2.0 (Rust)")
.form(&post_body)
.send()
.await
{
Ok(r) => r,
Err(e) => err!(format!("Error exchanging Duo code: {e:?}")),
};
let status_code = res.status();
if status_code != StatusCode::OK {
err!(format!("Failure response from Duo: {}", status_code))
}
let response: IdTokenResponse = match res.json::<IdTokenResponse>().await {
Ok(r) => r,
Err(e) => err!(format!("Error decoding ID token response: {e:?}")),
};
let mut validation = Validation::new(DUO_RESP_SIGNATURE_ALG);
validation.set_required_spec_claims(&["exp", "aud", "iss"]);
validation.set_audience(&[&self.client_id]);
validation.set_issuer(&[token_url.as_str()]);
let token_data = match jsonwebtoken::decode::<IdTokenClaims>(
&response.id_token,
&DecodingKey::from_secret(self.client_secret.as_bytes()),
&validation,
) {
Ok(c) => c,
Err(e) => err!(format!("Failed to decode Duo token {e:?}")),
};
let matching_nonces = crypto::ct_eq(nonce, &token_data.claims.nonce);
let matching_usernames = crypto::ct_eq(duo_username, &token_data.claims.preferred_username);
if !(matching_nonces && matching_usernames) {
err!("Error validating Duo authorization, nonce or username mismatch.")
};
Ok(())
}
}
struct DuoAuthContext {
pub state: String,
pub user_email: String,
pub nonce: String,
pub exp: i64,
}
// Given a state string, retrieve the associated Duo auth context and
// delete the retrieved state from the database.
async fn extract_context(state: &str, conn: &mut DbConn) -> Option<DuoAuthContext> {
let ctx: TwoFactorDuoContext = match TwoFactorDuoContext::find_by_state(state, conn).await {
Some(c) => c,
None => return None,
};
if ctx.exp < Utc::now().timestamp() {
ctx.delete(conn).await.ok();
return None;
}
// Copy the context data, so that we can delete the context from
// the database before returning.
let ret_ctx = DuoAuthContext {
state: ctx.state.clone(),
user_email: ctx.user_email.clone(),
nonce: ctx.nonce.clone(),
exp: ctx.exp,
};
ctx.delete(conn).await.ok();
Some(ret_ctx)
}
// Task to clean up expired Duo authentication contexts that may have accumulated in the database.
pub async fn purge_duo_contexts(pool: DbPool) {
debug!("Purging Duo authentication contexts");
if let Ok(mut conn) = pool.get().await {
TwoFactorDuoContext::purge_expired_duo_contexts(&mut conn).await;
} else {
error!("Failed to get DB connection while purging expired Duo authentications")
}
}
// Construct the URL that Duo should redirect users to.
fn make_callback_url(client_name: &str) -> Result<String, Error> {
// Get the location of this application as defined in the config.
let base = match Url::parse(&format!("{}/", CONFIG.domain())) {
Ok(url) => url,
Err(e) => err!(format!("Error parsing configured domain URL (check your domain configuration): {e:?}")),
};
// Add the client redirect bridge location
let mut callback = match base.join(DUO_REDIRECT_LOCATION) {
Ok(url) => url,
Err(e) => err!(format!("Error constructing Duo redirect URL (check your domain configuration): {e:?}")),
};
// Add the 'client' string with the authenticating device type. The callback connector uses this
// information to figure out how it should handle certain clients.
{
let mut query_params = callback.query_pairs_mut();
query_params.append_pair("client", client_name);
}
Ok(callback.to_string())
}
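Assuming DOMAIN is set to https://vault.example.com (an illustrative value), the callback built above resolves to:

// https://vault.example.com/duo-redirect-connector.html?client=web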
// Pre-redirect first stage of the Duo OIDC authentication flow.
// Returns the "AuthUrl" that should be returned to clients for MFA.
pub async fn get_duo_auth_url(
email: &str,
client_id: &str,
device_identifier: &String,
conn: &mut DbConn,
) -> Result<String, Error> {
let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
let callback_url = match make_callback_url(client_id) {
Ok(url) => url,
Err(e) => return Err(e),
};
let client = DuoClient::new(ik, sk, host, callback_url);
match client.health_check().await {
Ok(()) => {}
Err(e) => return Err(e),
};
// Generate random OAuth2 state and OIDC Nonce
let state: String = crypto::get_random_string_alphanum(STATE_LENGTH);
let nonce: String = crypto::get_random_string_alphanum(STATE_LENGTH);
// Bind the nonce to the device that's currently authenticating by hashing the nonce and device id
// and sending the result as the OIDC nonce.
let d: Digest = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
let hash: String = HEXLOWER.encode(d.as_ref());
match TwoFactorDuoContext::save(state.as_str(), email, nonce.as_str(), CTX_VALIDITY_SECS, conn).await {
Ok(()) => client.make_authz_req_url(email, state, hash),
Err(e) => err!(format!("Error saving Duo authentication context: {e:?}")),
}
}
// Post-redirect second stage of the Duo OIDC authentication flow.
// Exchanges an authorization code for the MFA result with Duo's API and validates the result.
pub async fn validate_duo_login(
email: &str,
two_factor_token: &str,
client_id: &str,
device_identifier: &str,
conn: &mut DbConn,
) -> EmptyResult {
// Result supplied to us by clients in the form "<authz code>|<state>"
let split: Vec<&str> = two_factor_token.split('|').collect();
if split.len() != 2 {
err!(
"Invalid response length",
ErrorEvent {
event: EventType::UserFailedLogIn2fa
}
);
}
let code = split[0];
let state = split[1];
let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
// Get the context by the state reported by the client. If we don't have one,
// it means the context is either missing or expired.
let ctx = match extract_context(state, conn).await {
Some(c) => c,
None => {
err!(
"Error validating duo authentication",
ErrorEvent {
event: EventType::UserFailedLogIn2fa
}
)
}
};
// Context validation steps
let matching_usernames = crypto::ct_eq(email, &ctx.user_email);
// Probably redundant, but we're double-checking them anyway.
let matching_states = crypto::ct_eq(state, &ctx.state);
let unexpired_context = ctx.exp > Utc::now().timestamp();
if !(matching_usernames && matching_states && unexpired_context) {
err!(
"Error validating duo authentication",
ErrorEvent {
event: EventType::UserFailedLogIn2fa
}
)
}
let callback_url = match make_callback_url(client_id) {
Ok(url) => url,
Err(e) => return Err(e),
};
let client = DuoClient::new(ik, sk, host, callback_url);
match client.health_check().await {
Ok(()) => {}
Err(e) => return Err(e),
};
let d: Digest = digest(&SHA512_256, format!("{}{}", ctx.nonce, device_identifier).as_bytes());
let hash: String = HEXLOWER.encode(d.as_ref());
match client.exchange_authz_code_for_result(code, email, hash.as_str()).await {
Ok(_) => Ok(()),
Err(_) => {
err!(
"Error validating duo authentication",
ErrorEvent {
event: EventType::UserFailedLogIn2fa
}
)
}
}
}
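Both stages above bind the stored nonce to the authenticating device in the same way; a minimal standalone sketch of that binding (helper name is illustrative):

use data_encoding::HEXLOWER;
use ring::digest::{digest, SHA512_256};

// The raw nonce never leaves the server; SHA512_256(nonce || device id) is
// what Duo sees, so the response only validates for the same device.
fn bind_nonce_to_device(nonce: &str, device_identifier: &str) -> String {
    let d = digest(&SHA512_256, format!("{nonce}{device_identifier}").as_bytes());
    HEXLOWER.encode(d.as_ref())
}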

View File

@ -1,16 +1,16 @@
use chrono::{DateTime, TimeDelta, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, PasswordOrOtpData,
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
},
auth::Headers,
crypto,
db::{
models::{EventType, TwoFactor, TwoFactorType, User},
models::{EventType, TwoFactor, TwoFactorType},
DbConn,
},
error::{Error, MapResult},
@ -22,31 +22,28 @@ pub fn routes() -> Vec<Route> {
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct SendEmailLoginData {
// DeviceIdentifier: String, // Currently not used
#[serde(alias = "Email")]
email: String,
#[serde(alias = "MasterPasswordHash")]
master_password_hash: String,
Email: String,
MasterPasswordHash: String,
}
/// User is trying to log in and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner();
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner().data;
use crate::db::models::User;
// Get the user
let user = match User::find_by_mail(&data.email, &mut conn).await {
let user = match User::find_by_mail(&data.Email, &mut conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.master_password_hash) {
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
@ -79,8 +76,8 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
/// When the user clicks on Manage email 2FA, show the related information.
#[post("/two-factor/get-email", data = "<data>")]
async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -95,30 +92,30 @@ async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: Db
};
Ok(Json(json!({
"email": mfa_email,
"enabled": enabled,
"object": "twoFactorEmail"
"Email": mfa_email,
"Enabled": enabled,
"Object": "twoFactorEmail"
})))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct SendEmailData {
/// Email address where 2FA codes will be sent; it can differ from the user's account email.
email: String,
master_password_hash: Option<String>,
otp: Option<String>,
Email: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
/// Send a verification email to the specified email address to check whether it exists/belongs to the user.
#[post("/two-factor/send-email", data = "<data>")]
async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner();
async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner().data;
let user = headers.user;
PasswordOrOtpData {
master_password_hash: data.master_password_hash,
otp: data.otp,
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, false, &mut conn)
.await?;
@ -134,7 +131,7 @@ async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbCon
}
let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
let twofactor_data = EmailTokenData::new(data.email, generated_token);
let twofactor_data = EmailTokenData::new(data.Email, generated_token);
// Uses EmailVerificationChallenge as type to show that it's not verified yet.
let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
@ -146,24 +143,24 @@ async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbCon
}
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct EmailData {
email: String,
token: String,
master_password_hash: Option<String>,
otp: Option<String>,
Email: String,
Token: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
/// Verify the email belongs to the user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner();
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner().data;
let mut user = headers.user;
// This is the last step in the verification process; delete the OTP directly afterwards.
PasswordOrOtpData {
master_password_hash: data.master_password_hash,
otp: data.otp,
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -179,7 +176,7 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, data.token) {
if !crypto::ct_eq(issued_token, data.Token) {
err!("Token is invalid")
}
@ -193,9 +190,9 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"email": email_data.email,
"enabled": "true",
"object": "twoFactorEmail"
"Email": email_data.email,
"Enabled": "true",
"Object": "twoFactorEmail"
})))
}
@ -235,9 +232,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
twofactor.data = email_data.to_json();
twofactor.save(conn).await?;
let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
let max_time = CONFIG.email_expiration_time() as i64;
if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
err!(
"Token has expired",
ErrorEvent {
@ -268,14 +265,14 @@ impl EmailTokenData {
EmailTokenData {
email,
last_token: Some(token),
token_sent: Utc::now().timestamp(),
token_sent: Utc::now().naive_utc().timestamp(),
attempts: 0,
}
}
pub fn set_token(&mut self, token: String) {
self.last_token = Some(token);
self.token_sent = Utc::now().timestamp();
self.token_sent = Utc::now().naive_utc().timestamp();
}
pub fn reset_token(&mut self) {
@ -292,7 +289,7 @@ impl EmailTokenData {
}
pub fn from_json(string: &str) -> Result<EmailTokenData, Error> {
let res: Result<EmailTokenData, serde_json::Error> = serde_json::from_str(string);
let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string);
match res {
Ok(x) => Ok(x),
Err(_) => err!("Could not decode EmailTokenData from string"),
@ -300,15 +297,6 @@ impl EmailTokenData {
}
}
pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
if user.verified_at.is_none() {
err!("Auto-enabling of email 2FA failed because the users email address has not been verified!");
}
let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
twofactor.save(conn).await
}
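A minimal sketch of the token-expiry rule used earlier in this file, written against the newer chrono API on the `main` side of the diff; `token_sent` is a unix timestamp and `max_secs` mirrors EMAIL_EXPIRATION_TIME (the function name is illustrative):

use chrono::{DateTime, TimeDelta, Utc};

fn token_expired(token_sent: i64, max_secs: i64) -> bool {
    let sent = DateTime::from_timestamp(token_sent, 0)
        .expect("Email token timestamp invalid.")
        .naive_utc();
    // Expired once the configured window has fully elapsed since it was sent.
    sent + TimeDelta::try_seconds(max_secs).unwrap() < Utc::now().naive_utc()
}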
/// Takes an email address and obscures the local part by replacing it with asterisks, except for the first two characters.
pub fn obscure_email(email: &str) -> String {
let split: Vec<&str> = email.rsplitn(2, '@').collect();
@ -330,14 +318,6 @@ pub fn obscure_email(email: &str) -> String {
format!("{}@{}", new_name, &domain)
}
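Illustrative behaviour, assuming the elided match arms keep the first two characters of local parts longer than three characters and star out shorter ones entirely:

// obscure_email("john.doe@example.com") == "jo******@example.com"
// obscure_email("jd@example.com")       == "**@example.com"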
pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
activate_email_2fa(&user, conn).await
} else {
err!("User not found!");
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -1,25 +1,19 @@
use chrono::{TimeDelta, Utc};
use chrono::{Duration, Utc};
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use serde_json::Value;
use crate::{
api::{
core::{log_event, log_user_event},
EmptyResult, JsonResult, PasswordOrOtpData,
},
api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData},
auth::{ClientHeaders, Headers},
crypto,
db::{models::*, DbConn, DbPool},
mail,
util::NumberOrString,
CONFIG,
mail, CONFIG,
};
pub mod authenticator;
pub mod duo;
pub mod duo_oidc;
pub mod email;
pub mod protected_actions;
pub mod webauthn;
@ -51,58 +45,57 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
Json(json!({
"data": twofactors_json,
"object": "list",
"continuationToken": null,
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
}))
}
#[post("/two-factor/get-recover", data = "<data>")]
async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, true, &mut conn).await?;
Ok(Json(json!({
"code": user.totp_recover,
"object": "twoFactorRecover"
"Code": user.totp_recover,
"Object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct RecoverTwoFactor {
master_password_hash: String,
email: String,
recovery_code: String,
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
}
#[post("/two-factor/recover", data = "<data>")]
async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner();
async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.email, &mut conn).await {
let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.master_password_hash) {
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.recovery_code) {
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?;
log_user_event(
EventType::UserRecovered2fa as i32,
@ -128,27 +121,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct DisableTwoFactorData {
master_password_hash: Option<String>,
otp: Option<String>,
r#type: NumberOrString,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
Type: NumberOrString,
}
#[post("/two-factor/disable", data = "<data>")]
async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner();
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let user = headers.user;
// Delete directly after a valid token has been provided
PasswordOrOtpData {
master_password_hash: data.master_password_hash,
otp: data.otp,
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, true, &mut conn)
.await?;
let type_ = data.r#type.into_i32()?;
let type_ = data.Type.into_i32()?;
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
twofactor.delete(&mut conn).await?;
@ -156,94 +149,36 @@ async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, m
.await;
}
if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
if twofactor_disabled {
for user_org in
UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
.await
.into_iter()
{
if user_org.atype < UserOrgType::Admin {
if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
}
user_org.delete(&mut conn).await?;
}
}
}
Ok(Json(json!({
"enabled": false,
"type": type_,
"object": "twoFactorProvider"
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn).await
}
pub async fn enforce_2fa_policy(
user: &User,
act_uuid: &str,
device_type: i32,
ip: &std::net::IpAddr,
conn: &mut DbConn,
) -> EmptyResult {
for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
.await
.into_iter()
{
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
if member.atype < UserOrgType::Admin {
if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
}
let mut member = member;
member.revoke();
member.save(conn).await?;
log_event(
EventType::OrganizationUserRevoked as i32,
&member.uuid,
&member.org_uuid,
act_uuid,
device_type,
ip,
conn,
)
.await;
}
}
Ok(())
}
pub async fn enforce_2fa_policy_for_org(
org_uuid: &str,
act_uuid: &str,
device_type: i32,
ip: &std::net::IpAddr,
conn: &mut DbConn,
) -> EmptyResult {
let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
// Don't enforce the policy for Admins and Owners.
if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
if CONFIG.mail_enabled() {
let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
}
let mut member = member;
member.revoke();
member.save(conn).await?;
log_event(
EventType::OrganizationUserRevoked as i32,
&member.uuid,
org_uuid,
act_uuid,
device_type,
ip,
conn,
)
.await;
}
}
Ok(())
}
pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
debug!("Sending notifications for incomplete 2FA logins");
@ -260,7 +195,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
};
let now = Utc::now().naive_utc();
let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
let time_before = now - time_limit;
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
for login in incomplete_logins {
@ -269,24 +204,10 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
"User {} did not complete a 2FA login within the configured time limit. IP: {}",
user.email, login.ip_address
);
match mail::send_incomplete_2fa_login(
&user.email,
&login.ip_address,
&login.login_time,
&login.device_name,
&DeviceType::from_i32(login.device_type).to_string(),
)
mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
.await
{
Ok(_) => {
if let Err(e) = login.delete(&mut conn).await {
error!("Error deleting incomplete 2FA record: {e:#?}");
}
}
Err(e) => {
error!("Error sending incomplete 2FA email: {e:#?}");
}
}
.expect("Error sending incomplete 2FA email");
login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
}
}

View File

@ -1,8 +1,8 @@
use chrono::{DateTime, TimeDelta, Utc};
use rocket::{serde::json::Json, Route};
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::Route;
use crate::{
api::EmptyResult,
api::{EmptyResult, JsonUpcase},
auth::Headers,
crypto,
db::{
@ -18,7 +18,7 @@ pub fn routes() -> Vec<Route> {
}
/// Data stored in the TwoFactor table in the db
#[derive(Debug, Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Debug)]
pub struct ProtectedActionData {
/// Token issued to validate the protected action
pub token: String,
@ -32,7 +32,7 @@ impl ProtectedActionData {
pub fn new(token: String) -> Self {
Self {
token,
token_sent: Utc::now().timestamp(),
token_sent: Utc::now().naive_utc().timestamp(),
attempts: 0,
}
}
@ -42,7 +42,7 @@ impl ProtectedActionData {
}
pub fn from_json(string: &str) -> Result<Self, Error> {
let res: Result<Self, serde_json::Error> = serde_json::from_str(string);
let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
match res {
Ok(x) => Ok(x),
Err(_) => err!("Could not decode ProtectedActionData from string"),
@ -82,24 +82,23 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
}
#[derive(Deserialize, Serialize, Debug)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct ProtectedActionVerify {
#[serde(rename = "OTP", alias = "otp")]
otp: String,
OTP: String,
}
#[post("/accounts/verify-otp", data = "<data>")]
async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
if !CONFIG.mail_enabled() {
err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
}
let user = headers.user;
let data: ProtectedActionVerify = data.into_inner();
let data: ProtectedActionVerify = data.into_inner().data;
// Delete the token after one validation attempt
// This endpoint only gets called for the vault export, and doesn't need a second attempt
validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
}
pub async fn validate_protected_action_otp(
@ -123,9 +122,9 @@ pub async fn validate_protected_action_otp(
// Check if the token has expired (using the email 2FA expiration time)
let date =
DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.");
let max_time = CONFIG.email_expiration_time() as i64;
if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
pa.delete(conn).await?;
err!("Token has expired")
}

View File

@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, PasswordOrOtpData,
EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData,
},
auth::Headers,
db::{
@ -15,7 +15,6 @@ use crate::{
DbConn,
},
error::Error,
util::NumberOrString,
CONFIG,
};
@ -96,20 +95,20 @@ pub struct WebauthnRegistration {
impl WebauthnRegistration {
fn to_json(&self) -> Value {
json!({
"id": self.id,
"name": self.name,
"Id": self.id,
"Name": self.name,
"migrated": self.migrated,
})
}
}
#[post("/two-factor/get-webauthn", data = "<data>")]
async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. Webauthn disabled")
}
let data: PasswordOrOtpData = data.into_inner();
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -118,15 +117,19 @@ async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn:
let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"enabled": enabled,
"keys": registrations_json,
"object": "twoFactorWebAuthn"
"Enabled": enabled,
"Keys": registrations_json,
"Object": "twoFactorWebAuthn"
})))
}
#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
async fn generate_webauthn_challenge(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -157,94 +160,102 @@ async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Hea
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct EnableWebauthnData {
id: NumberOrString, // 1..5
name: String,
device_response: RegisterPublicKeyCredentialCopy,
master_password_hash: Option<String>,
otp: Option<String>,
Id: NumberOrString, // 1..5
Name: String,
DeviceResponse: RegisterPublicKeyCredentialCopy,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
// This is copied from RegisterPublicKeyCredential to change the Response object's casing
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct RegisterPublicKeyCredentialCopy {
pub id: String,
pub raw_id: Base64UrlSafeData,
pub response: AuthenticatorAttestationResponseRawCopy,
pub r#type: String,
pub Id: String,
pub RawId: Base64UrlSafeData,
pub Response: AuthenticatorAttestationResponseRawCopy,
pub Type: String,
}
// This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct AuthenticatorAttestationResponseRawCopy {
#[serde(rename = "AttestationObject", alias = "attestationObject")]
pub attestation_object: Base64UrlSafeData,
#[serde(rename = "clientDataJson", alias = "clientDataJSON")]
pub client_data_json: Base64UrlSafeData,
pub AttestationObject: Base64UrlSafeData,
pub ClientDataJson: Base64UrlSafeData,
}
impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
Self {
id: r.id,
raw_id: r.raw_id,
id: r.Id,
raw_id: r.RawId,
response: AuthenticatorAttestationResponseRaw {
attestation_object: r.response.attestation_object,
client_data_json: r.response.client_data_json,
attestation_object: r.Response.AttestationObject,
client_data_json: r.Response.ClientDataJson,
},
type_: r.r#type,
type_: r.Type,
}
}
}
// This is copied from PublicKeyCredential to change the Response object's casing
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct PublicKeyCredentialCopy {
pub id: String,
pub raw_id: Base64UrlSafeData,
pub response: AuthenticatorAssertionResponseRawCopy,
pub extensions: Option<AuthenticationExtensionsClientOutputs>,
pub r#type: String,
pub Id: String,
pub RawId: Base64UrlSafeData,
pub Response: AuthenticatorAssertionResponseRawCopy,
pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
pub Type: String,
}
// This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct AuthenticatorAssertionResponseRawCopy {
pub authenticator_data: Base64UrlSafeData,
#[serde(rename = "clientDataJson", alias = "clientDataJSON")]
pub client_data_json: Base64UrlSafeData,
pub signature: Base64UrlSafeData,
pub user_handle: Option<Base64UrlSafeData>,
pub AuthenticatorData: Base64UrlSafeData,
pub ClientDataJson: Base64UrlSafeData,
pub Signature: Base64UrlSafeData,
pub UserHandle: Option<Base64UrlSafeData>,
}
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticationExtensionsClientOutputsCopy {
#[serde(default)]
pub Appid: bool,
}
impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
fn from(r: PublicKeyCredentialCopy) -> Self {
Self {
id: r.id,
raw_id: r.raw_id,
id: r.Id,
raw_id: r.RawId,
response: AuthenticatorAssertionResponseRaw {
authenticator_data: r.response.authenticator_data,
client_data_json: r.response.client_data_json,
signature: r.response.signature,
user_handle: r.response.user_handle,
authenticator_data: r.Response.AuthenticatorData,
client_data_json: r.Response.ClientDataJson,
signature: r.Response.Signature,
user_handle: r.Response.UserHandle,
},
extensions: r.extensions,
type_: r.r#type,
extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
appid: e.Appid,
}),
type_: r.Type,
}
}
}
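The Copy structs above exist purely to absorb casing differences between client generations. A minimal serde sketch of the same technique, assuming fields of this shape (struct and field names are illustrative):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct AttestationResponseSketch {
    // Accept the WebAuthn-spec casing as well as the older spellings.
    #[serde(rename = "clientDataJson", alias = "clientDataJSON", alias = "ClientDataJson")]
    client_data_json: String,
    attestation_object: String,
}

// e.g. serde_json::from_str::<AttestationResponseSketch>(
//     r#"{"clientDataJSON":"...","attestationObject":"..."}"#) parses either way.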
#[post("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner();
async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner().data;
let mut user = headers.user;
PasswordOrOtpData {
master_password_hash: data.master_password_hash,
otp: data.otp,
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -262,13 +273,13 @@ async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut
// Verify the credentials with the saved state
let (credential, _data) =
WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;
WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
// TODO: Check for repeated IDs
registrations.push(WebauthnRegistration {
id: data.id.into_i32()?,
name: data.name,
id: data.Id.into_i32()?,
name: data.Name,
migrated: false,
credential,
@ -284,28 +295,28 @@ async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut
let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"enabled": true,
"keys": keys_json,
"object": "twoFactorU2f"
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
#[put("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_webauthn(data, headers, conn).await
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteU2FData {
id: NumberOrString,
master_password_hash: String,
Id: NumberOrString,
MasterPasswordHash: String,
}
#[delete("/two-factor/webauthn", data = "<data>")]
async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let id = data.id.into_i32()?;
if !headers.user.check_valid_password(&data.master_password_hash) {
async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let id = data.data.Id.into_i32()?;
if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
err!("Invalid password");
}
@ -346,9 +357,9 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn:
let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"enabled": true,
"keys": keys_json,
"object": "twoFactorU2f"
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}
@ -401,8 +412,8 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
),
};
let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.into();
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into();
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

View File

@ -1,12 +1,12 @@
use rocket::serde::json::Json;
use rocket::Route;
use serde_json::Value;
use yubico::{config::Config, verify_async};
use yubico::{config::Config, verify};
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, PasswordOrOtpData,
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
},
auth::Headers,
db::{
@ -21,35 +21,33 @@ pub fn routes() -> Vec<Route> {
routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableYubikeyData {
key1: Option<String>,
key2: Option<String>,
key3: Option<String>,
key4: Option<String>,
key5: Option<String>,
nfc: bool,
master_password_hash: Option<String>,
otp: Option<String>,
Key1: Option<String>,
Key2: Option<String>,
Key3: Option<String>,
Key4: Option<String>,
Key5: Option<String>,
Nfc: bool,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
#[derive(Deserialize, Serialize, Debug)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct YubikeyMetadata {
#[serde(rename = "keys", alias = "Keys")]
keys: Vec<String>,
#[serde(rename = "nfc", alias = "Nfc")]
pub nfc: bool,
Keys: Vec<String>,
pub Nfc: bool,
}
fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];
let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}
fn jsonify_yubikeys(yubikeys: Vec<String>) -> Value {
fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value {
let mut result = Value::Object(serde_json::Map::new());
for (i, key) in yubikeys.into_iter().enumerate() {
@ -76,18 +74,21 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
match CONFIG.yubico_server() {
Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
None => verify_async(otp, config).await,
Some(server) => {
tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
}
None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
}
.map_res("Failed to verify OTP")
.and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordOrOtpData = data.into_inner();
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -100,29 +101,29 @@ async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut c
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.keys);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["enabled"] = Value::Bool(true);
result["nfc"] = Value::Bool(yubikey_metadata.nfc);
result["object"] = Value::String("twoFactorU2f".to_owned());
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"enabled": false,
"object": "twoFactorU2f",
"Enabled": false,
"Object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner();
async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;
PasswordOrOtpData {
master_password_hash: data.master_password_hash.clone(),
otp: data.otp.clone(),
MasterPasswordHash: data.MasterPasswordHash.clone(),
Otp: data.Otp.clone(),
}
.validate(&user, true, &mut conn)
.await?;
@ -138,8 +139,8 @@ async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut c
if yubikeys.is_empty() {
return Ok(Json(json!({
"enabled": false,
"object": "twoFactorU2f",
"Enabled": false,
"Object": "twoFactorU2f",
})));
}
@ -156,8 +157,8 @@ async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut c
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
keys: yubikey_ids,
nfc: data.nfc,
Keys: yubikey_ids,
Nfc: data.Nfc,
};
yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
@ -167,17 +168,17 @@ async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut c
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
let mut result = jsonify_yubikeys(yubikey_metadata.keys);
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
result["enabled"] = Value::Bool(true);
result["nfc"] = Value::Bool(yubikey_metadata.nfc);
result["object"] = Value::String("twoFactorU2f".to_owned());
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn).await
}
@ -189,10 +190,14 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
Ok(())
let result = verify_yubikey_otp(response.to_owned()).await;
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
}
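The main-branch side calls `verify_async` directly, while 1.30.1 wraps the blocking `verify` in `tokio::task::spawn_blocking` so the network round-trip doesn't stall the async executor. A minimal sketch of that offloading pattern, assuming the `tokio` runtime and using a hypothetical stand-in for the blocking verification call:

use std::time::Duration;

// Hypothetical stand-in for a blocking OTP check (the real call would be a
// synchronous network round-trip to the validation server).
fn verify_blocking(otp: &str) -> Result<(), String> {
    std::thread::sleep(Duration::from_millis(50)); // simulate network I/O
    if otp.len() >= 32 {
        Ok(())
    } else {
        Err("OTP too short".into())
    }
}

#[tokio::main]
async fn main() {
    let otp = "cccccckdvvulghbhtkdrbdcfbdbebhhfbhjdjjjjjjjj".to_string();
    // spawn_blocking moves the call onto the dedicated blocking thread pool,
    // keeping the async worker threads free for other tasks.
    let result = tokio::task::spawn_blocking(move || verify_blocking(&otp))
        .await // the JoinHandle resolves once the closure returns
        .unwrap(); // only fails if the blocking task itself panicked
    println!("{result:?}");
}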

View File

@ -1,5 +1,4 @@
use std::{
collections::HashMap,
net::IpAddr,
sync::Arc,
time::{Duration, SystemTime},
@ -17,14 +16,14 @@ use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
net::lookup_host,
};
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
use crate::{
error::Error,
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
util::Cached,
util::{get_reqwest_client_builder, Cached},
CONFIG,
};
@ -50,32 +49,48 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
let pool_idle_timeout = Duration::from_secs(10);
// Reuse the client between requests
get_reqwest_client_builder()
let client = get_reqwest_client_builder()
.cookie_provider(Arc::clone(&cookie_store))
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.default_headers(default_headers.clone())
.trust_dns(true)
.default_headers(default_headers.clone());
match client.build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder()
.cookie_provider(cookie_store)
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.trust_dns(false)
.default_headers(default_headers)
.build()
.expect("Failed to build client")
}
}
});
// Build Regex only once since this takes a lot of time.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Option<Redirect> {
// Special HashMap which holds the user defined Regex to speedup matching the regex.
static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {}", domain);
return None;
}
if should_block_address(domain) {
warn!("Blocked address: {}", domain);
if check_domain_blacklist_reason(domain).await.is_some() {
return None;
}
let url = CONFIG._icon_service_url().replace("{}", domain);
let url = template.replace("{}", domain);
match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@ -88,6 +103,11 @@ fn icon_external(domain: &str) -> Option<Redirect> {
}
}
#[get("/<domain>/icon.png")]
async fn icon_external(domain: &str) -> Option<Redirect> {
icon_redirect(domain, &CONFIG._icon_service_url()).await
}
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@ -101,15 +121,6 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
);
}
if should_block_address(domain) {
warn!("Blocked address: {}", domain);
return Cached::ttl(
(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
}
match get_icon(domain).await {
Some((icon, icon_type)) => {
Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
@ -155,6 +166,155 @@ fn is_valid_domain(domain: &str) -> bool {
true
}
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is resolved
#[allow(clippy::nonminimal_bool)]
#[cfg(not(feature = "unstable"))]
fn is_global(ip: IpAddr) -> bool {
match ip {
IpAddr::V4(ip) => {
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
// globally routable addresses in the 192.0.0.0/24 range.
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
return true;
}
!ip.is_private()
&& !ip.is_loopback()
&& !ip.is_link_local()
&& !ip.is_broadcast()
&& !ip.is_documentation()
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
// Make sure the address is not in 0.0.0.0/8
&& ip.octets()[0] != 0
}
IpAddr::V6(ip) => {
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
true
} else {
!ip.is_multicast()
&& !ip.is_loopback()
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
&& !ip.is_unspecified()
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
}
}
}
}
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
ip.is_global()
}
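A quick sanity check of the fallback implementation, assuming the non-unstable `is_global` above is in scope:

fn main() {
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    assert!(!is_global(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)))); // RFC 1918 private
    assert!(!is_global(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)))); // loopback
    assert!(is_global(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)))); // ordinary public address
    assert!(!is_global(IpAddr::V6(Ipv6Addr::new(0xfc00, 0, 0, 0, 0, 0, 0, 1)))); // unique local fc00::/7
}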
/// These are some tests to check that the implementations match
/// All IPv4 addresses can be checked in 5 mins or so, and they are correct as of nightly 2020-07-11
/// The IPv6 space can't be checked in a reasonable time, so we check about ten billion random addresses, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use ring::rand::{SecureRandom, SystemRandom};
let mut v = [0u8; 16];
let rand = SystemRandom::new();
for i in 0..1_000 {
println!("Iter: {}/1_000", i);
for _ in 0..10_000_000 {
rand.fill(&mut v).expect("Error generating random values");
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
(v[14] as u16) << 8 | v[15] as u16,
(v[12] as u16) << 8 | v[13] as u16,
(v[10] as u16) << 8 | v[11] as u16,
(v[8] as u16) << 8 | v[9] as u16,
(v[6] as u16) << 8 | v[7] as u16,
(v[4] as u16) << 8 | v[5] as u16,
(v[2] as u16) << 8 | v[3] as u16,
(v[0] as u16) << 8 | v[1] as u16,
));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
#[derive(Clone)]
enum DomainBlacklistReason {
Regex,
IP,
}
use cached::proc_macro::cached;
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
// First check whether the blacklist regex matches.
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
// Use the pre-generated Regex stored in the Lazy HashMap if there is one, else generate it.
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
regex.is_match(domain)
} else {
// Clear the current list if the previous key doesn't exist.
// This prevents the HashMap from growing after someone has changed the regex via the admin interface.
if ICON_BLACKLIST_REGEX.len() >= 1 {
ICON_BLACKLIST_REGEX.clear();
}
// Generate the regex and store it in the Lazy Static HashMap.
let blacklist_regex = Regex::new(&blacklist).unwrap();
let is_match = blacklist_regex.is_match(domain);
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
is_match
};
if is_match {
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
return Some(DomainBlacklistReason::Regex);
}
}
if CONFIG.icon_blacklist_non_global_ips() {
if let Ok(s) = lookup_host((domain, 0)).await {
for addr in s {
if !is_global(addr.ip()) {
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
return Some(DomainBlacklistReason::IP);
}
}
}
}
None
}
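The `#[cached]` attribute above memoizes the whole async lookup: `convert` derives the cache key from the `domain` argument, `size = 16` bounds the entry count, and `time = 60` expires entries after a minute. A minimal sketch of the same macro on a simpler function, assuming the `cached` crate with its proc-macro and async support enabled, plus the `tokio` runtime:

use cached::proc_macro::cached;

// Results are kept for 60 seconds, at most 16 entries at a time.
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn is_internal_domain(domain: &str) -> bool {
    println!("cache miss for {domain}"); // printed only when the key is absent
    domain.ends_with(".internal") // hypothetical check, for illustration only
}

#[tokio::main]
async fn main() {
    assert!(is_internal_domain("db.internal").await); // miss: computes and stores
    assert!(is_internal_domain("db.internal").await); // hit: served from the cache
}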
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
@ -182,13 +342,6 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
// If this error comes from the custom resolver, it means this is a blocked domain
// or a non-global IP; don't save the miss file in this case to avoid leaking it
if let Some(error) = CustomHttpClientError::downcast_ref(&e) {
warn!("{error}");
return None;
}
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]).await;
@ -338,12 +491,12 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
let ssldomain = format!("https://{domain}");
let httpdomain = format!("http://{domain}");
// First check the domain as given during the request for HTTPS.
let resp = match get_page(&ssldomain).await {
Err(e) if CustomHttpClientError::downcast_ref(&e).is_none() => {
// If we get an error that is not caused by the blacklist, we retry with HTTP
match get_page(&httpdomain).await {
mut sub_resp @ Err(_) => {
// First check the domain as given during the request for both HTTPS and HTTP.
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
Ok(c) => Ok(c),
Err(e) => {
let mut sub_resp = Err(e);
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
@ -374,12 +527,6 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
}
sub_resp
}
res => res,
}
}
// If we get a result or a blacklist error, just continue
res => res,
};
// Create the iconlist
@ -426,12 +573,21 @@ async fn get_page(url: &str) -> Result<Response, Error> {
}
async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
None => (),
}
let mut client = CLIENT.get(url);
if !referer.is_empty() {
client = client.header("Referer", referer)
}
Ok(client.send().await?.error_for_status()?)
match client.send().await {
Ok(c) => c.error_for_status().map_err(Into::into),
Err(e) => err_silent!(format!("{e}")),
}
}
/// Returns an integer with the priority of the icon type to prefer.
@ -447,9 +603,6 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Err
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
/// ```
fn get_icon_priority(href: &str, sizes: &str) -> u8 {
static PRIORITY_MAP: Lazy<HashMap<&'static str, u8>> =
Lazy::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect());
// Check if there is a dimension set
let (width, height) = parse_sizes(sizes);
@ -474,9 +627,13 @@ fn get_icon_priority(href: &str, sizes: &str) -> u8 {
200
}
} else {
match href.rsplit_once('.') {
Some((_, extension)) => PRIORITY_MAP.get(&*extension.to_ascii_lowercase()).copied().unwrap_or(30),
None => 30,
// Change priority by file extension
if href.ends_with(".png") {
10
} else if href.ends_with(".jpg") || href.ends_with(".jpeg") {
20
} else {
30
}
}
}
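Both sides implement the same extension-based fallback: PNG beats JPEG, and anything else gets the lowest priority. A minimal standalone sketch of that ranking (lower value = preferred), with the map/if-chain collapsed into a plain match:

fn extension_priority(href: &str) -> u8 {
    // rsplit_once splits on the *last* dot, so "a.b.png" yields ("a.b", "png")
    match href.rsplit_once('.') {
        Some((_, ext)) => match ext.to_ascii_lowercase().as_str() {
            "png" => 10,
            "jpg" | "jpeg" => 20,
            _ => 30,
        },
        None => 30,
    }
}

fn main() {
    assert_eq!(extension_priority("https://example.com/favicon.PNG"), 10);
    assert_eq!(extension_priority("https://example.com/logo.svg"), 30);
}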
@ -513,6 +670,12 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
match check_domain_blacklist_reason(domain).await {
Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
None => (),
}
let icon_result = get_icon_url(domain).await?;
let mut buffer = Bytes::new();
@ -548,8 +711,8 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
match get_page_with_referer(&icon.href, &icon_result.referer).await {
Ok(res) => {
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
// Check if the icon type is allowed, else try an icon from the list.
@ -562,6 +725,9 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
info!("Downloaded icon from {}", icon.href);
break;
}
Err(e) => debug!("{:?}", e),
};
}
}
if buffer.is_empty() {
@ -623,7 +789,7 @@ use cookie_store::CookieStore;
pub struct Jar(std::sync::RwLock<CookieStore>);
impl reqwest::cookie::CookieStore for Jar {
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &HeaderValue>, url: &url::Url) {
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError};
use time::Duration;
@ -642,7 +808,7 @@ impl reqwest::cookie::CookieStore for Jar {
cookie_store.store_response_cookies(cookies, url);
}
fn cookies(&self, url: &url::Url) -> Option<HeaderValue> {
fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
let cookie_store = self.0.read().unwrap();
let s = cookie_store
.get_request_values(url)
@ -654,7 +820,7 @@ impl reqwest::cookie::CookieStore for Jar {
return None;
}
HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
}
}

View File

@ -9,13 +9,10 @@ use serde_json::Value;
use crate::{
api::{
core::{
accounts::{PreloginData, RegisterData, _prelogin, _register},
log_user_event,
two_factor::{authenticator, duo, duo_oidc, email, enforce_2fa_policy, webauthn, yubikey},
},
push::register_push_device,
ApiResult, EmptyResult, JsonResult,
core::accounts::{PreloginData, RegisterData, _prelogin, _register},
core::log_user_event,
core::two_factor::{duo, email, email::EmailTokenData, yubikey},
ApiResult, EmptyResult, JsonResult, JsonUpcase,
},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
db::{models::*, DbConn},
@ -106,13 +103,8 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
// Common
let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(conn).await?;
let result = json!({
@ -135,18 +127,6 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
Ok(Json(result))
}
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct MasterPasswordPolicy {
min_complexity: u8,
min_length: u32,
require_lower: bool,
require_upper: bool,
require_numbers: bool,
require_special: bool,
enforce_on_login: bool,
}
async fn _password_login(
data: ConnectData,
user_uuid: &mut Option<String>,
@ -262,10 +242,10 @@ async fn _password_login(
let (mut device, new_device) = get_device(&data, conn, &user).await;
let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
if CONFIG.mail_enabled() && new_device {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() {
@ -279,51 +259,11 @@ async fn _password_login(
}
}
// register push device
if !new_device {
register_push_device(&mut device, conn).await?;
}
// Common
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(conn).await?;
// Fetch all valid Master Password Policies and merge them into one with all true's and larges numbers as one policy
let master_password_policies: Vec<MasterPasswordPolicy> =
OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
&user.uuid,
OrgPolicyType::MasterPassword,
conn,
)
.await
.into_iter()
.filter_map(|p| serde_json::from_str(&p.data).ok())
.collect();
let master_password_policy = if !master_password_policies.is_empty() {
let mut mpp_json = json!(master_password_policies.into_iter().reduce(|acc, policy| {
MasterPasswordPolicy {
min_complexity: acc.min_complexity.max(policy.min_complexity),
min_length: acc.min_length.max(policy.min_length),
require_lower: acc.require_lower || policy.require_lower,
require_upper: acc.require_upper || policy.require_upper,
require_numbers: acc.require_numbers || policy.require_numbers,
require_special: acc.require_special || policy.require_special,
enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
}
}));
mpp_json["object"] = json!("masterPasswordPolicy");
mpp_json
} else {
json!({"object": "masterPasswordPolicy"})
};
let mut result = json!({
"access_token": access_token,
"expires_in": expires_in,
@ -338,9 +278,6 @@ async fn _password_login(
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false,// TODO: Same as above
"ForcePasswordReset": false,
"MasterPasswordPolicy": master_password_policy,
"scope": scope,
"unofficialServer": true,
"UserDecryptionOptions": {
@ -421,7 +358,7 @@ async fn _user_api_key_login(
if CONFIG.mail_enabled() && new_device {
let now = Utc::now().naive_utc();
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device).await {
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() {
@ -437,13 +374,8 @@ async fn _user_api_key_login(
// Common
let scope_vec = vec!["api".into()];
// ---
// Disabled this variable; it was used to generate the JWT.
// Because this might get used in the future, and is added by the Bitwarden Server, let's keep it, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(conn).await?;
info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@ -521,34 +453,32 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
}
async fn twofactor_auth(
user: &User,
user_uuid: &str,
data: &ConnectData,
device: &mut Device,
ip: &ClientIp,
conn: &mut DbConn,
) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
// No twofactor token if twofactor is disabled
if twofactors.is_empty() {
enforce_2fa_policy(user, &user.uuid, device.atype, &ip.ip, conn).await?;
return Ok(None);
}
TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, device.atype, ip, conn).await?;
TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
let twofactor_code = match data.two_factor_token {
Some(ref code) => code,
None => {
err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?, "2FA token not provided")
}
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
};
let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
use crate::api::core::two_factor as _tf;
use crate::crypto::ct_eq;
let selected_data = _selected_data(selected_twofactor);
@ -556,31 +486,17 @@ async fn twofactor_auth(
match TwoFactorType::from_i32(selected_id) {
Some(TwoFactorType::Authenticator) => {
authenticator::validate_totp_code_str(&user.uuid, twofactor_code, &selected_data?, ip, conn).await?
_tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
}
Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
Some(TwoFactorType::Webauthn) => {
_tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
}
Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
Some(TwoFactorType::Duo) => {
match CONFIG.duo_use_iframe() {
true => {
// Legacy iframe prompt flow
duo::validate_duo_login(&user.email, twofactor_code, conn).await?
}
false => {
// OIDC based flow
duo_oidc::validate_duo_login(
&user.email,
twofactor_code,
data.client_id.as_ref().unwrap(),
data.device_identifier.as_ref().unwrap(),
conn,
)
.await?
}
}
_tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
}
Some(TwoFactorType::Email) => {
email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
_tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
}
Some(TwoFactorType::Remember) => {
@ -590,7 +506,7 @@ async fn twofactor_auth(
}
_ => {
err_json!(
_json_err_twofactor(&twofactor_ids, &user.uuid, data, conn).await?,
_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
"2FA Remember token not provided"
)
}
@ -604,7 +520,7 @@ async fn twofactor_auth(
),
}
TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;
TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
if !CONFIG.disable_2fa_remember() && remember == 1 {
Ok(Some(device.refresh_twofactor_remember()))
@ -618,20 +534,14 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
tf.map(|t| t.data).map_res("Two factor doesn't exist")
}
async fn _json_err_twofactor(
providers: &[i32],
user_uuid: &str,
data: &ConnectData,
conn: &mut DbConn,
) -> ApiResult<Value> {
async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
use crate::api::core::two_factor;
let mut result = json!({
"error" : "invalid_grant",
"error_description" : "Two factor required.",
"TwoFactorProviders" : providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
"TwoFactorProviders2" : {}, // { "0" : null }
"MasterPasswordPolicy": {
"Object": "masterPasswordPolicy"
}
"TwoFactorProviders" : providers,
"TwoFactorProviders2" : {} // { "0" : null }
});
for provider in providers {
@ -641,7 +551,7 @@ async fn _json_err_twofactor(
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
result["TwoFactorProviders2"][provider.to_string()] = request.0;
}
@ -651,30 +561,12 @@ async fn _json_err_twofactor(
None => err!("User does not exist"),
};
match CONFIG.duo_use_iframe() {
true => {
// Legacy iframe prompt flow
let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Host": host,
"Signature": signature,
})
}
false => {
// OIDC based flow
let auth_url = duo_oidc::get_duo_auth_url(
&email,
data.client_id.as_ref().unwrap(),
data.device_identifier.as_ref().unwrap(),
conn,
)
.await?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"AuthUrl": auth_url,
})
}
}
});
}
Some(tf_type @ TwoFactorType::YubiKey) => {
@ -686,11 +578,13 @@ async fn _json_err_twofactor(
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Nfc": yubikey_metadata.nfc,
"Nfc": yubikey_metadata.Nfc,
})
}
Some(tf_type @ TwoFactorType::Email) => {
use crate::api::core::two_factor as _tf;
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
Some(tf) => tf,
None => err!("No twofactor email registered"),
@ -698,10 +592,10 @@ async fn _json_err_twofactor(
// Send email immediately if email is the only 2FA option
if providers.len() == 1 {
email::send_token(user_uuid, conn).await?
_tf::email::send_token(user_uuid, conn).await?
}
let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
let email_data = EmailTokenData::from_json(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Email": email::obscure_email(&email_data.email),
})
@ -715,18 +609,19 @@ async fn _json_err_twofactor(
}
#[post("/accounts/prelogin", data = "<data>")]
async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
_register(data, conn).await
}
// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default, FromForm)]
#[allow(non_snake_case)]
struct ConnectData {
#[field(name = uncased("grant_type"))]
#[field(name = uncased("granttype"))]

View File

@ -23,7 +23,7 @@ pub use crate::api::{
icons::routes as icons_routes,
identity::routes as identity_routes,
notifications::routes as notifications_routes,
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
push::{
push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
unregister_push_device,
@ -33,18 +33,23 @@ pub use crate::api::{
web::static_files,
};
use crate::db::{models::User, DbConn};
use crate::util;
// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
pub type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;
type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
type JsonVec<T> = Json<Vec<T>>;
// Common structs representing JSON data received
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
struct PasswordOrOtpData {
master_password_hash: Option<String>,
otp: Option<String>,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
}
impl PasswordOrOtpData {
@ -54,7 +59,7 @@ impl PasswordOrOtpData {
pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
(Some(pw_hash), None) => {
if !user.check_valid_password(pw_hash) {
err!("Invalid password");
@ -68,3 +73,30 @@ impl PasswordOrOtpData {
Ok(())
}
}
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
enum NumberOrString {
Number(i32),
String(String),
}
impl NumberOrString {
fn into_string(self) -> String {
match self {
NumberOrString::Number(n) => n.to_string(),
NumberOrString::String(s) => s,
}
}
#[allow(clippy::wrong_self_convention)]
fn into_i32(&self) -> ApiResult<i32> {
use std::num::ParseIntError as PIE;
match self {
NumberOrString::Number(n) => Ok(*n),
NumberOrString::String(s) => {
s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
}
}
}
}
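`NumberOrString` leans on serde's `untagged` representation: the variants are tried in declaration order until one deserializes. A minimal sketch of that behavior, assuming the `serde` and `serde_json` crates:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

fn main() {
    // A JSON number matches the first variant; a JSON string falls through to the second
    let n: NumberOrString = serde_json::from_str("42").unwrap();
    let s: NumberOrString = serde_json::from_str("\"42\"").unwrap();
    println!("{n:?} {s:?}");
}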

View File

@ -1,11 +1,23 @@
use std::{net::IpAddr, sync::Arc, time::Duration};
use std::{
net::{IpAddr, SocketAddr},
sync::Arc,
time::Duration,
};
use chrono::{NaiveDateTime, Utc};
use rmpv::Value;
use rocket::{futures::StreamExt, Route};
use tokio::sync::mpsc::Sender;
use rocket_ws::{Message, WebSocket};
use rocket::{
futures::{SinkExt, StreamExt},
Route,
};
use tokio::{
net::{TcpListener, TcpStream},
sync::mpsc::Sender,
};
use tokio_tungstenite::{
accept_hdr_async,
tungstenite::{handshake, Message},
};
use crate::{
auth::{ClientIp, WsAccessTokenHeader},
@ -18,7 +30,7 @@ use crate::{
use once_cell::sync::Lazy;
pub static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
Arc::new(WebSocketUsers {
map: Arc::new(dashmap::DashMap::new()),
})
@ -35,15 +47,8 @@ use super::{
push_send_update, push_user_update,
};
static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled());
pub fn routes() -> Vec<Route> {
if CONFIG.enable_websocket() {
routes![websockets_hub, anonymous_websockets_hub]
} else {
info!("WebSocket are disabled, realtime sync functionality will not work!");
routes![]
}
}
#[derive(FromForm, Debug)]
@ -103,7 +108,7 @@ impl Drop for WSAnonymousEntryMapGuard {
#[get("/hub?<data..>")]
fn websockets_hub<'r>(
ws: WebSocket,
ws: rocket_ws::WebSocket,
data: WsAccessToken,
ip: ClientIp,
header_token: WsAccessTokenHeader,
@ -159,11 +164,6 @@ fn websockets_hub<'r>(
continue;
}
}
// Prevent sending anything back when a `Close` Message is received.
// Just break the loop
Message::Close(_) => break,
// Just echo anything else the client sends
_ => yield message,
}
@ -187,7 +187,11 @@ fn websockets_hub<'r>(
}
#[get("/anonymous-hub?<token..>")]
fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
fn anonymous_websockets_hub<'r>(
ws: rocket_ws::WebSocket,
token: String,
ip: ClientIp,
) -> Result<rocket_ws::Stream!['r], Error> {
let addr = ip.ip;
info!("Accepting Anonymous Rocket WS connection from {addr}");
@ -226,11 +230,6 @@ fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> R
continue;
}
}
// Prevent sending anything back when a `Close` Message is received.
// Just break the loop
Message::Close(_) => break,
// Just echo anything else the client sends
_ => yield message,
}
@ -288,8 +287,8 @@ fn serialize(val: Value) -> Vec<u8> {
}
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.and_utc().timestamp();
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;
let bs = timestamp.to_be_bytes();
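The `nanos << 34 | seconds` packing follows the MessagePack timestamp-64 layout: the low 34 bits carry the epoch seconds and the high 30 bits the nanoseconds, serialized big-endian. A small standalone sketch of that bit layout, assuming both values fit their fields:

// MessagePack "timestamp 64": 30-bit nanoseconds in the high bits,
// 34-bit seconds in the low bits.
fn pack_timestamp64(seconds: i64, nanos: i64) -> [u8; 8] {
    debug_assert!(0 <= seconds && seconds < (1 << 34));
    debug_assert!(0 <= nanos && nanos < (1 << 30));
    ((nanos << 34) | seconds).to_be_bytes()
}

fn main() {
    let bs = pack_timestamp64(1_700_000_000, 500_000_000);
    println!("{bs:02x?}"); // the nanosecond field dominates the leading bytes
}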
@ -340,19 +339,13 @@ impl WebSocketUsers {
// NOTE: The last modified date needs to be updated before calling these methods
pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let data = create_update(
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
ut,
None,
);
if CONFIG.enable_websocket() {
self.send_update(&user.uuid, &data).await;
}
if CONFIG.push_enabled() {
push_user_update(ut, user);
@ -360,19 +353,13 @@ impl WebSocketUsers {
}
pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let data = create_update(
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
UpdateType::LogOut,
acting_device_uuid.clone(),
);
if CONFIG.enable_websocket() {
self.send_update(&user.uuid, &data).await;
}
if CONFIG.push_enabled() {
push_logout(user, acting_device_uuid);
@ -386,10 +373,6 @@ impl WebSocketUsers {
acting_device_uuid: &String,
conn: &mut DbConn,
) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let data = create_update(
vec![
("Id".into(), folder.uuid.clone().into()),
@ -400,9 +383,7 @@ impl WebSocketUsers {
Some(acting_device_uuid.into()),
);
if CONFIG.enable_websocket() {
self.send_update(&folder.user_uuid, &data).await;
}
if CONFIG.push_enabled() {
push_folder_update(ut, folder, acting_device_uuid, conn).await;
@ -418,17 +399,13 @@ impl WebSocketUsers {
collection_uuids: Option<Vec<String>>,
conn: &mut DbConn,
) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let org_uuid = convert_option(cipher.organization_uuid.clone());
// Depending on whether collections are provided or not, we need different values for the following variables.
// The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
(
Value::Nil,
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<Value>>()),
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
serialize_date(Utc::now().naive_utc()),
)
} else {
@ -447,11 +424,9 @@ impl WebSocketUsers {
Some(acting_device_uuid.into()),
);
if CONFIG.enable_websocket() {
for uuid in user_uuids {
self.send_update(uuid, &data).await;
}
}
if CONFIG.push_enabled() && user_uuids.len() == 1 {
push_cipher_update(ut, cipher, acting_device_uuid, conn).await;
@ -466,10 +441,6 @@ impl WebSocketUsers {
acting_device_uuid: &String,
conn: &mut DbConn,
) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let user_uuid = convert_option(send.user_uuid.clone());
let data = create_update(
@ -482,11 +453,9 @@ impl WebSocketUsers {
None,
);
if CONFIG.enable_websocket() {
for uuid in user_uuids {
self.send_update(uuid, &data).await;
}
}
if CONFIG.push_enabled() && user_uuids.len() == 1 {
push_send_update(ut, send, acting_device_uuid, conn).await;
}
@ -499,18 +468,12 @@ impl WebSocketUsers {
acting_device_uuid: &String,
conn: &mut DbConn,
) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let data = create_update(
vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
UpdateType::AuthRequest,
Some(acting_device_uuid.to_string()),
);
if CONFIG.enable_websocket() {
self.send_update(user_uuid, &data).await;
}
if CONFIG.push_enabled() {
push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
@ -524,18 +487,12 @@ impl WebSocketUsers {
approving_device_uuid: String,
conn: &mut DbConn,
) {
// Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED {
return;
}
let data = create_update(
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
UpdateType::AuthRequestResponse,
approving_device_uuid.clone().into(),
);
if CONFIG.enable_websocket() {
self.send_update(auth_response_uuid, &data).await;
}
if CONFIG.push_enabled() {
push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
@ -559,9 +516,6 @@ impl AnonymousWebSocketSubscriptions {
}
pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
if !CONFIG.enable_websocket() {
return;
}
let data = create_anonymous_update(
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
UpdateType::AuthRequestResponse,
@ -656,3 +610,127 @@ pub enum UpdateType {
pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
pub fn start_notification_server() -> Arc<WebSocketUsers> {
let users = Arc::clone(&WS_USERS);
if CONFIG.websocket_enabled() {
let users2 = Arc::<WebSocketUsers>::clone(&users);
tokio::spawn(async move {
let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");
let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
CONFIG.set_ws_shutdown_handle(shutdown_tx);
loop {
tokio::select! {
Ok((stream, addr)) = listener.accept() => {
tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
}
_ = &mut shutdown_rx => {
break;
}
}
}
info!("Shutting down WebSockets server!")
});
}
users
}
async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
let mut user_uuid: Option<String> = None;
info!("Accepting WS connection from {addr}");
// Accept connection, do initial handshake, validate auth token and get the user ID
use handshake::server::{Request, Response};
let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
if let Some(token) = get_request_token(req) {
if let Ok(claims) = crate::auth::decode_login(&token) {
user_uuid = Some(claims.sub);
return Ok(res);
}
}
Err(Response::builder().status(401).body(None).unwrap())
})
.await?;
let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
let (mut rx, guard) = {
// Add a channel to send messages to this client to the map
let entry_uuid = uuid::Uuid::new_v4();
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
(rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
};
let _guard = guard;
let mut interval = tokio::time::interval(Duration::from_secs(15));
loop {
tokio::select! {
res = stream.next() => {
match res {
Some(Ok(message)) => {
match message {
// Respond to any pings
Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
Message::Pong(_) => {/* Ignored */},
// We should receive an initial message with the protocol and version, and we will reply to it
Message::Text(ref message) => {
let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
stream.send(Message::binary(INITIAL_RESPONSE)).await?;
continue;
}
}
// Just echo anything else the client sends
_ => stream.send(message).await?,
}
}
_ => break,
}
}
res = rx.recv() => {
match res {
Some(res) => stream.send(res).await?,
None => break,
}
}
_ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
}
}
Ok(())
}
fn get_request_token(req: &handshake::server::Request) -> Option<String> {
const ACCESS_TOKEN_KEY: &str = "access_token=";
if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
if let Some(token_part) = auth.strip_prefix("Bearer ") {
return Some(token_part.to_owned());
}
}
if let Some(params) = req.uri().query() {
let params_iter = params.split('&').take(1);
for val in params_iter {
if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
return Some(stripped.to_owned());
}
}
}
None
}

View File

@ -1,14 +1,11 @@
use reqwest::{
header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
Method,
};
use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
use serde_json::Value;
use tokio::sync::RwLock;
use crate::{
api::{ApiResult, EmptyResult, UpdateType},
db::models::{Cipher, Device, Folder, Send, User},
http_client::make_http_request,
util::get_reqwest_client,
CONFIG,
};
@ -53,10 +50,7 @@ async fn get_auth_push_token() -> ApiResult<String> {
("client_secret", &client_secret),
];
let res = match make_http_request(Method::POST, &format!("{}/connect/token", CONFIG.push_identity_uri()))?
.form(&params)
.send()
.await
let res = match get_reqwest_client().post("https://identity.bitwarden.com/connect/token").form(&params).send().await
{
Ok(r) => r,
Err(e) => err!(format!("Error getting push token from bitwarden server: {e}")),
@ -78,62 +72,45 @@ async fn get_auth_push_token() -> ApiResult<String> {
Ok(push_token.access_token.clone())
}
pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult {
if !CONFIG.push_enabled() || !device.is_push_device() || device.is_registered() {
pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult {
if !CONFIG.push_enabled() {
return Ok(());
}
if device.push_token.is_none() {
warn!("Skipping the registration of the device {} because the push_token field is empty.", device.uuid);
warn!("To get rid of this message you need to clear the app data and reconnect the device.");
return Ok(());
}
debug!("Registering Device {}", device.uuid);
// generate a random push_uuid so we know the device is registered
device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
let auth_push_token = get_auth_push_token().await?;
// Needed to register a device for push to Bitwarden:
let data = json!({
"userId": device.user_uuid,
"userId": user_uuid,
"deviceId": device.push_uuid,
"identifier": device.uuid,
"type": device.atype,
"pushToken": device.push_token
});
let auth_push_token = get_auth_push_token().await?;
let auth_header = format!("Bearer {}", &auth_push_token);
if let Err(e) = make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/register"))?
get_reqwest_client()
.post(CONFIG.push_relay_uri() + "/push/register")
.header(CONTENT_TYPE, "application/json")
.header(ACCEPT, "application/json")
.header(AUTHORIZATION, auth_header)
.json(&data)
.send()
.await?
.error_for_status()
{
err!(format!("An error occurred while proceeding registration of a device: {e}"));
}
if let Err(e) = device.save(conn).await {
err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
}
.error_for_status()?;
Ok(())
}
pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
if !CONFIG.push_enabled() || push_uuid.is_none() {
pub async fn unregister_push_device(uuid: String) -> EmptyResult {
if !CONFIG.push_enabled() {
return Ok(());
}
let auth_push_token = get_auth_push_token().await?;
let auth_header = format!("Bearer {}", &auth_push_token);
match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))?
match get_reqwest_client()
.delete(CONFIG.push_relay_uri() + "/push/" + &uuid)
.header(AUTHORIZATION, auth_header)
.send()
.await
@ -266,15 +243,8 @@ async fn send_to_push_relay(notification_data: Value) {
let auth_header = format!("Bearer {}", &auth_push_token);
let req = match make_http_request(Method::POST, &(CONFIG.push_relay_uri() + "/push/send")) {
Ok(r) => r,
Err(e) => {
error!("An error occurred while sending a send update to the push relay: {}", e);
return;
}
};
if let Err(e) = req
if let Err(e) = get_reqwest_client()
.post(CONFIG.push_relay_uri() + "/push/send")
.header(ACCEPT, "application/json")
.header(CONTENT_TYPE, "application/json")
.header(AUTHORIZATION, &auth_header)

View File

@ -170,11 +170,11 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
}
"bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
"bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
"jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
"jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
"datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
"datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
"jquery-3.7.1.slim.js" => {
Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.1.slim.js")))
"jquery-3.7.0.slim.js" => {
Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.0.slim.js")))
}
_ => err!(format!("Static file not found: {filename}")),
}

View File

@ -1,24 +1,18 @@
// JWT Handling
//
use chrono::{TimeDelta, Utc};
use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use chrono::{Duration, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::{Lazy, OnceCell};
use openssl::rsa::Rsa;
use once_cell::sync::Lazy;
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use std::{
env,
fs::File,
io::{Read, Write},
net::IpAddr,
};
use crate::{error::Error, CONFIG};
const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
@ -32,55 +26,23 @@ static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.do
static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));
static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
let key =
std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
});
pub fn initialize_keys() -> Result<(), Error> {
fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
let mut priv_key_buffer = Vec::with_capacity(2048);
let mut priv_key_file = File::options()
.create(create_if_missing)
.truncate(false)
.read(true)
.write(create_if_missing)
.open(CONFIG.private_rsa_key())?;
#[allow(clippy::verbose_file_reads)]
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
let rsa_key = if bytes_read > 0 {
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
} else if create_if_missing {
// Only create the key if the file doesn't exist or is empty
let rsa_key = Rsa::generate(2048)?;
priv_key_buffer = rsa_key.private_key_to_pem()?;
priv_key_file.write_all(&priv_key_buffer)?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
rsa_key
} else {
err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
};
Ok((rsa_key, priv_key_buffer))
}
let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
let pub_key_buffer = priv_key.public_key_to_pem()?;
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
let dec: DecodingKey = DecodingKey::from_rsa_pem(&pub_key_buffer)?;
if PRIVATE_RSA_KEY.set(enc).is_err() {
err!("PRIVATE_RSA_KEY must only be initialized once")
}
if PUBLIC_RSA_KEY.set(dec).is_err() {
err!("PUBLIC_RSA_KEY must only be initialized once")
}
Ok(())
pub fn load_keys() {
Lazy::force(&PRIVATE_RSA_KEY);
Lazy::force(&PUBLIC_RSA_KEY);
}
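The main-branch side trades the panicking `Lazy` statics for `OnceCell`s filled by a fallible `initialize_keys()`, so a missing or malformed key file can surface as an error at startup instead of a panic on first use. A minimal sketch of that set-once/wait pattern, assuming the `once_cell` crate and a hypothetical key path:

use once_cell::sync::OnceCell;

static KEY: OnceCell<String> = OnceCell::new();

// Explicit, fallible initialization: errors are returned to the caller
// instead of panicking inside a Lazy closure on first use.
fn initialize_key() -> Result<(), String> {
    let key = std::fs::read_to_string("data/rsa_key.pem") // hypothetical path
        .map_err(|e| format!("Error loading key: {e}"))?;
    KEY.set(key).map_err(|_| "KEY must only be initialized once".to_string())
}

fn main() {
    if let Err(e) = initialize_key() {
        eprintln!("Fatal: {e}"); // fail fast, before serving any request
        std::process::exit(1);
    }
    // OnceCell::wait blocks until the value is set; after a successful
    // initialize_key() it returns immediately.
    println!("key loaded: {} bytes", KEY.wait().len());
}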
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jsonwebtoken::encode(&JWT_HEADER, claims, PRIVATE_RSA_KEY.wait()) {
match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
Ok(token) => token,
Err(e) => panic!("Error encoding jwt {e}"),
}
@ -94,7 +56,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
validation.set_issuer(&[issuer]);
let token = token.replace(char::is_whitespace, "");
match jsonwebtoken::decode(&token, PUBLIC_RSA_KEY.wait(), &validation) {
match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
Ok(d) => Ok(d.claims),
Err(err) => match *err.kind() {
ErrorKind::InvalidToken => err!("Token is invalid"),
@ -157,16 +119,10 @@ pub struct LoginJwtClaims {
pub email: String,
pub email_verified: bool,
// ---
// Disabled adding these keys to the JWT since they could cause the JWT to get too large.
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients.
// Because these might get used in the future, and they are added by the Bitwarden Server, let's keep them, but commented out.
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// pub orgowner: Vec<String>,
// pub orgadmin: Vec<String>,
// pub orguser: Vec<String>,
// pub orgmanager: Vec<String>,
pub orgowner: Vec<String>,
pub orgadmin: Vec<String>,
pub orguser: Vec<String>,
pub orgmanager: Vec<String>,
// user security_stamp
pub sstamp: String,
@ -202,11 +158,11 @@ pub fn generate_invite_claims(
user_org_id: Option<String>,
invited_by_email: Option<String>,
) -> InviteJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
InviteJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
exp: (time_now + Duration::hours(expire_hours)).timestamp(),
iss: JWT_INVITE_ISSUER.to_string(),
sub: uuid,
email,
@ -240,11 +196,11 @@ pub fn generate_emergency_access_invite_claims(
grantor_name: String,
grantor_email: String,
) -> EmergencyAccessInviteJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
EmergencyAccessInviteJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
exp: (time_now + Duration::hours(expire_hours)).timestamp(),
iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
sub: uuid,
email,
@ -271,10 +227,10 @@ pub struct OrgApiKeyLoginJwtClaims {
}
pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
OrgApiKeyLoginJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
exp: (time_now + Duration::hours(1)).timestamp(),
iss: JWT_ORG_API_KEY_ISSUER.to_string(),
sub: uuid,
client_id: format!("organization.{org_id}"),
@ -298,10 +254,10 @@ pub struct FileDownloadClaims {
}
pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
FileDownloadClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
exp: (time_now + Duration::minutes(5)).timestamp(),
iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
sub: uuid,
file_id,
@ -321,42 +277,42 @@ pub struct BasicJwtClaims {
}
pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
BasicJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
exp: (time_now + Duration::hours(expire_hours)).timestamp(),
iss: JWT_DELETE_ISSUER.to_string(),
sub: uuid,
}
}
pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
BasicJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
exp: (time_now + Duration::hours(expire_hours)).timestamp(),
iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
sub: uuid,
}
}
pub fn generate_admin_claims() -> BasicJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
BasicJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
iss: JWT_ADMIN_ISSUER.to_string(),
sub: "admin_panel".to_string(),
}
}
pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
let time_now = Utc::now();
let time_now = Utc::now().naive_utc();
BasicJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
exp: (time_now + Duration::minutes(2)).timestamp(),
iss: JWT_SEND_ISSUER.to_string(),
sub: format!("{send_id}/{file_id}"),
}
@ -393,6 +349,8 @@ impl<'r> FromRequest<'r> for Host {
referer.to_string()
} else {
// Try to guess from the headers
use std::env;
let protocol = if let Some(proto) = headers.get_one("X-Forwarded-Proto") {
proto
} else if env::var("ROCKET_TLS").is_ok() {
@ -403,8 +361,10 @@ impl<'r> FromRequest<'r> for Host {
let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
host
} else if let Some(host) = headers.get_one("Host") {
host
} else {
headers.get_one("Host").unwrap_or_default()
""
};
format!("{protocol}://{host}")
@ -417,6 +377,7 @@ impl<'r> FromRequest<'r> for Host {
}
pub struct ClientHeaders {
pub host: String,
pub device_type: i32,
pub ip: ClientIp,
}
@ -426,6 +387,7 @@ impl<'r> FromRequest<'r> for ClientHeaders {
type Error = &'static str;
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let host = try_outcome!(Host::from_request(request).await).host;
let ip = match ClientIp::from_request(request).await {
Outcome::Success(ip) => ip,
_ => err_handler!("Error getting Client IP"),
@ -435,6 +397,7 @@ impl<'r> FromRequest<'r> for ClientHeaders {
request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
Outcome::Success(ClientHeaders {
host,
device_type,
ip,
})
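The `device-type` header is parsed into an `i32`, falling back to 14 (upstream's "Unknown" device type; see the DeviceType.cs link in the device.rs hunk further down) both when the header is missing and when it fails to parse. Equivalent logic in isolation:

```rust
// 14 == DeviceType::Unknown upstream; used when the header is absent or unparsable.
fn parse_device_type(header: Option<&str>) -> i32 {
    header.and_then(|d| d.parse::<i32>().ok()).unwrap_or(14)
}
```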
@ -506,7 +469,7 @@ impl<'r> FromRequest<'r> for Headers {
// Check if the stamp exception has expired first.
// Then, check if the current route matches any of the allowed routes.
// After that, check that the stamp in the exception matches the one in the claims.
if Utc::now().timestamp() > stamp_exception.expire {
if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
// If the stamp exception has expired, remove it from the database.
// This prevents checking this stamp exception for new requests.
let mut user = user;
@ -540,6 +503,7 @@ pub struct OrgHeaders {
pub user: User,
pub org_user_type: UserOrgType,
pub org_user: UserOrganization,
pub org_id: String,
pub ip: ClientIp,
}
@ -602,6 +566,7 @@ impl<'r> FromRequest<'r> for OrgHeaders {
}
},
org_user,
org_id: String::from(org_id),
ip: headers.ip,
})
}
@ -678,6 +643,7 @@ pub struct ManagerHeaders {
pub host: String,
pub device: Device,
pub user: User,
pub org_user_type: UserOrgType,
pub ip: ClientIp,
}
@ -695,7 +661,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};
if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@ -706,6 +672,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
host: headers.host,
device: headers.device,
user: headers.user,
org_user_type: headers.org_user_type,
ip: headers.ip,
})
} else {
@ -732,6 +699,7 @@ pub struct ManagerHeadersLoose {
pub device: Device,
pub user: User,
pub org_user: UserOrganization,
pub org_user_type: UserOrgType,
pub ip: ClientIp,
}
@ -747,6 +715,7 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
device: headers.device,
user: headers.user,
org_user: headers.org_user,
org_user_type: headers.org_user_type,
ip: headers.ip,
})
} else {
@ -765,6 +734,10 @@ impl From<ManagerHeadersLoose> for Headers {
}
}
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_full_access()
|| Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}
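On the main side this check lives on `Collection::can_access_collection` (see the collection.rs hunk below) and also consults group access; the 1.30.1 helper here grants access on full org access or any directly accessible collection. A simplified, synchronous model of the rule, with hypothetical stand-in types for the DB rows:

```rust
// Hypothetical stand-ins: the real check is async and queries users_collections.
struct OrgUser {
    full_access: bool,
    assigned_collections: Vec<String>,
}

fn can_access_collection(org_user: &OrgUser, col_id: &str) -> bool {
    org_user.full_access || org_user.assigned_collections.iter().any(|c| c == col_id)
}
```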
impl ManagerHeaders {
pub async fn from_loose(
@ -776,7 +749,7 @@ impl ManagerHeaders {
if uuid::Uuid::parse_str(col_id).is_err() {
err!("Collection Id is malformed!");
}
if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
if !can_access_collection(&h.org_user, col_id, conn).await {
err!("You don't have access to all collections!");
}
}
@ -785,12 +758,14 @@ impl ManagerHeaders {
host: h.host,
device: h.device,
user: h.user,
org_user_type: h.org_user_type,
ip: h.ip,
})
}
}
pub struct OwnerHeaders {
pub host: String,
pub device: Device,
pub user: User,
pub ip: ClientIp,
@ -804,6 +779,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type == UserOrgType::Owner {
Outcome::Success(Self {
host: headers.host,
device: headers.device,
user: headers.user,
ip: headers.ip,
@ -817,6 +793,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
//
// Client IP address detection
//
use std::net::IpAddr;
pub struct ClientIp {
pub ip: IpAddr,
@ -849,35 +826,6 @@ impl<'r> FromRequest<'r> for ClientIp {
}
}
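The body of the `ClientIp` guard is elided by this hunk; based on the `ip_header` setting further down in config.rs (default `X-Real-IP`), the resolution order is roughly: configured header first, socket peer address as fallback. A sketch under that assumption:

```rust
use std::net::IpAddr;

// Sketch: take the first entry of a possibly comma-separated header value,
// fall back to the socket peer address when the header is absent or invalid.
fn client_ip(header_value: Option<&str>, socket_ip: Option<IpAddr>) -> Option<IpAddr> {
    header_value
        .and_then(|v| v.split(',').next())
        .and_then(|v| v.trim().parse::<IpAddr>().ok())
        .or(socket_ip)
}
```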
pub struct Secure {
pub https: bool,
}
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Secure {
type Error = ();
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = request.headers();
// Try to guess from the headers
let protocol = match headers.get_one("X-Forwarded-Proto") {
Some(proto) => proto,
None => {
if env::var("ROCKET_TLS").is_ok() {
"https"
} else {
"http"
}
}
};
Outcome::Success(Secure {
https: protocol == "https",
})
}
}
pub struct WsAccessTokenHeader {
pub access_token: Option<String>,
}

View File

@ -9,7 +9,7 @@ use reqwest::Url;
use crate::{
db::DbConnType,
error::Error,
util::{get_env, get_env_bool, parse_experimental_client_feature_flags},
util::{get_env, get_env_bool},
};
static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
@ -39,6 +39,7 @@ macro_rules! make_config {
struct Inner {
rocket_shutdown_handle: Option<rocket::Shutdown>,
ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,
templates: Handlebars<'static>,
config: ConfigItems,
@ -146,12 +147,6 @@ macro_rules! make_config {
config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
config.org_creation_users = config.org_creation_users.trim().to_lowercase();
// Copy the values from the deprecated flags to the new ones
if config.http_request_block_regex.is_none() {
config.http_request_block_regex = config.icon_blacklist_regex.clone();
}
config
}
}
@ -331,7 +326,7 @@ macro_rules! make_config {
}
}
}};
( @build $value:expr, $config:expr, generated, $default_fn:expr ) => {{
( @build $value:expr, $config:expr, gen, $default_fn:expr ) => {{
let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn;
f($config)
}};
@ -352,7 +347,7 @@ macro_rules! make_config {
// def: Use a default value
// auto: Value is auto generated based on other values
// option: Value is optional
// generated: Value is always autogenerated and its original value ignored
// gen: Value is always autogenerated and its original value ignored
make_config! {
folders {
/// Data folder |> Main data folder
@ -376,15 +371,17 @@ make_config! {
},
ws {
/// Enable websocket notifications
enable_websocket: bool, false, def, true;
websocket_enabled: bool, false, def, false;
/// Websocket address
websocket_address: String, false, def, "0.0.0.0".to_string();
/// Websocket port
websocket_port: u16, false, def, 3012;
},
push {
/// Enable push notifications
push_enabled: bool, false, def, false;
/// Push relay uri
/// Push relay base uri
push_relay_uri: String, false, def, "https://push.bitwarden.com".to_string();
/// Push identity uri
push_identity_uri: String, false, def, "https://identity.bitwarden.com".to_string();
/// Installation id |> The installation id from https://bitwarden.com/host
push_installation_id: Pass, false, def, String::new();
/// Installation key |> The installation key from https://bitwarden.com/host
@ -415,9 +412,7 @@ make_config! {
/// Auth Request cleanup schedule |> Cron schedule of the job that cleans old auth requests from the database.
/// Defaults to every minute. Set blank to disable this job.
auth_request_purge_schedule: String, false, def, "30 * * * * *".to_string();
/// Duo Auth context cleanup schedule |> Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
/// Defaults to once every minute. Set blank to disable this job.
duo_context_purge_schedule: String, false, def, "30 * * * * *".to_string();
},
/// General settings
@ -445,8 +440,6 @@ make_config! {
user_attachment_limit: i64, true, option;
/// Per-organization attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per org. When this limit is reached, org members will not be allowed to upload further attachments for ciphers owned by that org.
org_attachment_limit: i64, true, option;
/// Per-user send storage limit (KB) |> Max kilobytes of sends storage allowed per user. When this limit is reached, the user will not be allowed to upload further sends.
user_send_limit: i64, true, option;
/// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
/// If unset, trashed items are not auto-deleted. This setting applies globally, so make
@ -485,7 +478,7 @@ make_config! {
/// Invitation token expiration time (in hours) |> The number of hours after which an organization invite token, emergency access invite token,
/// email verification token and deletion request token will expire (must be at least 1)
invitation_expiration_hours: u32, false, def, 120;
/// Enable emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
/// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
emergency_access_allowed: bool, true, def, true;
/// Allow email change |> Controls whether users can change their email. This setting applies globally to all users.
email_change_allowed: bool, true, def, true;
@ -515,7 +508,7 @@ make_config! {
/// Set to the string "none" (without quotes), to disable any headers and just use the remote IP
ip_header: String, true, def, "X-Real-IP".to_string();
/// Internal IP header property, used to avoid recomputing each time
_ip_header_enabled: bool, false, generated, |c| &c.ip_header.trim().to_lowercase() != "none";
_ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
/// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
/// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
/// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
@ -524,9 +517,9 @@ make_config! {
/// corresponding icon at the external service.
icon_service: String, false, def, "internal".to_string();
/// _icon_service_url
_icon_service_url: String, false, generated, |c| generate_icon_service_url(&c.icon_service);
_icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
/// _icon_service_csp
_icon_service_csp: String, false, generated, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
_icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
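A custom `icon_service` is a URL template containing exactly one `{}`, which is substituted with the requested domain before the client is redirected. The substitution itself is a single `replacen`; this sketch ignores the predefined service names that `generate_icon_service_url` also has to map:

```rust
// Sketch: expand an icon-service template, e.g.
// expand_icon_template("https://icon.example.com/domain/{}", "github.com")
//   == "https://icon.example.com/domain/github.com"
fn expand_icon_template(template: &str, domain: &str) -> String {
    template.replacen("{}", domain, 1)
}
```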
/// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
/// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
/// Temporary redirects are useful while testing different icon services, but once a service
@ -539,18 +532,12 @@ make_config! {
icon_cache_negttl: u64, true, def, 259_200;
/// Icon download timeout |> Number of seconds when to stop attempting to download an icon.
icon_download_timeout: u64, true, def, 10;
/// [Deprecated] Icon blacklist Regex |> Use `http_request_block_regex` instead
icon_blacklist_regex: String, false, option;
/// [Deprecated] Icon blacklist non global IPs |> Use `http_request_block_non_global_ips` instead
icon_blacklist_non_global_ips: bool, false, def, true;
/// Block HTTP domains/IPs by Regex |> Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
/// Icon blacklist Regex |> Any domains or IPs that match this regex won't be fetched by the icon service.
/// Useful to hide other servers in the local network. Check the WIKI for more details
http_request_block_regex: String, true, option;
/// Block non global IPs |> Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
icon_blacklist_regex: String, true, option;
/// Icon blacklist non global IPs |> Any IP which is not defined as a global IP will be blacklisted.
/// Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
http_request_block_non_global_ips: bool, true, auto, |c| c.icon_blacklist_non_global_ips;
icon_blacklist_non_global_ips: bool, true, def, true;
/// Disable Two-Factor remember |> Enabling this would force the users to use a second factor to login every time.
/// Note that the checkbox would still be present, but ignored.
@ -560,9 +547,6 @@ make_config! {
/// TOTP codes of the previous and next 30 seconds will be invalid.
authenticator_disable_time_drift: bool, true, def, false;
/// Customize the enabled feature flags on the clients |> This is a comma separated list of feature flags to enable.
experimental_client_feature_flags: String, false, def, "fido2-vault-credentials".to_string();
/// Require new device emails |> When a user logs in an email is required to be sent.
/// If sending the email fails the login attempt will fail.
require_device_email: bool, true, def, false;
@ -578,9 +562,8 @@ make_config! {
use_syslog: bool, false, def, false;
/// Log file path
log_file: String, false, option;
/// Log level |> Valid values are "trace", "debug", "info", "warn", "error" and "off"
/// For a specific module append it as a comma separated value "info,path::to::module=debug"
log_level: String, false, def, "info".to_string();
/// Log level
log_level: String, false, def, "Info".to_string();
/// Enable DB WAL |> Turning this off might lead to worse performance, but might help if using vaultwarden on some exotic filesystems,
/// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
@ -619,17 +602,6 @@ make_config! {
/// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
org_groups_enabled: bool, false, def, false;
/// Increase note size limit (Know the risks!) |> Sets the secure note size limit to 100_000 instead of the default 10_000.
/// WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
increase_note_size_limit: bool, true, def, false;
/// Generated max_note_size value to prevent if..else matching during every check
_max_note_size: usize, false, generated, |c| if c.increase_note_size_limit {100_000} else {10_000};
/// Enforce Single Org with Reset Password Policy |> Enforce that the Single Org policy is enabled before setting the Reset Password policy
/// Bitwarden enforces this by default. In Vaultwarden we encouraged using multiple organizations because groups were not available.
/// Setting this to true will enforce the Single Org Policy to be enabled before you can enable the Reset Password policy.
enforce_single_org_with_reset_pw_policy: bool, false, def, false;
},
/// Yubikey settings
@ -648,8 +620,6 @@ make_config! {
duo: _enable_duo {
/// Enabled
_enable_duo: bool, true, def, true;
/// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2)
duo_use_iframe: bool, false, def, false;
/// Integration Key
duo_ikey: String, true, option;
/// Secret Key
@ -695,7 +665,7 @@ make_config! {
/// Embed images as email attachments.
smtp_embed_images: bool, true, def, true;
/// _smtp_img_src
_smtp_img_src: String, false, generated, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
_smtp_img_src: String, false, gen, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
/// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
smtp_debug: bool, false, def, false;
/// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
@ -714,10 +684,6 @@ make_config! {
email_expiration_time: u64, true, def, 600;
/// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
email_attempts_limit: u64, true, def, 3;
/// Automatically enforce at login |> Setup email 2FA provider regardless of any organization policy
email_2fa_enforce_on_verified_invite: bool, true, def, false;
/// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
email_2fa_auto_fallback: bool, true, def, false;
},
}
@ -785,57 +751,6 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
)
}
if cfg.push_enabled {
let push_relay_uri = cfg.push_relay_uri.to_lowercase();
if !push_relay_uri.starts_with("https://") {
err!("`PUSH_RELAY_URI` must start with 'https://'.")
}
if Url::parse(&push_relay_uri).is_err() {
err!("Invalid URL format for `PUSH_RELAY_URI`.");
}
let push_identity_uri = cfg.push_identity_uri.to_lowercase();
if !push_identity_uri.starts_with("https://") {
err!("`PUSH_IDENTITY_URI` must start with 'https://'.")
}
if Url::parse(&push_identity_uri).is_err() {
err!("Invalid URL format for `PUSH_IDENTITY_URI`.");
}
}
// TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
const KNOWN_FLAGS: &[&str] =
&["autofill-overlay", "autofill-v2", "browser-fileless-import", "fido2-vault-credentials"];
let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
if !invalid_flags.is_empty() {
err!(format!("Unrecognized experimental client feature flags: {invalid_flags:?}.\n\n\
Please ensure all feature flags are spelled correctly and that they are supported in this version.\n\
Supported flags: {KNOWN_FLAGS:?}"));
}
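The flag list is a comma-separated string, parsed into individual flags and checked against `KNOWN_FLAGS`; any unknown flag aborts startup. A sketch of that parse-and-validate step (the real `parse_experimental_client_feature_flags` lives in util.rs and returns a map, but the split-and-trim shape assumed here is the essential part):

```rust
const KNOWN_FLAGS: &[&str] =
    &["autofill-overlay", "autofill-v2", "browser-fileless-import", "fido2-vault-credentials"];

fn validate_flags(raw: &str) -> Result<Vec<String>, String> {
    // Split the comma-separated list, dropping empty entries.
    let flags: Vec<String> =
        raw.split(',').map(|f| f.trim().to_string()).filter(|f| !f.is_empty()).collect();
    let invalid: Vec<_> = flags.iter().filter(|f| !KNOWN_FLAGS.contains(&f.as_str())).collect();
    if invalid.is_empty() {
        Ok(flags)
    } else {
        Err(format!("Unrecognized experimental client feature flags: {invalid:?}"))
    }
}
```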
const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
if let Some(limit) = cfg.user_attachment_limit {
if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
err!("`USER_ATTACHMENT_LIMIT` is out of bounds");
}
}
if let Some(limit) = cfg.org_attachment_limit {
if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
err!("`ORG_ATTACHMENT_LIMIT` is out of bounds");
}
}
if let Some(limit) = cfg.user_send_limit {
if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
err!("`USER_SEND_LIMIT` is out of bounds");
}
}
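`MAX_FILESIZE_KB` is `i64::MAX >> 10`, presumably so that a limit expressed in kilobytes can always be converted back to bytes (a multiply by 1024, i.e. a left shift by 10) without overflowing an `i64`:

```rust
const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;

// Any limit that passed the 0..=MAX_FILESIZE_KB check converts to bytes safely.
fn kb_to_bytes(limit_kb: i64) -> i64 {
    debug_assert!((0i64..=MAX_FILESIZE_KB).contains(&limit_kb));
    limit_kb << 10 // * 1024
}
```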
if cfg._enable_duo
&& (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
&& !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
@ -920,19 +835,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
err!("To enable email 2FA, a mail transport must be configured")
}
if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
err!("To enforce email 2FA on verified invitations, email 2fa has to be enabled!");
}
if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
}
// Check if the HTTP request block regex is valid
if let Some(ref r) = cfg.http_request_block_regex {
// Check if the icon blacklist regex is valid
if let Some(ref r) = cfg.icon_blacklist_regex {
let validate_regex = regex::Regex::new(r);
match validate_regex {
Ok(_) => (),
Err(e) => err!(format!("`HTTP_REQUEST_BLOCK_REGEX` is invalid: {e:#?}")),
Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")),
}
}
@ -1012,11 +920,6 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
_ => {}
}
}
if cfg.increase_note_size_limit {
println!("[WARNING] Secure Note size limit is increased to 100_000!");
println!("[WARNING] This could cause issues with clients. Also exports will not work on Bitwarden servers!.");
}
Ok(())
}
@ -1110,6 +1013,7 @@ impl Config {
Ok(Config {
inner: RwLock::new(Inner {
rocket_shutdown_handle: None,
ws_shutdown_handle: None,
templates: load_templates(&config.templates_folder),
config,
_env,
@ -1202,7 +1106,7 @@ impl Config {
}
pub fn delete_user_config(&self) -> Result<(), Error> {
std::fs::remove_file(&*CONFIG_FILE)?;
crate::util::delete_file(&CONFIG_FILE)?;
// Empty user config
let usr = ConfigBuilder::default();
@ -1225,7 +1129,10 @@ impl Config {
}
pub fn private_rsa_key(&self) -> String {
format!("{}.pem", self.rsa_key_filename())
format!("{}.pem", CONFIG.rsa_key_filename())
}
pub fn public_rsa_key(&self) -> String {
format!("{}.pub.pem", CONFIG.rsa_key_filename())
}
pub fn mail_enabled(&self) -> bool {
let inner = &self.inner.read().unwrap().config;
@ -1256,8 +1163,12 @@ impl Config {
token.is_some() && !token.unwrap().trim().is_empty()
}
pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
if self.reload_templates() {
pub fn render_template<T: serde::ser::Serialize>(
&self,
name: &str,
data: &T,
) -> Result<String, crate::error::Error> {
if CONFIG.reload_templates() {
warn!("RELOADING TEMPLATES");
let hb = load_templates(CONFIG.templates_folder());
hb.render(name, data).map_err(Into::into)
@ -1271,8 +1182,16 @@ impl Config {
self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
}
pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
}
pub fn shutdown(&self) {
if let Ok(mut c) = self.inner.write() {
if let Some(handle) = c.ws_shutdown_handle.take() {
handle.send(()).ok();
}
if let Some(handle) = c.rocket_shutdown_handle.take() {
handle.notify();
}
@ -1280,10 +1199,7 @@ impl Config {
}
}
use handlebars::{
Context, DirectorySourceOptions, Handlebars, Helper, HelperResult, Output, RenderContext, RenderErrorReason,
Renderable,
};
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable};
fn load_templates<P>(path: P) -> Handlebars<'static>
where
@ -1294,6 +1210,7 @@ where
hb.set_strict_mode(true);
// Register helpers
hb.register_helper("case", Box::new(case_helper));
hb.register_helper("jsesc", Box::new(js_escape_helper));
hb.register_helper("to_json", Box::new(to_json));
macro_rules! reg {
@ -1351,20 +1268,19 @@ where
// And then load user templates to overwrite the defaults
// Use .hbs extension for the files
// Templates get registered with their relative name
hb.register_templates_directory(path, DirectorySourceOptions::default()).unwrap();
hb.register_templates_directory(".hbs", path).unwrap();
hb
}
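The directory-registration call is where the handlebars 4 → 5 API change shows: 4.x took the template extension as the first argument, while 5.x takes the path plus a `DirectorySourceOptions` (whose default already selects `.hbs`). A side-by-side sketch, with the version-specific calls left as comments so the snippet compiles against either major version:

```rust
use handlebars::Handlebars;

fn load(path: &str) -> Handlebars<'static> {
    let mut hb = Handlebars::new();
    hb.set_strict_mode(true);
    // handlebars 5.x (main side of this diff), needs the `dir_source` feature:
    // hb.register_templates_directory(path, handlebars::DirectorySourceOptions::default()).unwrap();
    // handlebars 4.x (1.30.1 side):
    // hb.register_templates_directory(".hbs", path).unwrap();
    let _ = path;
    hb
}
```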
fn case_helper<'reg, 'rc>(
h: &Helper<'rc>,
h: &Helper<'reg, 'rc>,
r: &'reg Handlebars<'_>,
ctx: &'rc Context,
rc: &mut RenderContext<'reg, 'rc>,
out: &mut dyn Output,
) -> HelperResult {
let param =
h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"case\"")))?;
let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
let value = param.value().clone();
if h.params().iter().skip(1).any(|x| x.value() == &value) {
@ -1374,19 +1290,38 @@ fn case_helper<'reg, 'rc>(
}
}
fn to_json<'reg, 'rc>(
h: &Helper<'rc>,
fn js_escape_helper<'reg, 'rc>(
h: &Helper<'reg, 'rc>,
_r: &'reg Handlebars<'_>,
_ctx: &'rc Context,
_rc: &mut RenderContext<'reg, 'rc>,
out: &mut dyn Output,
) -> HelperResult {
let param = h
.param(0)
.ok_or_else(|| RenderErrorReason::Other(String::from("Expected 1 parameter for \"to_json\"")))?
.value();
let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?;
let no_quote = h.param(1).is_some();
let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?;
let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
if !no_quote {
escaped_value = format!("&quot;{escaped_value}&quot;");
}
out.write(&escaped_value)?;
Ok(())
}
fn to_json<'reg, 'rc>(
h: &Helper<'reg, 'rc>,
_r: &'reg Handlebars<'_>,
_ctx: &'rc Context,
_rc: &mut RenderContext<'reg, 'rc>,
out: &mut dyn Output,
) -> HelperResult {
let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value();
let json = serde_json::to_string(param)
.map_err(|e| RenderErrorReason::Other(format!("Can't serialize parameter to JSON: {e}")))?;
.map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?;
out.write(&json)?;
Ok(())
}

View File

@ -300,17 +300,19 @@ pub trait FromDb {
impl<T: FromDb> FromDb for Vec<T> {
type Output = Vec<T::Output>;
#[allow(clippy::wrong_self_convention)]
#[inline(always)]
fn from_db(self) -> Self::Output {
self.into_iter().map(FromDb::from_db).collect()
self.into_iter().map(crate::db::FromDb::from_db).collect()
}
}
impl<T: FromDb> FromDb for Option<T> {
type Output = Option<T::Output>;
#[allow(clippy::wrong_self_convention)]
#[inline(always)]
fn from_db(self) -> Self::Output {
self.map(FromDb::from_db)
self.map(crate::db::FromDb::from_db)
}
}
@ -366,42 +368,34 @@ pub mod models;
/// Creates a back-up of the sqlite database
/// MySQL/MariaDB and PostgreSQL are not supported.
pub async fn backup_database(conn: &mut DbConn) -> Result<String, Error> {
pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
db_run! {@raw conn:
postgresql, mysql {
let _ = conn;
err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
}
sqlite {
backup_sqlite_database(conn)
}
}
}
#[cfg(sqlite)]
pub fn backup_sqlite_database(conn: &mut diesel::sqlite::SqliteConnection) -> Result<String, Error> {
use diesel::RunQueryDsl;
use std::path::Path;
let db_url = CONFIG.database_url();
let db_path = std::path::Path::new(&db_url).parent().unwrap();
let backup_file = db_path
.join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
.to_string_lossy()
.into_owned();
diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
Ok(backup_file)
let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy();
let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string();
diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?;
Ok(())
}
}
}
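`VACUUM INTO` asks SQLite itself to write a compacted, transactionally consistent copy of the live database to a new file, which is why no file-level locking is needed here. A minimal standalone sketch of the same statement, assuming the rusqlite crate rather than the diesel plumbing used above:

```rust
use rusqlite::Connection;

fn backup(db_path: &str, backup_file: &str) -> rusqlite::Result<()> {
    let conn = Connection::open(db_path)?;
    // VACUUM INTO writes a consistent snapshot to a new file; the filename
    // cannot be bound as a parameter, hence the string interpolation.
    conn.execute_batch(&format!("VACUUM INTO '{backup_file}'"))?;
    Ok(())
}
```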
/// Get the SQL Server version
pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
db_run! {@raw conn:
postgresql, mysql {
define_sql_function!{
sql_function!{
fn version() -> diesel::sql_types::Text;
}
diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
}
sqlite {
define_sql_function!{
sql_function!{
fn sqlite_version() -> diesel::sql_types::Text;
}
diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())

View File

@ -1,6 +1,5 @@
use std::io::ErrorKind;
use bigdecimal::{BigDecimal, ToPrimitive};
use serde_json::Value;
use crate::CONFIG;
@ -14,14 +13,14 @@ db_object! {
pub id: String,
pub cipher_uuid: String,
pub file_name: String, // encrypted
pub file_size: i64,
pub file_size: i32,
pub akey: Option<String>,
}
}
/// Local methods
impl Attachment {
pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32, akey: Option<String>) -> Self {
Self {
id,
cipher_uuid,
@ -42,13 +41,13 @@ impl Attachment {
pub fn to_json(&self, host: &str) -> Value {
json!({
"id": self.id,
"url": self.get_url(host),
"fileName": self.file_name,
"size": self.file_size.to_string(),
"sizeName": crate::util::get_display_size(self.file_size),
"key": self.akey,
"object": "attachment"
"Id": self.id,
"Url": self.get_url(host),
"FileName": self.file_name,
"Size": self.file_size.to_string(),
"SizeName": crate::util::get_display_size(self.file_size),
"Key": self.akey,
"Object": "attachment"
})
}
}
@ -95,7 +94,7 @@ impl Attachment {
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
let _: () = crate::util::retry(
crate::util::retry(
|| diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
10,
)
@ -103,7 +102,7 @@ impl Attachment {
let file_path = &self.get_file_path();
match std::fs::remove_file(file_path) {
match crate::util::delete_file(file_path) {
// Ignore "file not found" errors. This can happen when the
// upstream caller has already cleaned up the file as part of
// its own error handling.
@ -146,18 +145,13 @@ impl Attachment {
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<BigDecimal> = attachments::table
let result: Option<i64> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
.filter(ciphers::user_uuid.eq(user_uuid))
.select(diesel::dsl::sum(attachments::file_size))
.first(conn)
.expect("Error loading user attachment total size");
match result.map(|r| r.to_i64()) {
Some(Some(r)) => r,
Some(None) => i64::MAX,
None => 0
}
result.unwrap_or(0)
}}
}
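On MySQL and PostgreSQL, `SUM` over a `BIGINT` column widens to a decimal type, so diesel hands back `Option<BigDecimal>` (NULL when no rows match). main collapses that with `to_i64`, saturating to `i64::MAX` when the total does not fit, where 1.30.1 summed straight into `Option<i64>`. The collapse in isolation (the next hunk repeats it for `size_by_org`):

```rust
use bigdecimal::{BigDecimal, ToPrimitive};

// no rows => 0; total too large for i64 => i64::MAX; otherwise the exact total.
fn sum_to_i64(result: Option<BigDecimal>) -> i64 {
    match result.map(|r| r.to_i64()) {
        Some(Some(r)) => r,
        Some(None) => i64::MAX,
        None => 0,
    }
}
```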
@ -174,18 +168,13 @@ impl Attachment {
pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<BigDecimal> = attachments::table
let result: Option<i64> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
.filter(ciphers::organization_uuid.eq(org_uuid))
.select(diesel::dsl::sum(attachments::file_size))
.first(conn)
.expect("Error loading user attachment total size");
match result.map(|r| r.to_i64()) {
Some(Some(r)) => r,
Some(None) => i64::MAX,
None => 0
}
result.unwrap_or(0)
}}
}

View File

@ -140,7 +140,7 @@ impl AuthRequest {
}
pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); // after 5 minutes, clients reject the request
let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); // after 5 minutes, clients reject the request
for auth_request in Self::find_created_before(&expiry_time, conn).await {
auth_request.delete(conn).await.ok();
}

View File

@ -1,6 +1,5 @@
use crate::util::LowerCase;
use crate::CONFIG;
use chrono::{DateTime, NaiveDateTime, TimeDelta, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;
use super::{
@ -79,39 +78,21 @@ impl Cipher {
}
}
pub fn validate_cipher_data(cipher_data: &[CipherData]) -> EmptyResult {
pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
let mut validation_errors = serde_json::Map::new();
let max_note_size = CONFIG._max_note_size();
let max_note_size_msg =
format!("The field Notes exceeds the maximum encrypted value length of {} characters.", &max_note_size);
for (index, cipher) in cipher_data.iter().enumerate() {
// Validate the note size and if it is exceeded return a warning
if let Some(note) = &cipher.notes {
if note.len() > max_note_size {
validation_errors
.insert(format!("Ciphers[{index}].Notes"), serde_json::to_value([&max_note_size_msg]).unwrap());
}
}
// Validate the password history if it contains `null` values and if so, return a warning
if let Some(Value::Array(password_history)) = &cipher.password_history {
for pwh in password_history {
if let Value::Object(pwo) = pwh {
if pwo.get("password").is_some_and(|p| !p.is_string()) {
if let Some(note) = &cipher.Notes {
if note.len() > 10_000 {
validation_errors.insert(
format!("Ciphers[{index}].Notes"),
serde_json::to_value([
"The password history contains a `null` value. Only strings are allowed.",
"The field Notes exceeds the maximum encrypted value length of 10000 characters.",
])
.unwrap(),
);
break;
}
}
}
}
}
if !validation_errors.is_empty() {
let err_json = json!({
"message": "The model state is invalid.",
@ -154,6 +135,10 @@ impl Cipher {
}
}
let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
let password_history_json =
self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
// We don't need these values at all for Organizational syncs
// Skip any other database calls if this is the case and just return false.
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@ -168,74 +153,20 @@ impl Cipher {
(false, false)
};
let fields_json: Vec<_> = self
.fields
.as_ref()
.and_then(|s| {
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
.inspect_err(|e| warn!("Error parsing fields {e:?} for {}", self.uuid))
.ok()
})
.map(|d| d.into_iter().map(|d| d.data).collect())
.unwrap_or_default();
let password_history_json: Vec<_> = self
.password_history
.as_ref()
.and_then(|s| {
serde_json::from_str::<Vec<LowerCase<Value>>>(s)
.inspect_err(|e| warn!("Error parsing password history {e:?} for {}", self.uuid))
.ok()
})
.map(|d| {
// Check every password history item and return only the valid ones.
// If a password field has the type `null`, skip it; it breaks newer Bitwarden clients
// A second check verifies that the lastUsedDate exists and is a valid DateTime string; if not, the epoch start time will be used
d.into_iter()
.filter_map(|d| match d.data.get("password") {
Some(p) if p.is_string() => Some(d.data),
_ => None,
})
.map(|d| match d.get("lastUsedDate").and_then(|l| l.as_str()) {
Some(l) if DateTime::parse_from_rfc3339(l).is_ok() => d,
_ => {
let mut d = d;
d["lastUsedDate"] = json!("1970-01-01T00:00:00.000Z");
d
}
})
.collect()
})
.unwrap_or_default();
// Get the type_data or a default to an empty json object '{}'.
// If not passing an empty object, mobile clients will crash.
let mut type_data_json =
serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_else(|_| {
warn!("Error parsing data field for {}", self.uuid);
Value::Object(serde_json::Map::new())
});
let mut type_data_json: Value =
serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri, as this is needed by several (mobile) clients.
if self.atype == 1 {
if type_data_json["uris"].is_array() {
let uri = type_data_json["uris"][0]["uri"].clone();
type_data_json["uri"] = uri;
if type_data_json["Uris"].is_array() {
let uri = type_data_json["Uris"][0]["Uri"].clone();
type_data_json["Uri"] = uri;
} else {
// Upstream always has a Uri key/value
type_data_json["uri"] = Value::Null;
}
}
// Fix secure note issues when data is invalid
// This breaks at least the native mobile clients
if self.atype == 2 {
match type_data_json {
Value::Object(ref t) if t.get("type").is_some_and(|t| t.is_number()) => {}
_ => {
type_data_json = json!({"type": 0});
}
type_data_json["Uri"] = Value::Null;
}
}
@ -244,10 +175,10 @@ impl Cipher {
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// data_json should always contain the following keys with every atype
data_json["fields"] = Value::Array(fields_json.clone());
data_json["name"] = json!(self.name);
data_json["notes"] = json!(self.notes);
data_json["passwordHistory"] = Value::Array(password_history_json.clone());
data_json["Fields"] = fields_json.clone();
data_json["Name"] = json!(self.name);
data_json["Notes"] = json!(self.notes);
data_json["PasswordHistory"] = password_history_json.clone();
let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
@ -256,7 +187,7 @@ impl Cipher {
Cow::from(Vec::with_capacity(0))
}
} else {
Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await)
Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
};
// There are three types of cipher response models in upstream
@ -267,48 +198,48 @@ impl Cipher {
//
// Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
let mut json_object = json!({
"object": "cipherDetails",
"id": self.uuid,
"type": self.atype,
"creationDate": format_date(&self.created_at),
"revisionDate": format_date(&self.updated_at),
"deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"organizationId": self.organization_uuid,
"key": self.key,
"attachments": attachments_json,
"Object": "cipherDetails",
"Id": self.uuid,
"Type": self.atype,
"CreationDate": format_date(&self.created_at),
"RevisionDate": format_date(&self.updated_at),
"DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"OrganizationId": self.organization_uuid,
"Key": self.key,
"Attachments": attachments_json,
// We have UseTotp set to true by default within the Organization model.
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
"organizationUseTotp": true,
"OrganizationUseTotp": true,
// This field is specific to the cipherDetails type.
"collectionIds": collection_ids,
"CollectionIds": collection_ids,
"name": self.name,
"notes": self.notes,
"fields": fields_json,
"Name": self.name,
"Notes": self.notes,
"Fields": fields_json,
"data": data_json,
"Data": data_json,
"passwordHistory": password_history_json,
"PasswordHistory": password_history_json,
// All Cipher types are included by default as null, but only the matching one will be populated
"login": null,
"secureNote": null,
"card": null,
"identity": null,
"Login": null,
"SecureNote": null,
"Card": null,
"Identity": null,
});
// These values are only needed for user/default syncs
// Not during an organizational sync like `get_org_details`
// Skip adding these fields in that case
if sync_type == CipherSyncType::User {
json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
} else {
self.get_folder_uuid(user_uuid, conn).await
});
json_object["favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_favorites.contains(&self.uuid)
} else {
self.is_favorite(user_uuid, conn).await
@ -316,15 +247,15 @@ impl Cipher {
// These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.
json_object["edit"] = json!(!read_only);
json_object["viewPassword"] = json!(!hide_passwords);
json_object["Edit"] = json!(!read_only);
json_object["ViewPassword"] = json!(!hide_passwords);
}
let key = match self.atype {
1 => "login",
2 => "secureNote",
3 => "card",
4 => "identity",
1 => "Login",
2 => "SecureNote",
3 => "Card",
4 => "Identity",
_ => panic!("Wrong type"),
};
@ -342,16 +273,7 @@ impl Cipher {
None => {
// Belongs to Organization, need to update affected users
if let Some(ref org_uuid) = self.organization_uuid {
// users having access to the collection
let mut collection_users =
UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
if CONFIG.org_groups_enabled() {
// members of a group having access to the collection
let group_users =
UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
collection_users.extend(group_users);
}
for user_org in collection_users {
for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() {
User::update_uuid_revision(&user_org.user_uuid, conn).await;
user_uuids.push(user_org.user_uuid.clone())
}
@ -430,7 +352,7 @@ impl Cipher {
pub async fn purge_trash(conn: &mut DbConn) {
if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
let now = Utc::now().naive_utc();
let dt = now - TimeDelta::try_days(auto_delete_days).unwrap();
let dt = now - Duration::days(auto_delete_days);
for cipher in Self::find_deleted_before(&dt, conn).await {
cipher.delete(conn).await.ok();
}
@ -495,12 +417,9 @@ impl Cipher {
cipher_sync_data: Option<&CipherSyncData>,
conn: &mut DbConn,
) -> bool {
if !CONFIG.org_groups_enabled() {
return false;
}
if let Some(ref org_uuid) = self.organization_uuid {
if let Some(cipher_sync_data) = cipher_sync_data {
return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
} else {
return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
}
@ -593,9 +512,6 @@ impl Cipher {
}
async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
if !CONFIG.org_groups_enabled() {
return Vec::new();
}
db_run! {conn: {
ciphers::table
.filter(ciphers::uuid.eq(&self.uuid))
@ -664,17 +580,6 @@ impl Cipher {
}}
}
pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: {
ciphers::table
.filter(ciphers::uuid.eq(cipher_uuid))
.filter(ciphers::organization_uuid.eq(org_uuid))
.first::<CipherDb>(conn)
.ok()
.from_db()
}}
}
// Find all ciphers accessible or visible to the specified user.
//
// "Accessible" means the user has read access to the cipher, either via
@ -688,7 +593,6 @@ impl Cipher {
// result, those ciphers will not appear in "My Vault" for the org
// owner/admin, but they can still be accessed via the org vault view.
pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
if CONFIG.org_groups_enabled() {
db_run! {conn: {
let mut query = ciphers::table
.left_join(ciphers_collections::table.on(
@ -733,39 +637,6 @@ impl Cipher {
.distinct()
.load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
}}
} else {
db_run! {conn: {
let mut query = ciphers::table
.left_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)
))
.left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
.and(users_organizations::user_uuid.eq(user_uuid))
.and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
))
.left_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
// Ensure that users_collections::user_uuid is NULL for unconfirmed users.
.and(users_organizations::user_uuid.eq(users_collections::user_uuid))
))
.filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
.or_filter(users_organizations::access_all.eq(true)) // access_all in org
.or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
.into_boxed();
if !visible_only {
query = query.or_filter(
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
);
}
query
.select(ciphers::all_columns)
.distinct()
.load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
}}
}
}
// Find all ciphers visible to the specified user.
@ -834,123 +705,30 @@ impl Cipher {
}
pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
if CONFIG.org_groups_enabled() {
db_run! {conn: {
ciphers_collections::table
.filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
.inner_join(collections::table.on(
collections::uuid.eq(ciphers_collections::collection_uuid)
))
.left_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone()))
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone()))
))
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
))
.filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
.and(users_collections::read_only.eq(false)))
.or(groups::access_all.eq(true)) // Access via groups
.or(collections_groups::collections_uuid.is_not_null() // Access via groups
.and(collections_groups::read_only.eq(false)))
)
.select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default()
}}
} else {
db_run! {conn: {
ciphers_collections::table
.filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
.inner_join(collections::table.on(
collections::uuid.eq(ciphers_collections::collection_uuid)
))
.inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone()))
users_organizations::org_uuid.eq(collections::org_uuid).and(
users_organizations::user_uuid.eq(user_id.clone())
)
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone()))
))
.filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
.and(users_collections::read_only.eq(false)))
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
users_collections::user_uuid.eq(user_id.clone())
)
.select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default()
}}
}
}
pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
if CONFIG.org_groups_enabled() {
db_run! {conn: {
ciphers_collections::table
))
.filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
.inner_join(collections::table.on(
collections::uuid.eq(ciphers_collections::collection_uuid)
))
.left_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone()))
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone()))
))
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(collections_groups::table.on(
collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
.and(collections_groups::groups_uuid.eq(groups::uuid))
))
.filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
.and(users_collections::read_only.eq(false)))
.or(groups::access_all.eq(true)) // Access via groups
.or(collections_groups::collections_uuid.is_not_null() // Access via groups
.and(collections_groups::read_only.eq(false)))
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
.filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
users_organizations::access_all.eq(true).or( // User has access all
users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
)
))
.select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default()
}}
} else {
db_run! {conn: {
ciphers_collections::table
.filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
.inner_join(collections::table.on(
collections::uuid.eq(ciphers_collections::collection_uuid)
))
.inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone()))
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone()))
))
.filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
.and(users_collections::read_only.eq(false)))
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
)
.select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default()
}}
}
}
/// Return a Vec with (cipher_uuid, collection_uuid)

View File

@ -1,7 +1,6 @@
use serde_json::Value;
use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
use crate::CONFIG;
use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@ -49,11 +48,11 @@ impl Collection {
pub fn to_json(&self) -> Value {
json!({
"externalId": self.external_id,
"id": self.uuid,
"organizationId": self.org_uuid,
"name": self.name,
"object": "collection",
"ExternalId": self.external_id,
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Name": self.name,
"Object": "collection",
})
}
@ -78,57 +77,30 @@ impl Collection {
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
conn: &mut DbConn,
) -> Value {
let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data {
let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
// Only for Manager types Bitwarden returns true for the can_manage option
// Owners and Admins always have false, but they can manage all collections anyway
Some(uo) if uo.has_full_access() => (false, false, uo.atype == UserOrgType::Manager),
Some(uo) => {
// Only let a manager manage collections when they have full read/write access
let is_manager = uo.atype == UserOrgType::Manager;
Some(uo) if uo.has_full_access() => (false, false),
Some(_) => {
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
(uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords)
(uc.read_only, uc.hide_passwords)
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
(cg.read_only, cg.hide_passwords, is_manager && !cg.read_only && !cg.hide_passwords)
(cg.read_only, cg.hide_passwords)
} else {
(false, false, false)
(false, false)
}
}
_ => (true, true, false),
_ => (true, true),
}
} else {
match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
Some(ou) if ou.has_full_access() => (false, false, ou.atype == UserOrgType::Manager),
Some(ou) => {
let is_manager = ou.atype == UserOrgType::Manager;
let read_only = !self.is_writable_by_user(user_uuid, conn).await;
let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
(read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
}
_ => (
!self.is_writable_by_user(user_uuid, conn).await,
self.hide_passwords_for_user(user_uuid, conn).await,
false,
),
}
(!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
};
let mut json_object = self.to_json();
json_object["object"] = json!("collectionDetails");
json_object["readOnly"] = json!(read_only);
json_object["hidePasswords"] = json!(hide_passwords);
json_object["manage"] = json!(can_manage);
json_object["Object"] = json!("collectionDetails");
json_object["ReadOnly"] = json!(read_only);
json_object["HidePasswords"] = json!(hide_passwords);
json_object
}
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_status(UserOrgStatus::Confirmed)
&& (org_user.has_full_access()
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
|| (CONFIG.org_groups_enabled()
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
}
}
use crate::db::DbConn;
@ -209,7 +181,6 @@ impl Collection {
}
pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
if CONFIG.org_groups_enabled() {
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
@ -251,32 +222,17 @@ impl Collection {
.distinct()
.load::<CollectionDb>(conn).expect("Error loading collections").from_db()
}}
} else {
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(collections::uuid).and(
users_collections::user_uuid.eq(user_uuid.clone())
)
))
.left_join(users_organizations::table.on(
collections::org_uuid.eq(users_organizations::org_uuid).and(
users_organizations::user_uuid.eq(user_uuid.clone())
)
))
.filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
)
.filter(
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true) // access_all in Organization
)
)
.select(collections::all_columns)
.distinct()
.load::<CollectionDb>(conn).expect("Error loading collections").from_db()
}}
}
// Check if a user has access to a specific collection
// FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
// For now this is a good solution without making too many changes.
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
conn: &mut DbConn,
) -> bool {
Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
}
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
@ -321,7 +277,6 @@ impl Collection {
}
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
if CONFIG.org_groups_enabled() {
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
@ -361,7 +316,10 @@ impl Collection {
.first::<CollectionDb>(conn).ok()
.from_db()
}}
} else {
}
pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
let user_uuid = user_uuid.to_string();
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
@ -374,33 +332,6 @@ impl Collection {
users_organizations::user_uuid.eq(user_uuid)
)
))
.filter(collections::uuid.eq(uuid))
.filter(
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
))
).select(collections::all_columns)
.first::<CollectionDb>(conn).ok()
.from_db()
}}
}
}
pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
let user_uuid = user_uuid.to_string();
if CONFIG.org_groups_enabled() {
db_run! { conn: {
collections::table
.filter(collections::uuid.eq(&self.uuid))
.inner_join(users_organizations::table.on(
collections::org_uuid.eq(users_organizations::org_uuid)
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(collections::uuid)
.and(users_collections::user_uuid.eq(user_uuid))
))
.left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
@ -408,38 +339,23 @@ impl Collection {
groups::uuid.eq(groups_users::groups_uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
.and(collections_groups::collections_uuid.eq(collections::uuid))
))
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
.or(users_organizations::access_all.eq(true)) // access_all via membership
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
.and(users_collections::read_only.eq(false)))
.or(groups::access_all.eq(true)) // access_all via group
.or(collections_groups::collections_uuid.is_not_null() // write access given via group
.and(collections_groups::read_only.eq(false)))
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
collections_groups::collections_uuid.eq(collections::uuid)
)
.count()
.first::<i64>(conn)
.ok()
.unwrap_or(0) != 0
}}
} else {
db_run! { conn: {
collections::table
))
.filter(collections::uuid.eq(&self.uuid))
.inner_join(users_organizations::table.on(
collections::org_uuid.eq(users_organizations::org_uuid)
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
))
.left_join(users_collections::table.on(
users_collections::collection_uuid.eq(collections::uuid)
.and(users_collections::user_uuid.eq(user_uuid))
))
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
.or(users_organizations::access_all.eq(true)) // access_all via membership
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
.and(users_collections::read_only.eq(false)))
.filter(
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
)).or(
groups::access_all.eq(true) // access_all in groups
).or( // access via groups
groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
collections_groups::collections_uuid.is_not_null().and(
collections_groups::read_only.eq(false))
)
)
)
.count()
.first::<i64>(conn)
@ -447,7 +363,6 @@ impl Collection {
.unwrap_or(0) != 0
}}
}
}
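Aside: the Diesel filter above encodes a layered write-access rule that is easier to see in plain Rust. The sketch below is an illustrative simplification (group access elided, all names invented), not Vaultwarden's actual API:

// Hypothetical flattened view of one user's relationship to a collection.
struct Access {
    is_admin: bool,       // users_organizations::atype <= Admin
    access_all: bool,     // users_organizations::access_all
    direct: Option<bool>, // users_collections row: Some(read_only) if assigned
}

fn is_writable(a: &Access) -> bool {
    a.is_admin
        || a.access_all
        || matches!(a.direct, Some(read_only) if !read_only)
}

fn main() {
    // A directly assigned, non-read-only user can write:
    assert!(is_writable(&Access { is_admin: false, access_all: false, direct: Some(false) }));
    // A read-only assignment cannot:
    assert!(!is_writable(&Access { is_admin: false, access_all: false, direct: Some(true) }));
}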
pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
let user_uuid = user_uuid.to_string();
@ -666,7 +581,7 @@ impl CollectionUser {
db_run! { conn: {
for user in collectionusers {
let _: () = diesel::delete(users_collections::table.filter(
diesel::delete(users_collections::table.filter(
users_collections::user_uuid.eq(user_uuid)
.and(users_collections::collection_uuid.eq(user.collection_uuid))
))
@ -676,10 +591,6 @@ impl CollectionUser {
Ok(())
}}
}
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
}
}
/// Database methods

View File

@ -16,7 +16,7 @@ db_object! {
pub user_uuid: String,
pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
pub push_uuid: Option<String>,
pub push_token: Option<String>,
@ -59,7 +59,12 @@ impl Device {
self.twofactor_remember = None;
}
pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
pub fn refresh_tokens(
&mut self,
user: &super::User,
orgs: Vec<super::UserOrganization>,
scope: Vec<String>,
) -> (String, i64) {
// If there is no refresh token, we create one
if self.refresh_token.is_empty() {
use data_encoding::BASE64URL;
@ -67,20 +72,13 @@ impl Device {
}
// Update the expiration of the device and the last update date
let time_now = Utc::now();
self.updated_at = time_now.naive_utc();
let time_now = Utc::now().naive_utc();
self.updated_at = time_now;
// ---
// Disabled adding these keys to the JWT, since they could cause it to grow too large
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients
// Because these might get used in the future, and they are added by the Bitwarden server, let's keep them, but commented out
// ---
// fn arg: orgs: Vec<super::UserOrganization>,
// ---
// let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
// let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
// let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
// let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
// Create the JWT claims struct, to send to the client
use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
@ -95,16 +93,11 @@ impl Device {
email: user.email.clone(),
email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),
// ---
// Disabled adding these keys to the JWT, since they could cause it to grow too large
// Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients
// Because these might get used in the future, and they are added by the Bitwarden server, let's keep them, but commented out
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
// ---
// orgowner,
// orgadmin,
// orguser,
// orgmanager,
orgowner,
orgadmin,
orguser,
orgmanager,
sstamp: user.security_stamp.clone(),
device: self.uuid.clone(),
scope,
@ -113,14 +106,6 @@ impl Device {
(encode_jwt(&claims), DEFAULT_VALIDITY.num_seconds())
}
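For scale, here is a minimal sketch (plain serde_json, not Vaultwarden's encode_jwt, with made-up UUIDs) of why per-organization role arrays inflate the login claims:

use serde_json::json;

fn main() {
    // Hypothetical memberships; in Vaultwarden these would come from UserOrganization rows.
    let org_uuids: Vec<String> =
        (0..20).map(|i| format!("00000000-0000-0000-0000-{i:012}")).collect();

    let slim = json!({ "sub": "user-uuid", "email": "user@example.com" });
    let fat = json!({
        "sub": "user-uuid",
        "email": "user@example.com",
        "orgowner": org_uuids,
        "orgadmin": [], "orguser": [], "orgmanager": [],
    });

    // Each org UUID adds roughly 39 bytes before base64 expansion (~4/3), so
    // users in many organizations can push the JWT past common proxy
    // header-size limits.
    println!("slim payload: {} bytes", slim.to_string().len());
    println!("fat payload:  {} bytes", fat.to_string().len());
}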
pub fn is_push_device(&self) -> bool {
matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
}
pub fn is_registered(&self) -> bool {
self.push_uuid.is_some()
}
}
use crate::db::DbConn;
@ -218,7 +203,6 @@ impl Device {
.from_db()
}}
}
pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
devices::table
@ -267,9 +251,6 @@ pub enum DeviceType {
SafariExtension = 20,
Sdk = 21,
Server = 22,
WindowsCLI = 23,
MacOsCLI = 24,
LinuxCLI = 25,
}
impl fmt::Display for DeviceType {
@ -281,26 +262,23 @@ impl fmt::Display for DeviceType {
DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
DeviceType::OperaExtension => write!(f, "Opera Extension"),
DeviceType::EdgeExtension => write!(f, "Edge Extension"),
DeviceType::WindowsDesktop => write!(f, "Windows"),
DeviceType::MacOsDesktop => write!(f, "macOS"),
DeviceType::LinuxDesktop => write!(f, "Linux"),
DeviceType::ChromeBrowser => write!(f, "Chrome"),
DeviceType::FirefoxBrowser => write!(f, "Firefox"),
DeviceType::OperaBrowser => write!(f, "Opera"),
DeviceType::EdgeBrowser => write!(f, "Edge"),
DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
DeviceType::OperaBrowser => write!(f, "Opera Browser"),
DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
DeviceType::IEBrowser => write!(f, "Internet Explorer"),
DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
DeviceType::AndroidAmazon => write!(f, "Android"),
DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
DeviceType::Uwp => write!(f, "UWP"),
DeviceType::SafariBrowser => write!(f, "Safari"),
DeviceType::VivaldiBrowser => write!(f, "Vivaldi"),
DeviceType::SafariBrowser => write!(f, "Safari Browser"),
DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
DeviceType::SafariExtension => write!(f, "Safari Extension"),
DeviceType::Sdk => write!(f, "SDK"),
DeviceType::Server => write!(f, "Server"),
DeviceType::WindowsCLI => write!(f, "Windows CLI"),
DeviceType::MacOsCLI => write!(f, "macOS CLI"),
DeviceType::LinuxCLI => write!(f, "Linux CLI"),
}
}
}
@ -331,9 +309,6 @@ impl DeviceType {
20 => DeviceType::SafariExtension,
21 => DeviceType::Sdk,
22 => DeviceType::Server,
23 => DeviceType::WindowsCLI,
24 => DeviceType::MacOsCLI,
25 => DeviceType::LinuxCLI,
_ => DeviceType::UnknownBrowser,
}
}
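The fallback arm is what makes the variant removal above safe in the other direction; a trimmed, self-contained sketch of the same pattern:

// Unknown numeric codes collapse to a catch-all variant instead of failing,
// so logins from newer client types still work, just with a generic label.
#[derive(Debug, PartialEq)]
enum DeviceType {
    Android,
    UnknownBrowser,
    Sdk,
}

impl DeviceType {
    fn from_i32(value: i32) -> Self {
        match value {
            0 => DeviceType::Android,
            21 => DeviceType::Sdk,
            // 23..=25 (the CLI types) exist only on the `main` side; a 1.30.1
            // server receiving them lands here and reports "Unknown Browser".
            _ => DeviceType::UnknownBrowser,
        }
    }
}

fn main() {
    assert_eq!(DeviceType::from_i32(23), DeviceType::UnknownBrowser);
}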

View File

@ -58,11 +58,11 @@ impl EmergencyAccess {
pub fn to_json(&self) -> Value {
json!({
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"object": "emergencyAccess",
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"Object": "emergencyAccess",
})
}
@ -70,43 +70,36 @@ impl EmergencyAccess {
let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");
json!({
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"grantorId": grantor_user.uuid,
"email": grantor_user.email,
"name": grantor_user.name,
"object": "emergencyAccessGrantorDetails",
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GrantorId": grantor_user.uuid,
"Email": grantor_user.email,
"Name": grantor_user.name,
"Object": "emergencyAccessGrantorDetails",
})
}
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
} else if let Some(email) = self.email.as_deref() {
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
// remove outstanding invitations which should not exist
Self::delete_all_by_grantee_email(email, conn).await.ok();
return None;
}
}
Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
} else {
return None;
None
};
Some(json!({
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"granteeId": grantee_user.uuid,
"email": grantee_user.email,
"name": grantee_user.name,
"object": "emergencyAccessGranteeDetails",
}))
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
"Email": grantee_user.as_ref().map_or("", |u| &u.email),
"Name": grantee_user.as_ref().map_or("", |u| &u.name),
"Object": "emergencyAccessGranteeDetails",
})
}
}
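The 1.30.1 side above leans on Option::map_or to render an unresolved grantee as empty strings rather than dropping the entry; a minimal standalone sketch of that pattern:

struct User {
    uuid: String,
    email: String,
}

fn main() {
    let grantee: Option<User> = None; // e.g. invite sent, user not registered yet

    // Borrow the Option, fall back to "" when there is no user to read from.
    let id = grantee.as_ref().map_or("", |u| u.uuid.as_str());
    let email = grantee.as_ref().map_or("", |u| u.email.as_str());
    assert_eq!((id, email), ("", ""));
}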
@ -181,7 +174,7 @@ impl EmergencyAccess {
// Update the grantee so that it will refresh its status.
User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
self.status = status;
date.clone_into(&mut self.updated_at);
self.updated_at = date.to_owned();
db_run! {conn: {
crate::util::retry(|| {
@ -199,7 +192,7 @@ impl EmergencyAccess {
conn: &mut DbConn,
) -> EmptyResult {
self.last_notification_at = Some(date.to_owned());
date.clone_into(&mut self.updated_at);
self.updated_at = date.to_owned();
db_run! {conn: {
crate::util::retry(|| {
@ -221,13 +214,6 @@ impl EmergencyAccess {
Ok(())
}
pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
ea.delete(conn).await?;
}
Ok(())
}
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn).await;
@ -238,6 +224,15 @@ impl EmergencyAccess {
}}
}
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
grantor_uuid: &str,
grantee_uuid: &str,
@ -272,26 +267,6 @@ impl EmergencyAccess {
}}
}
pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.filter(emergency_access::grantee_uuid.eq(grantee_uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.filter(emergency_access::email.eq(grantee_email))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
@ -310,15 +285,6 @@ impl EmergencyAccess {
}}
}
pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
@ -326,21 +292,6 @@ impl EmergencyAccess {
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if self.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid));
self.email = None;
self.save(conn).await
}
}
// endregion

View File

@ -3,7 +3,7 @@ use serde_json::Value;
use crate::{api::EmptyResult, error::MapResult, CONFIG};
use chrono::{NaiveDateTime, TimeDelta, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
// https://bitwarden.com/help/event-logs/
@ -316,7 +316,7 @@ impl Event {
pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
if let Some(days_to_retain) = CONFIG.events_days_retain() {
let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
db_run! { conn: {
diesel::delete(event::table.filter(event::event_date.lt(dt)))
.execute(conn)
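The chrono change above swaps Duration::days, which panics on out-of-range values, for the checked TimeDelta::try_days constructor; a small sketch, assuming chrono 0.4.33+:

use chrono::{TimeDelta, Utc};

fn main() {
    // main's style: the constructor returns Option, hence the unwrap() at the
    // call site; a bogus retention value fails loudly instead of overflowing.
    let days_to_retain: i64 = 30;
    let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
    println!("deleting events older than {dt}");

    // Out-of-range day counts are reported as None rather than panicking:
    assert!(TimeDelta::try_days(i64::MAX).is_none());
}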

View File

@ -43,10 +43,10 @@ impl Folder {
use crate::util::format_date;
json!({
"id": self.uuid,
"revisionDate": format_date(&self.updated_at),
"name": self.name,
"object": "folder",
"Id": self.uuid,
"RevisionDate": format_date(&self.updated_at),
"Name": self.name,
"Object": "folder",
})
}
}

View File

@ -1,7 +1,3 @@
use super::{User, UserOrgType, UserOrganization};
use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
@ -62,39 +58,38 @@ impl Group {
use crate::util::format_date;
json!({
"id": self.uuid,
"organizationId": self.organizations_uuid,
"name": self.name,
"accessAll": self.access_all,
"externalId": self.external_id,
"creationDate": format_date(&self.creation_date),
"revisionDate": format_date(&self.revision_date),
"object": "group"
"Id": self.uuid,
"OrganizationId": self.organizations_uuid,
"Name": self.name,
"AccessAll": self.access_all,
"ExternalId": self.external_id,
"CreationDate": format_date(&self.creation_date),
"RevisionDate": format_date(&self.revision_date),
"Object": "group"
})
}
pub async fn to_json_details(&self, user_org_type: &i32, conn: &mut DbConn) -> Value {
pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
.await
.iter()
.map(|entry| {
json!({
"id": entry.collections_uuid,
"readOnly": entry.read_only,
"hidePasswords": entry.hide_passwords,
"manage": *user_org_type == UserOrgType::Manager && !entry.read_only && !entry.hide_passwords
"Id": entry.collections_uuid,
"ReadOnly": entry.read_only,
"HidePasswords": entry.hide_passwords
})
})
.collect();
json!({
"id": self.uuid,
"organizationId": self.organizations_uuid,
"name": self.name,
"accessAll": self.access_all,
"externalId": self.external_id,
"collections": collections_groups,
"object": "groupDetails"
"Id": self.uuid,
"OrganizationId": self.organizations_uuid,
"Name": self.name,
"AccessAll": self.access_all,
"ExternalId": self.external_id,
"Collections": collections_groups,
"Object": "groupDetails"
})
}
@ -127,6 +122,13 @@ impl GroupUser {
}
}
use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
use super::{User, UserOrganization};
/// Database methods
impl Group {
pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@ -201,11 +203,10 @@ impl Group {
}}
}
pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
groups::table
.filter(groups::external_id.eq(external_id))
.filter(groups::organizations_uuid.eq(org_uuid))
.filter(groups::external_id.eq(id))
.first::<GroupDb>(conn)
.ok()
.from_db()
@ -485,39 +486,6 @@ impl GroupUser {
}}
}
pub async fn has_access_to_collection_by_member(
collection_uuid: &str,
member_uuid: &str,
conn: &mut DbConn,
) -> bool {
db_run! { conn: {
groups_users::table
.inner_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()
.first::<i64>(conn)
.unwrap_or(0) != 0
}}
}
pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool {
db_run! { conn: {
groups_users::table
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.filter(groups::organizations_uuid.eq(org_uuid))
.filter(groups::access_all.eq(true))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()
.first::<i64>(conn)
.unwrap_or(0) != 0
}}
}
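The two helpers above answer yes/no questions with the `.count() ... != 0` idiom; Diesel can also express an existence check with select(exists(..)). A sketch against a made-up one-table schema (assuming Diesel 2.x with the SQLite backend), not a drop-in for the db_run! macro:

use diesel::dsl::exists;
use diesel::prelude::*;

diesel::table! {
    groups_users (groups_uuid, users_organizations_uuid) {
        groups_uuid -> Text,
        users_organizations_uuid -> Text,
    }
}

// Equivalent to counting rows and comparing against zero, but lets the
// database stop at the first matching row.
fn member_in_any_group(conn: &mut SqliteConnection, member: &str) -> QueryResult<bool> {
    diesel::select(exists(
        groups_users::table.filter(groups_users::users_organizations_uuid.eq(member)),
    ))
    .get_result(conn)
}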
pub async fn update_user_revision(&self, conn: &mut DbConn) {
match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,

View File

@ -12,7 +12,6 @@ mod org_policy;
mod organization;
mod send;
mod two_factor;
mod two_factor_duo_context;
mod two_factor_incomplete;
mod user;
@ -30,6 +29,5 @@ pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
pub use self::send::{Send, SendType};
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::two_factor_duo_context::TwoFactorDuoContext;
pub use self::two_factor_incomplete::TwoFactorIncomplete;
pub use self::user::{Invitation, User, UserKdfType, UserStampException};

View File

@ -4,6 +4,7 @@ use serde_json::Value;
use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;
use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
@ -38,18 +39,16 @@ pub enum OrgPolicyType {
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct SendOptionsPolicyData {
#[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
pub disable_hide_email: bool,
pub DisableHideEmail: bool,
}
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(non_snake_case)]
pub struct ResetPasswordDataModel {
#[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
pub auto_enroll_enabled: bool,
pub AutoEnrollEnabled: bool,
}
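The attribute change above is what lets main drop the UpCase wrapper: serde's alias accepts the legacy PascalCase keys while rename fixes the canonical casing. A self-contained sketch:

use serde::Deserialize;

#[derive(Deserialize)]
struct SendOptionsPolicyData {
    // Canonical camelCase, with the old PascalCase spelling still accepted.
    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
    disable_hide_email: bool,
}

fn main() {
    for raw in [r#"{"disableHideEmail":true}"#, r#"{"DisableHideEmail":true}"#] {
        let opts: SendOptionsPolicyData = serde_json::from_str(raw).unwrap();
        assert!(opts.disable_hide_email);
    }
}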
pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
@ -79,12 +78,12 @@ impl OrgPolicy {
pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
"id": self.uuid,
"organizationId": self.org_uuid,
"type": self.atype,
"data": data_json,
"enabled": self.enabled,
"object": "policy",
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Type": self.atype,
"Data": data_json,
"Enabled": self.enabled,
"Object": "policy",
})
}
}
@ -115,7 +114,7 @@ impl OrgPolicy {
// We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
// This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
// not support multiple constraints on ON CONFLICT clauses.
let _: () = diesel::delete(
diesel::delete(
org_policies::table
.filter(org_policies::org_uuid.eq(&self.org_uuid))
.filter(org_policies::atype.eq(&self.atype)),
@ -308,9 +307,9 @@ impl OrgPolicy {
pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
Ok(opts) => {
return policy.enabled && opts.auto_enroll_enabled;
return policy.enabled && opts.data.AutoEnrollEnabled;
}
_ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
},
@ -328,9 +327,9 @@ impl OrgPolicy {
{
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < UserOrgType::Admin {
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
Ok(opts) => {
if opts.disable_hide_email {
if opts.data.DisableHideEmail {
return true;
}
}
@ -341,13 +340,4 @@ impl OrgPolicy {
}
false
}
pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await {
if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await {
return policy.enabled;
}
}
false
}
}

View File

@ -1,13 +1,9 @@
use chrono::{NaiveDateTime, Utc};
use num_traits::FromPrimitive;
use serde_json::Value;
use std::{
cmp::Ordering,
collections::{HashMap, HashSet},
};
use std::cmp::Ordering;
use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
use crate::db::models::{Collection, CollectionGroup};
use crate::CONFIG;
db_object! {
@ -116,7 +112,7 @@ impl PartialOrd<i32> for UserOrgType {
}
fn ge(&self, other: &i32) -> bool {
matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal))
matches!(self.partial_cmp(other), Some(Ordering::Greater) | Some(Ordering::Equal))
}
}
@ -139,7 +135,7 @@ impl PartialOrd<UserOrgType> for i32 {
}
fn le(&self, other: &UserOrgType) -> bool {
matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal) | None)
matches!(self.partial_cmp(other), Some(Ordering::Less) | Some(Ordering::Equal) | None)
}
}
@ -157,39 +153,39 @@ impl Organization {
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
pub fn to_json(&self) -> Value {
json!({
"id": self.uuid,
"identifier": null, // not supported by us
"name": self.name,
"seats": null,
"maxAutoscaleSeats": null,
"maxCollections": null,
"maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
"use2fa": true,
"useCustomPermissions": false,
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"useEvents": CONFIG.org_events_enabled(),
"useGroups": CONFIG.org_groups_enabled(),
"useTotp": true,
"usePolicies": true,
// "useScim": false, // Not supported (Not AGPLv3 Licensed)
"useSso": false, // Not supported
// "useKeyConnector": false, // Not supported
"selfHost": true,
"useApi": true,
"hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
"useResetPassword": CONFIG.mail_enabled(),
"Id": self.uuid,
"Identifier": null, // not supported by us
"Name": self.name,
"Seats": 10, // The value doesn't matter, we don't check server-side
// "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
"MaxCollections": 10, // The value doesn't matter, we don't check server-side
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
"Use2fa": true,
"UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"UseEvents": CONFIG.org_events_enabled(),
"UseGroups": CONFIG.org_groups_enabled(),
"UseTotp": true,
"UsePolicies": true,
// "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UseSso": false, // Not supported
// "UseKeyConnector": false, // Not supported
"SelfHost": true,
"UseApi": true,
"HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
"UseResetPassword": CONFIG.mail_enabled(),
"businessName": null,
"businessAddress1": null,
"businessAddress2": null,
"businessAddress3": null,
"businessCountry": null,
"businessTaxNumber": null,
"BusinessName": null,
"BusinessAddress1": null,
"BusinessAddress2": null,
"BusinessAddress3": null,
"BusinessCountry": null,
"BusinessTaxNumber": null,
"billingEmail": self.billing_email,
"planType": 6, // Custom plan
"usersGetPremium": true,
"object": "organization",
"BillingEmail": self.billing_email,
"Plan": "TeamsAnnually",
"PlanType": 5, // TeamsAnnually plan
"UsersGetPremium": true,
"Object": "organization",
})
}
}
@ -218,7 +214,7 @@ impl UserOrganization {
}
pub fn restore(&mut self) -> bool {
if self.status < UserOrgStatus::Invited as i32 {
if self.status < UserOrgStatus::Accepted as i32 {
self.status += ACTIVATE_REVOKE_DIFF;
return true;
}
@ -320,7 +316,6 @@ impl Organization {
UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
Group::delete_all_by_organization(&self.uuid, conn).await?;
OrganizationApiKey::delete_all_by_organization(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@ -349,84 +344,65 @@ impl UserOrganization {
pub async fn to_json(&self, conn: &mut DbConn) -> Value {
let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
let permissions = json!({
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
"accessEventLogs": false,
"accessImportExport": false,
"accessReports": false,
"createNewCollections": false,
"editAnyCollection": false,
"deleteAnyCollection": false,
"editAssignedCollections": false,
"deleteAssignedCollections": false,
"manageGroups": false,
"managePolicies": false,
"manageSso": false, // Not supported
"manageUsers": false,
"manageResetPassword": false,
"manageScim": false // Not supported (Not AGPLv3 Licensed)
});
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"id": self.org_uuid,
"identifier": null, // Not supported
"name": org.name,
"seats": null,
"maxAutoscaleSeats": null,
"maxCollections": null,
"usersGetPremium": true,
"use2fa": true,
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"useEvents": CONFIG.org_events_enabled(),
"useGroups": CONFIG.org_groups_enabled(),
"useTotp": true,
"useScim": false, // Not supported (Not AGPLv3 Licensed)
"usePolicies": true,
"useApi": true,
"selfHost": true,
"hasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"resetPasswordEnrolled": self.reset_password_key.is_some(),
"useResetPassword": CONFIG.mail_enabled(),
"ssoBound": false, // Not supported
"useSso": false, // Not supported
"useKeyConnector": false,
"useSecretsManager": false,
"usePasswordManager": true,
"useCustomPermissions": false,
"useActivateAutofillPolicy": false,
"Id": self.org_uuid,
"Identifier": null, // Not supported
"Name": org.name,
"Seats": 10, // The value doesn't matter, we don't check server-side
"MaxCollections": 10, // The value doesn't matter, we don't check server-side
"UsersGetPremium": true,
"Use2fa": true,
"UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"UseEvents": CONFIG.org_events_enabled(),
"UseGroups": CONFIG.org_groups_enabled(),
"UseTotp": true,
// "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UsePolicies": true,
"UseApi": true,
"SelfHost": true,
"HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
"UseResetPassword": CONFIG.mail_enabled(),
"SsoBound": false, // Not supported
"UseSso": false, // Not supported
"ProviderId": null,
"ProviderName": null,
// "KeyConnectorEnabled": false,
// "KeyConnectorUrl": null,
"organizationUserId": self.uuid,
"providerId": null,
"providerName": null,
"providerType": null,
"familySponsorshipFriendlyName": null,
"familySponsorshipAvailable": false,
"planProductType": 3,
"productTierType": 3, // Enterprise tier
"keyConnectorEnabled": false,
"keyConnectorUrl": null,
"familySponsorshipLastSyncDate": null,
"familySponsorshipValidUntil": null,
"familySponsorshipToDelete": null,
"accessSecretsManager": false,
"limitCollectionCreationDeletion": true,
"allowAdminAccessToAllCollectionItems": true,
"flexibleCollections": false,
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
// "Permissions": {
// "AccessEventLogs": false,
// "AccessImportExport": false,
// "AccessReports": false,
// "ManageAllCollections": false,
// "CreateNewCollections": false,
// "EditAnyCollection": false,
// "DeleteAnyCollection": false,
// "ManageAssignedCollections": false,
// "editAssignedCollections": false,
// "deleteAssignedCollections": false,
// "ManageCiphers": false,
// "ManageGroups": false,
// "ManagePolicies": false,
// "ManageResetPassword": false,
// "ManageSso": false, // Not supported
// "ManageUsers": false,
// "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
// },
"permissions": permissions,
"maxStorageGb": i16::MAX, // The value doesn't matter, we don't check server-side
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
// These are per user
"userId": self.user_uuid,
"key": self.akey,
"status": self.status,
"type": self.atype,
"enabled": true,
"UserId": self.user_uuid,
"Key": self.akey,
"Status": self.status,
"Type": self.atype,
"Enabled": true,
"object": "profileOrganization",
"Object": "profileOrganization",
})
}
@ -457,47 +433,15 @@ impl UserOrganization {
};
let collections: Vec<Value> = if include_collections {
// Get all collections for the user up front to prevent additional queries
let cu: HashMap<String, CollectionUser> =
CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
.await
.into_iter()
.map(|cu| (cu.collection_uuid.clone(), cu))
.collect();
// Get all collection groups for this user to prevent their inclusion
let cg: HashSet<String> = CollectionGroup::find_by_user(&self.user_uuid, conn)
.await
.into_iter()
.map(|cg| cg.collections_uuid)
.collect();
Collection::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
.await
.into_iter()
.filter_map(|c| {
let (read_only, hide_passwords, can_manage) = if self.has_full_access() {
(false, false, self.atype == UserOrgType::Manager)
} else if let Some(cu) = cu.get(&c.uuid) {
(
cu.read_only,
cu.hide_passwords,
self.atype == UserOrgType::Manager && !cu.read_only && !cu.hide_passwords,
)
// If the previous checks failed, it might be that this user has access via a group, but we should not return those elements here
// Those are returned via a special group endpoint
} else if cg.contains(&c.uuid) {
return None;
} else {
(true, true, false)
};
Some(json!({
"id": c.uuid,
"readOnly": read_only,
"hidePasswords": hide_passwords,
"manage": can_manage,
}))
.iter()
.map(|cu| {
json!({
"Id": cu.collection_uuid,
"ReadOnly": cu.read_only,
"HidePasswords": cu.hide_passwords,
})
})
.collect()
} else {
@ -505,30 +449,29 @@ impl UserOrganization {
};
json!({
"id": self.uuid,
"userId": self.user_uuid,
"name": user.name,
"email": user.email,
"externalId": self.external_id,
"avatarColor": user.avatar_color,
"groups": groups,
"collections": collections,
"Id": self.uuid,
"UserId": self.user_uuid,
"Name": user.name,
"Email": user.email,
"ExternalId": self.external_id,
"Groups": groups,
"Collections": collections,
"status": status,
"type": self.atype,
"accessAll": self.access_all,
"twoFactorEnabled": twofactor_enabled,
"resetPasswordEnrolled": self.reset_password_key.is_some(),
"Status": status,
"Type": self.atype,
"AccessAll": self.access_all,
"TwoFactorEnabled": twofactor_enabled,
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
"object": "organizationUserUserDetails",
"Object": "organizationUserUserDetails",
})
}
pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
json!({
"id": self.uuid,
"readOnly": col_user.read_only,
"hidePasswords": col_user.hide_passwords,
"Id": self.uuid,
"ReadOnly": col_user.read_only,
"HidePasswords": col_user.hide_passwords,
})
}
@ -542,9 +485,9 @@ impl UserOrganization {
.iter()
.map(|c| {
json!({
"id": c.collection_uuid,
"readOnly": c.read_only,
"hidePasswords": c.hide_passwords,
"Id": c.collection_uuid,
"ReadOnly": c.read_only,
"HidePasswords": c.hide_passwords,
})
})
.collect()
@ -559,15 +502,15 @@ impl UserOrganization {
};
json!({
"id": self.uuid,
"userId": self.user_uuid,
"Id": self.uuid,
"UserId": self.user_uuid,
"status": status,
"type": self.atype,
"accessAll": self.access_all,
"collections": coll_uuids,
"Status": status,
"Type": self.atype,
"AccessAll": self.access_all,
"Collections": coll_uuids,
"object": "organizationUserDetails",
"Object": "organizationUserDetails",
})
}
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
@ -632,7 +575,7 @@ impl UserOrganization {
}
pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option<UserOrganization> {
if let Some(user) = User::find_by_mail(email, conn).await {
if let Some(user) = super::User::find_by_mail(email, conn).await {
if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await {
return Some(user_org);
}
@ -705,7 +648,8 @@ impl UserOrganization {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32).or(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)))
.filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32))
.or_filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.count()
.first::<i64>(conn)
.unwrap_or(0)
@ -721,16 +665,6 @@ impl UserOrganization {
}}
}
pub async fn find_confirmed_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::org_uuid.eq(org_uuid))
.filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
.load::<UserOrganizationDb>(conn)
.unwrap_or_default().from_db()
}}
}
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
users_organizations::table
@ -774,19 +708,6 @@ impl UserOrganization {
}}
}
pub async fn find_confirmed_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::org_uuid.eq(org_uuid))
.filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
)
.first::<UserOrganizationDb>(conn)
.ok().from_db()
}}
}
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
@ -848,32 +769,6 @@ impl UserOrganization {
}}
}
pub async fn find_by_cipher_and_org_with_group(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::org_uuid.eq(org_uuid))
.inner_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
))
.left_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
.left_join(ciphers_collections::table.on(
ciphers_collections::collection_uuid.eq(collections_groups::collections_uuid).and(ciphers_collections::cipher_uuid.eq(&cipher_uuid))
))
.filter(
groups::access_all.eq(true).or( // AccessAll via groups
ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection via group
)
)
.select(users_organizations::all_columns)
.distinct()
.load::<UserOrganizationDb>(conn).expect("Error loading user organizations with groups").from_db()
}}
}
pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool {
db_run! { conn: {
users_organizations::table
@ -957,14 +852,6 @@ impl OrganizationApiKey {
.ok().from_db()
}}
}
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid)))
.execute(conn)
.map_res("Error removing organization api key from organization")
}}
}
}
#[cfg(test)]

View File

@ -1,8 +1,6 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
use crate::util::LowerCase;
use super::User;
db_object! {
@ -124,58 +122,48 @@ impl Send {
use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid;
let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
// Mobile clients expect size to be a string instead of a number
if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
data["size"] = Value::String(size.to_string());
}
let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
json!({
"id": self.uuid,
"accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
"type": self.atype,
"Id": self.uuid,
"AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
"Type": self.atype,
"name": self.name,
"notes": self.notes,
"text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"Name": self.name,
"Notes": self.notes,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"key": self.akey,
"maxAccessCount": self.max_access_count,
"accessCount": self.access_count,
"password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"disabled": self.disabled,
"hideEmail": self.hide_email,
"Key": self.akey,
"MaxAccessCount": self.max_access_count,
"AccessCount": self.access_count,
"Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"Disabled": self.disabled,
"HideEmail": self.hide_email,
"revisionDate": format_date(&self.revision_date),
"expirationDate": self.expiration_date.as_ref().map(format_date),
"deletionDate": format_date(&self.deletion_date),
"object": "send",
"RevisionDate": format_date(&self.revision_date),
"ExpirationDate": self.expiration_date.as_ref().map(format_date),
"DeletionDate": format_date(&self.deletion_date),
"Object": "send",
})
}
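The size coercion at the top of to_json is small enough to verify in isolation; a standalone sketch of the same rewrite:

use serde_json::{json, Value};

fn main() {
    // Mobile clients expect "size" as a string, so the number parsed from the
    // stored JSON blob is rewritten in place before serialization.
    let mut data: Value = json!({ "fileName": "note.txt", "size": 12345 });
    if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
        data["size"] = Value::String(size.to_string());
    }
    assert_eq!(data["size"], json!("12345"));
}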
pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
use crate::util::format_date;
let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
// Mobile clients expect size to be a string instead of a number
if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
data["size"] = Value::String(size.to_string());
}
let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
json!({
"id": self.uuid,
"type": self.atype,
"Id": self.uuid,
"Type": self.atype,
"name": self.name,
"text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"Name": self.name,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"expirationDate": self.expiration_date.as_ref().map(format_date),
"creatorIdentifier": self.creator_identifier(conn).await,
"object": "send-access",
"ExpirationDate": self.expiration_date.as_ref().map(format_date),
"CreatorIdentifier": self.creator_identifier(conn).await,
"Object": "send-access",
})
}
}
@ -184,7 +172,6 @@ use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
use crate::util::NumberOrString;
impl Send {
pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@ -299,29 +286,6 @@ impl Send {
}}
}
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
let sends = Self::find_by_user(user_uuid, conn).await;
#[derive(serde::Deserialize)]
struct FileData {
#[serde(rename = "size", alias = "Size")]
size: NumberOrString,
}
let mut total: i64 = 0;
for send in sends {
if send.atype == SendType::File as i32 {
if let Ok(size) =
serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
{
total = total.checked_add(size)?;
};
}
}
Some(total)
}
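size_by_user's overflow handling is the interesting part: checked_add plus `?` turns an i64 overflow into None ("size unknown") instead of silently wrapping. A minimal sketch:

fn total_size(sizes: &[i64]) -> Option<i64> {
    let mut total: i64 = 0;
    for s in sizes {
        // None on overflow, which `?` propagates to the caller.
        total = total.checked_add(*s)?;
    }
    Some(total)
}

fn main() {
    assert_eq!(total_size(&[10, 20]), Some(30));
    assert_eq!(total_size(&[i64::MAX, 1]), None);
}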
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: {
sends::table

View File

@ -12,7 +12,7 @@ db_object! {
pub atype: i32,
pub enabled: bool,
pub data: String,
pub last_used: i64,
pub last_used: i32,
}
}
@ -54,17 +54,17 @@ impl TwoFactor {
pub fn to_json(&self) -> Value {
json!({
"enabled": self.enabled,
"key": "", // This key and value vary
"object": "twoFactorAuthenticator" // This value varies
"Enabled": self.enabled,
"Key": "", // This key and value vary
"Object": "twoFactorAuthenticator" // This value varies
})
}
pub fn to_json_provider(&self) -> Value {
json!({
"enabled": self.enabled,
"type": self.atype,
"object": "twoFactorProvider"
"Enabled": self.enabled,
"Type": self.atype,
"Object": "twoFactorProvider"
})
}
}
@ -95,7 +95,7 @@ impl TwoFactor {
// We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
// This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
// not support multiple constraints on ON CONFLICT clauses.
let _: () = diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
.execute(conn)
.map_res("Error deleting twofactor for insert")?;

View File

@ -1,84 +0,0 @@
use chrono::Utc;
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = twofactor_duo_ctx)]
#[diesel(primary_key(state))]
pub struct TwoFactorDuoContext {
pub state: String,
pub user_email: String,
pub nonce: String,
pub exp: i64,
}
}
impl TwoFactorDuoContext {
pub async fn find_by_state(state: &str, conn: &mut DbConn) -> Option<Self> {
db_run! {
conn: {
twofactor_duo_ctx::table
.filter(twofactor_duo_ctx::state.eq(state))
.first::<TwoFactorDuoContextDb>(conn)
.ok()
.from_db()
}
}
}
pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &mut DbConn) -> EmptyResult {
// A saved context should never be changed, only created or deleted.
let exists = Self::find_by_state(state, conn).await;
if exists.is_some() {
return Ok(());
};
let exp = Utc::now().timestamp() + ttl;
db_run! {
conn: {
diesel::insert_into(twofactor_duo_ctx::table)
.values((
twofactor_duo_ctx::state.eq(state),
twofactor_duo_ctx::user_email.eq(user_email),
twofactor_duo_ctx::nonce.eq(nonce),
twofactor_duo_ctx::exp.eq(exp)
))
.execute(conn)
.map_res("Error saving context to twofactor_duo_ctx")
}
}
}
pub async fn find_expired(conn: &mut DbConn) -> Vec<Self> {
let now = Utc::now().timestamp();
db_run! {
conn: {
twofactor_duo_ctx::table
.filter(twofactor_duo_ctx::exp.lt(now))
.load::<TwoFactorDuoContextDb>(conn)
.expect("Error finding expired contexts in twofactor_duo_ctx")
.from_db()
}
}
}
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
db_run! {
conn: {
diesel::delete(
twofactor_duo_ctx::table
.filter(twofactor_duo_ctx::state.eq(&self.state)))
.execute(conn)
.map_res("Error deleting from twofactor_duo_ctx")
}
}
}
pub async fn purge_expired_duo_contexts(conn: &mut DbConn) {
for context in Self::find_expired(conn).await {
context.delete(conn).await.ok();
}
}
}

View File

@ -13,7 +13,6 @@ db_object! {
// must complete 2FA login before being added into the devices table.
pub device_uuid: String,
pub device_name: String,
pub device_type: i32,
pub login_time: NaiveDateTime,
pub ip_address: String,
}
@ -24,7 +23,6 @@ impl TwoFactorIncomplete {
user_uuid: &str,
device_uuid: &str,
device_name: &str,
device_type: i32,
ip: &ClientIp,
conn: &mut DbConn,
) -> EmptyResult {
@ -46,7 +44,6 @@ impl TwoFactorIncomplete {
twofactor_incomplete::user_uuid.eq(user_uuid),
twofactor_incomplete::device_uuid.eq(device_uuid),
twofactor_incomplete::device_name.eq(device_name),
twofactor_incomplete::device_type.eq(device_type),
twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
))

View File

@ -1,4 +1,4 @@
use chrono::{NaiveDateTime, TimeDelta, Utc};
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;
use crate::crypto;
@ -144,14 +144,14 @@ impl User {
pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
if let Some(ref totp_recover) = self.totp_recover {
crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
} else {
false
}
}
pub fn check_valid_api_key(&self, key: &str) -> bool {
matches!(self.api_key, Some(ref api_key) if crypto::ct_eq(api_key, key))
matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
}
/// Set the password hash generated
@ -202,7 +202,7 @@ impl User {
let stamp_exception = UserStampException {
routes: route_exception,
security_stamp: self.security_stamp.clone(),
expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
};
self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
}
@ -240,26 +240,24 @@ impl User {
};
json!({
"_status": status as i32,
"id": self.uuid,
"name": self.name,
"email": self.email,
"emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"premium": true,
"premiumFromOrganization": false,
"masterPasswordHint": self.password_hint,
"culture": "en-US",
"twoFactorEnabled": twofactor_enabled,
"key": self.akey,
"privateKey": self.private_key,
"securityStamp": self.security_stamp,
"organizations": orgs_json,
"providers": [],
"providerOrganizations": [],
"forcePasswordReset": false,
"avatarColor": self.avatar_color,
"usesKeyConnector": false,
"object": "profile",
"_Status": status as i32,
"Id": self.uuid,
"Name": self.name,
"Email": self.email,
"EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"Premium": true,
"MasterPasswordHint": self.password_hint,
"Culture": "en-US",
"TwoFactorEnabled": twofactor_enabled,
"Key": self.akey,
"PrivateKey": self.private_key,
"SecurityStamp": self.security_stamp,
"Organizations": orgs_json,
"Providers": [],
"ProviderOrganizations": [],
"ForcePasswordReset": false,
"AvatarColor": self.avatar_color,
"Object": "profile",
})
}
@ -313,7 +311,6 @@ impl User {
Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?;

View File

@ -3,7 +3,7 @@ table! {
id -> Text,
cipher_uuid -> Text,
file_name -> Text,
file_size -> BigInt,
file_size -> Integer,
akey -> Nullable<Text>,
}
}
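Diesel maps SQL Integer to i32 and BigInt to i64, so the file_size widening above is what allows attachments beyond i32::MAX (about 2 GiB). A trivial illustration of the limit:

fn main() {
    let four_gib: i64 = 4 * 1024 * 1024 * 1024;
    // Would not fit the old Integer (i32) column type:
    assert!(i32::try_from(four_gib).is_err());
    println!("{four_gib} bytes needs BigInt/i64");
}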
@ -160,7 +160,7 @@ table! {
atype -> Integer,
enabled -> Bool,
data -> Text,
last_used -> BigInt,
last_used -> Integer,
}
}
@ -169,21 +169,11 @@ table! {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
device_type -> Integer,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
twofactor_duo_ctx (state) {
state -> Text,
user_email -> Text,
nonce -> Text,
exp -> BigInt,
}
}
table! {
users (uuid) {
uuid -> Text,

View File

@ -3,7 +3,7 @@ table! {
id -> Text,
cipher_uuid -> Text,
file_name -> Text,
file_size -> BigInt,
file_size -> Integer,
akey -> Nullable<Text>,
}
}
@ -160,7 +160,7 @@ table! {
atype -> Integer,
enabled -> Bool,
data -> Text,
last_used -> BigInt,
last_used -> Integer,
}
}
@ -169,21 +169,11 @@ table! {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
device_type -> Integer,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
twofactor_duo_ctx (state) {
state -> Text,
user_email -> Text,
nonce -> Text,
exp -> BigInt,
}
}
table! {
users (uuid) {
uuid -> Text,

View File

@ -3,7 +3,7 @@ table! {
id -> Text,
cipher_uuid -> Text,
file_name -> Text,
file_size -> BigInt,
file_size -> Integer,
akey -> Nullable<Text>,
}
}
@ -160,7 +160,7 @@ table! {
atype -> Integer,
enabled -> Bool,
data -> Text,
last_used -> BigInt,
last_used -> Integer,
}
}
@ -169,21 +169,11 @@ table! {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
device_type -> Integer,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
twofactor_duo_ctx (state) {
state -> Text,
user_email -> Text,
nonce -> Text,
exp -> BigInt,
}
}
table! {
users (uuid) {
uuid -> Text,

View File

@ -2,7 +2,6 @@
// Error generator macro
//
use crate::db::models::EventType;
use crate::http_client::CustomHttpClientError;
use std::error::Error as StdError;
macro_rules! make_error {
@ -53,6 +52,7 @@ use rocket::error::Error as RocketErr;
use serde_json::{Error as SerdeErr, Value};
use std::io::Error as IoErr;
use std::time::SystemTimeError as TimeErr;
use tokio_tungstenite::tungstenite::Error as TungstError;
use webauthn_rs::error::WebauthnError as WebauthnErr;
use yubico::yubicoerror::YubicoError as YubiErr;
@ -69,10 +69,6 @@ make_error! {
Empty(Empty): _no_source, _serialize,
// Used to represent err! calls
Simple(String): _no_source, _api_error,
// Used in our custom http client to handle non-global IPs and blocked domains
CustomHttpClient(CustomHttpClientError): _has_source, _api_error,
// Used for special return values, like 2FA errors
Json(Value): _no_source, _serialize,
Db(DieselErr): _has_source, _api_error,
@ -95,6 +91,7 @@ make_error! {
DieselCon(DieselConErr): _has_source, _api_error,
Webauthn(WebauthnErr): _has_source, _api_error,
WebSocket(TungstError): _has_source, _api_error,
}
impl std::fmt::Debug for Error {
@ -184,18 +181,18 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {
fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
let json = json!({
"message": msg,
"Message": msg,
"error": "",
"error_description": "",
"validationErrors": {"": [ msg ]},
"errorModel": {
"message": msg,
"object": "error"
"ValidationErrors": {"": [ msg ]},
"ErrorModel": {
"Message": msg,
"Object": "error"
},
"exceptionMessage": null,
"exceptionStackTrace": null,
"innerExceptionMessage": null,
"object": "error"
"ExceptionMessage": null,
"ExceptionStackTrace": null,
"InnerExceptionMessage": null,
"Object": "error"
});
_serialize(&json, "")
}
@ -209,7 +206,7 @@ use rocket::http::{ContentType, Status};
use rocket::request::Request;
use rocket::response::{self, Responder, Response};
impl Responder<'_, 'static> for Error {
impl<'r> Responder<'r, 'static> for Error {
fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
match self.error {
ErrorKind::Empty(_) => {} // Don't print the error in this situation

View File

@ -1,246 +0,0 @@
use std::{
fmt,
net::{IpAddr, SocketAddr},
str::FromStr,
sync::{Arc, Mutex},
time::Duration,
};
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{
dns::{Name, Resolve, Resolving},
header, Client, ClientBuilder,
};
use url::Host;
use crate::{util::is_global, CONFIG};
pub fn make_http_request(method: reqwest::Method, url: &str) -> Result<reqwest::RequestBuilder, crate::Error> {
let Ok(url) = url::Url::parse(url) else {
err!("Invalid URL");
};
let Some(host) = url.host() else {
err!("Invalid host");
};
should_block_host(host)?;
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
Ok(INSTANCE.request(method, url))
}
pub fn get_reqwest_client_builder() -> ClientBuilder {
let mut headers = header::HeaderMap::new();
headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
let redirect_policy = reqwest::redirect::Policy::custom(|attempt| {
if attempt.previous().len() >= 5 {
return attempt.error("Too many redirects");
}
let Some(host) = attempt.url().host() else {
return attempt.error("Invalid host");
};
if let Err(e) = should_block_host(host) {
return attempt.error(e);
}
attempt.follow()
});
Client::builder()
.default_headers(headers)
.redirect(redirect_policy)
.dns_resolver(CustomDnsResolver::instance())
.timeout(Duration::from_secs(10))
}
pub fn should_block_address(domain_or_ip: &str) -> bool {
if let Ok(ip) = IpAddr::from_str(domain_or_ip) {
if should_block_ip(ip) {
return true;
}
}
should_block_address_regex(domain_or_ip)
}
fn should_block_ip(ip: IpAddr) -> bool {
if !CONFIG.http_request_block_non_global_ips() {
return false;
}
!is_global(ip)
}
fn should_block_address_regex(domain_or_ip: &str) -> bool {
let Some(block_regex) = CONFIG.http_request_block_regex() else {
return false;
};
static COMPILED_REGEX: Mutex<Option<(String, Regex)>> = Mutex::new(None);
let mut guard = COMPILED_REGEX.lock().unwrap();
// If the stored regex is up to date, use it
if let Some((value, regex)) = &*guard {
if value == &block_regex {
return regex.is_match(domain_or_ip);
}
}
// If we don't have a regex stored, or it's not up to date, recreate it
let regex = Regex::new(&block_regex).unwrap();
let is_match = regex.is_match(domain_or_ip);
*guard = Some((block_regex, regex));
is_match
}
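The Mutex-guarded cache above recompiles the regex only when the configured pattern changes; the generic form of that memoization, with a cheap stand-in for Regex::new (illustrative only, Vaultwarden inlines this):

use std::sync::Mutex;

// Recompute an expensive value only when its source string changes.
static CACHE: Mutex<Option<(String, usize)>> = Mutex::new(None);

fn cached_len(input: &str) -> usize {
    let mut guard = CACHE.lock().unwrap();
    if let Some((key, value)) = &*guard {
        if key == input {
            return *value;
        }
    }
    let value = input.len(); // stand-in for Regex::new(input)
    *guard = Some((input.to_string(), value));
    value
}

fn main() {
    assert_eq!(cached_len("abc"), 3);
    assert_eq!(cached_len("abc"), 3); // second call hits the cache
}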
fn should_block_host(host: Host<&str>) -> Result<(), CustomHttpClientError> {
let (ip, host_str): (Option<IpAddr>, String) = match host {
Host::Ipv4(ip) => (Some(ip.into()), ip.to_string()),
Host::Ipv6(ip) => (Some(ip.into()), ip.to_string()),
Host::Domain(d) => (None, d.to_string()),
};
if let Some(ip) = ip {
if should_block_ip(ip) {
return Err(CustomHttpClientError::NonGlobalIp {
domain: None,
ip,
});
}
}
if should_block_address_regex(&host_str) {
return Err(CustomHttpClientError::Blocked {
domain: host_str,
});
}
Ok(())
}
#[derive(Debug, Clone)]
pub enum CustomHttpClientError {
Blocked {
domain: String,
},
NonGlobalIp {
domain: Option<String>,
ip: IpAddr,
},
}
impl CustomHttpClientError {
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
let mut source = e.source();
while let Some(err) = source {
source = err.source();
if let Some(err) = err.downcast_ref::<CustomHttpClientError>() {
return Some(err);
}
}
None
}
}
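downcast_ref above walks the std::error source chain; the same walk generalizes to any concrete error type. A generic sketch (the function name is invented here):

use std::error::Error;

// Scan an error's source chain for a concrete type T, returning the first hit.
fn find_in_chain<T: Error + 'static>(e: &(dyn Error + 'static)) -> Option<&T> {
    let mut source = e.source();
    while let Some(err) = source {
        if let Some(hit) = err.downcast_ref::<T>() {
            return Some(hit);
        }
        source = err.source();
    }
    None
}

Like the original, this starts at e.source(), so the top-level error itself is not inspected.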
impl fmt::Display for CustomHttpClientError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Blocked {
domain,
} => write!(f, "Blocked domain: {domain} matched HTTP_REQUEST_BLOCK_REGEX"),
Self::NonGlobalIp {
domain: Some(domain),
ip,
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
Self::NonGlobalIp {
domain: None,
ip,
} => write!(f, "IP {ip} is not a global IP!"),
}
}
}
impl std::error::Error for CustomHttpClientError {}
#[derive(Debug, Clone)]
enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl CustomDnsResolver {
fn instance() -> Arc<Self> {
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
Arc::clone(&*INSTANCE)
}
fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomHttpClientError> {
if should_block_address(name) {
return Err(CustomHttpClientError::Blocked {
domain: name.to_string(),
});
}
Ok(())
}
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomHttpClientError> {
if should_block_ip(ip) {
Err(CustomHttpClientError::NonGlobalIp {
domain: Some(name.to_string()),
ip,
})
} else {
Ok(())
}
}
impl Resolve for CustomDnsResolver {
    fn resolve(&self, name: Name) -> Resolving {
        let this = self.clone();
        Box::pin(async move {
            let name = name.as_str();
            let result = this.resolve_domain(name).await?;
            Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
        })
    }
}
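Wiring this into reqwest happens via ClientBuilder::dns_resolver, which accepts an Arc of any Resolve implementation, so every lookup the client performs goes through the checks above. A minimal sketch (the timeout value is illustrative, not taken from the codebase):

fn build_client() -> reqwest::Result<reqwest::Client> {
    reqwest::Client::builder()
        // All DNS lookups now pass through pre_resolve/post_resolve
        .dns_resolver(CustomDnsResolver::instance())
        .timeout(std::time::Duration::from_secs(10))
        .build()
}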

View File

@@ -17,7 +17,6 @@ use crate::{
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
generate_verify_email_claims,
},
db::models::{Device, DeviceType, User},
error::Error,
CONFIG,
};
@@ -230,51 +229,37 @@ pub async fn send_single_org_removed_from_org(address: &str, org_name: &str) ->
}
pub async fn send_invite(
user: &User,
address: &str,
uuid: &str,
org_id: Option<String>,
org_user_id: Option<String>,
org_name: &str,
invited_by_email: Option<String>,
) -> EmptyResult {
let claims = generate_invite_claims(
user.uuid.clone(),
user.email.clone(),
uuid.to_string(),
String::from(address),
org_id.clone(),
org_user_id.clone(),
invited_by_email,
);
let invite_token = encode_jwt(&claims);
let mut query = url::Url::parse("https://query.builder").unwrap();
{
let mut query_params = query.query_pairs_mut();
query_params
.append_pair("email", &user.email)
.append_pair("organizationName", org_name)
.append_pair("organizationId", org_id.as_deref().unwrap_or("_"))
.append_pair("organizationUserId", org_user_id.as_deref().unwrap_or("_"))
.append_pair("token", &invite_token);
if user.private_key.is_some() {
query_params.append_pair("orgUserHasExistingUser", "true");
}
}
let query_string = match query.query() {
None => err!("Failed to build invite URL query parameters"),
Some(query) => query,
};
// `url::Url` would place the anchor `#` after the query parameters
let url = format!("{}/#/accept-organization/?{}", CONFIG.domain(), query_string);
let (subject, body_html, body_text) = get_text(
"email/send_org_invite",
json!({
"url": url,
"url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(),
"org_id": org_id.as_deref().unwrap_or("_"),
"org_user_id": org_user_id.as_deref().unwrap_or("_"),
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
"org_name_encoded": percent_encode(org_name.as_bytes(), NON_ALPHANUMERIC).to_string(),
"org_name": org_name,
"token": invite_token,
}),
)?;
send_email(&user.email, &subject, body_html, body_text).await
send_email(address, &subject, body_html, body_text).await
}
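One side of this hunk builds the query on a throwaway https://query.builder URL and then splices the query string into the final link by hand. The reason: url::Url always serializes the query before the fragment, while the web vault expects the parameters inside the #/accept-organization/ fragment route. A small sketch of that behavior, using an illustrative example.com URL:

fn fragment_query_demo() {
    let mut u = url::Url::parse("https://example.com/#/accept-organization/").unwrap();
    u.query_pairs_mut().append_pair("email", "user@example.com");
    // Prints: https://example.com/?email=user%40example.com#/accept-organization/
    // i.e. the query lands before the fragment, not inside it.
    println!("{u}");
}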
pub async fn send_emergency_access_invite(
@@ -442,8 +427,9 @@ pub async fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult
send_email(address, &subject, body_html, body_text).await
}
pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &Device) -> EmptyResult {
pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
use crate::util::upcase_first;
let device = upcase_first(device);
let fmt = "%A, %B %_d, %Y at %r %Z";
let (subject, body_html, body_text) = get_text(
@@ -452,8 +438,7 @@ pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime,
"url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(),
"ip": ip,
"device_name": upcase_first(&device.name),
"device_type": DeviceType::from_i32(device.atype).to_string(),
"device": device,
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
}),
)?;
@@ -461,14 +446,9 @@ pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime,
send_email(address, &subject, body_html, body_text).await
}
pub async fn send_incomplete_2fa_login(
address: &str,
ip: &str,
dt: &NaiveDateTime,
device_name: &str,
device_type: &str,
) -> EmptyResult {
pub async fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
use crate::util::upcase_first;
let device = upcase_first(device);
let fmt = "%A, %B %_d, %Y at %r %Z";
let (subject, body_html, body_text) = get_text(
@@ -477,8 +457,7 @@ pub async fn send_incomplete_2fa_login(
"url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(),
"ip": ip,
"device_name": upcase_first(device_name),
"device_type": device_type,
"device": device,
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
"time_limit": CONFIG.incomplete_2fa_time_limit(),
}),
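Both device mails format the timestamp with the same chrono pattern. A minimal sketch of that pattern applied to a bare NaiveDateTime; note that format_naive_datetime_local in the codebase appears to localize the timestamp first, which is what makes %Z meaningful there, so %Z is omitted here because a naive datetime has no timezone to print:

use chrono::NaiveDate;

fn demo_device_mail_timestamp() {
    let dt = NaiveDate::from_ymd_opt(2024, 1, 15).unwrap().and_hms_opt(13, 5, 0).unwrap();
    // Prints e.g. "Monday, January 15, 2024 at 01:05:00 PM"
    println!("{}", dt.format("%A, %B %_d, %Y at %r"));
}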

Some files were not shown because too many files have changed in this diff.